diff --git a/cts/patterns.py b/cts/patterns.py
index 877ea69758..97b7eedd7a 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,413 +1,414 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2008-2019 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
]
self.BadNews = []
self.components = {}
self.commands = {
"StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"SetCheckInterval" : "cibadmin --modify -c --xml-text ''",
"ClearCheckInterval" : "cibadmin --delete --xpath \"//nvpair[@name='cluster-recheck-interval']\"",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : r"(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* of %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover %s",
"Pat:Fencing_active" : r"pacemaker-schedulerd.*: Resource %s is active on .* nodes",
"Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* for host '%s' with device .* returned: 0",
}
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
- "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
+ # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
+ "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
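# Annotation (illustrative sketch, not part of this patch): the updated
# "Pat:ChildKilled" template accepts both log forms named in the comment
# above -- pcmk_child_exit()'s "terminated with signal 9" and the bare
# "terminated" at line end from check_active_before_startup_processes().
# With the two %s placeholders filled in (hypothetical node/daemon names):
#
#   import re
#   pat = r"node1\W.*pacemakerd.*pacemaker-fenced\[[0-9]+\] terminated( with signal 9|$)"
#   assert re.search(pat, "node1 pacemakerd: pacemaker-fenced[1234] terminated with signal 9")
#   assert re.search(pat, "node1 pacemakerd: pacemaker-fenced[1234] terminated")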
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* with device 'FencingFail' returned:",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
- r"\[[0-9]+\] terminated with signal [0-9]+ \(",
- r"schedulerd:.*Recover .*\(.* -\> .*\)",
+ r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
+ r"pacemaker-schedulerd.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Executing .* fencing operation \(.*\) on ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
# "Resource .*stonith::.* is active on 2 nodes attempting recovery",
# "Transition .* ERRORs found during PE processing",
]
self.components["corosync-ignore"] = [
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling Node .* for STONITH",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning failed child process: pacemaker-attrd",
r"pacemakerd.* Respawning failed child process: pacemaker-based",
r"pacemakerd.* Respawning failed child process: pacemaker-controld",
r"pacemakerd.* Respawning failed child process: pacemaker-fenced",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
- r"pacemakerd.*pacemaker-execd.* terminated with signal 9",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.*Respawning failed child process: pacemaker-execd",
r"pacemakerd.*Respawning failed child process: pacemaker-controld",
]
self.components["pacemaker-execd-ignore"] = [
r"pacemaker-attrd.*Connection to lrmd (failed|closed)",
+ r"pacemaker-(attrd|controld).*Could not execute alert",
]
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"Respawning failed child process: pacemaker-controld",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
"Connection to pengine failed",
"Connection to pengine.* closed",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = []
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
r"pacemaker-controld.*Fencer successfully connected",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Fencer connection failed \(will retry\)",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class, as run inside
Docker containers (started/stopped via the pcmk_start/pcmk_stop wrappers)
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
# python cts/CTSpatt.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index 232bcc48dd..b0fffd8675 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -1,869 +1,886 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
qb_ipcs_service_t *ipcs = NULL;
#if SUPPORT_COROSYNC
extern gboolean crm_connect_corosync(crm_cluster_t * cluster);
#endif
void crm_shutdown(int nsig);
gboolean crm_read_options(gpointer user_data);
gboolean fsa_has_quorum = FALSE;
crm_trigger_t *fsa_source = NULL;
crm_trigger_t *config_read = NULL;
bool no_quorum_suicide_escalation = FALSE;
/* A_HA_CONNECT */
void
do_ha_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
gboolean registered = FALSE;
static crm_cluster_t *cluster = NULL;
if (cluster == NULL) {
cluster = calloc(1, sizeof(crm_cluster_t));
}
if (action & A_HA_DISCONNECT) {
crm_cluster_disconnect(cluster);
crm_info("Disconnected from the cluster");
set_bit(fsa_input_register, R_HA_DISCONNECTED);
}
if (action & A_HA_CONNECT) {
crm_set_status_callback(&peer_update_callback);
crm_set_autoreap(FALSE);
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
registered = crm_connect_corosync(cluster);
#endif
}
if (registered == TRUE) {
controld_election_init(cluster->uname);
fsa_our_uname = cluster->uname;
fsa_our_uuid = cluster->uuid;
if(cluster->uuid == NULL) {
crm_err("Could not obtain local uuid");
registered = FALSE;
}
}
if (registered == FALSE) {
set_bit(fsa_input_register, R_HA_DISCONNECTED);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
populate_cib_nodes(node_update_none, __FUNCTION__);
clear_bit(fsa_input_register, R_HA_DISCONNECTED);
crm_info("Connected to the cluster");
}
if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__);
}
}
/* A_SHUTDOWN */
void
do_shutdown(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* just in case */
set_bit(fsa_input_register, R_SHUTDOWN);
controld_disconnect_fencer(FALSE);
}
/* A_SHUTDOWN_REQ */
void
do_shutdown_req(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *msg = NULL;
set_bit(fsa_input_register, R_SHUTDOWN);
crm_info("Sending shutdown request to all peers (DC is %s)",
(fsa_our_dc? fsa_our_dc : "not set"));
msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
/* set_bit(fsa_input_register, R_STAYDOWN); */
if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
free_xml(msg);
}
extern char *max_generation_from;
extern xmlNode *max_generation_xml;
extern GHashTable *resource_history;
extern GHashTable *voted;
void
crmd_fast_exit(crm_exit_t exit_code)
{
if (is_set(fsa_input_register, R_STAYDOWN)) {
crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d",
exit_code, CRM_EX_FATAL);
exit_code = CRM_EX_FATAL;
} else if ((exit_code == CRM_EX_OK)
&& is_set(fsa_input_register, R_IN_RECOVERY)) {
crm_err("Could not recover from internal error");
exit_code = CRM_EX_ERROR;
}
crm_exit(exit_code);
}
crm_exit_t
crmd_exit(crm_exit_t exit_code)
{
GListPtr gIter = NULL;
GMainLoop *mloop = crmd_mainloop;
static bool in_progress = FALSE;
if (in_progress && (exit_code == CRM_EX_OK)) {
crm_debug("Exit is already in progress");
return exit_code;
} else if(in_progress) {
crm_notice("Error during shutdown process, exiting now with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
in_progress = TRUE;
crm_trace("Preparing to exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
/* Suppress secondary errors resulting from us disconnecting everything */
set_bit(fsa_input_register, R_HA_DISCONNECTED);
/* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */
if(ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
controld_close_attrd_ipc();
pe_subsystem_free();
controld_disconnect_fencer(TRUE);
if ((exit_code == CRM_EX_OK) && (crmd_mainloop == NULL)) {
crm_debug("No mainloop detected");
exit_code = CRM_EX_ERROR;
}
/* On an error, just get out.
*
* Otherwise, make the effort to have mainloop exit gracefully so
* that it (mostly) cleans up after itself and valgrind has less
* to report on - allowing real errors to stand out
*/
if (exit_code != CRM_EX_OK) {
crm_notice("Forcing immediate exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
/* Clean up as much memory as possible for valgrind */
for (gIter = fsa_message_queue; gIter != NULL; gIter = gIter->next) {
fsa_data_t *fsa_data = gIter->data;
crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]",
fsa_input2string(fsa_data->fsa_input),
fsa_state2string(fsa_state),
fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
delete_fsa_input(fsa_data);
}
clear_bit(fsa_input_register, R_MEMBERSHIP);
g_list_free(fsa_message_queue); fsa_message_queue = NULL;
metadata_cache_fini();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
* be used when we drain the mainloop later.
*/
cib_free_callbacks(fsa_cib_conn);
fsa_cib_conn->cmds->signoff(fsa_cib_conn);
verify_stopped(fsa_state, LOG_WARNING);
clear_bit(fsa_input_register, R_LRM_CONNECTED);
lrm_state_destroy_all();
/* This basically will not work, since mainloop has a reference to it */
mainloop_destroy_trigger(fsa_source); fsa_source = NULL;
mainloop_destroy_trigger(config_read); config_read = NULL;
mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL;
crm_client_cleanup();
crm_peer_destroy();
crm_timer_stop(transition_timer);
crm_timer_stop(integration_timer);
crm_timer_stop(finalization_timer);
crm_timer_stop(election_trigger);
crm_timer_stop(shutdown_escalation_timer);
crm_timer_stop(wait_timer);
crm_timer_stop(recheck_timer);
te_cleanup_stonith_history_sync(NULL, TRUE);
controld_free_sched_timer();
free(transition_timer); transition_timer = NULL;
free(integration_timer); integration_timer = NULL;
free(finalization_timer); finalization_timer = NULL;
free(election_trigger); election_trigger = NULL;
free(shutdown_escalation_timer); shutdown_escalation_timer = NULL;
free(wait_timer); wait_timer = NULL;
free(recheck_timer); recheck_timer = NULL;
free(fsa_our_dc_version); fsa_our_dc_version = NULL;
free(fsa_our_uname); fsa_our_uname = NULL;
free(fsa_our_uuid); fsa_our_uuid = NULL;
free(fsa_our_dc); fsa_our_dc = NULL;
free(fsa_cluster_name); fsa_cluster_name = NULL;
free(te_uuid); te_uuid = NULL;
free(failed_stop_offset); failed_stop_offset = NULL;
free(failed_start_offset); failed_start_offset = NULL;
free(max_generation_from); max_generation_from = NULL;
free_xml(max_generation_xml); max_generation_xml = NULL;
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGTRAP);
/* leave SIGCHLD engaged as we might still want to drain some service-actions */
if (mloop) {
GMainContext *ctx = g_main_loop_get_context(crmd_mainloop);
/* Don't re-enter this block */
crmd_mainloop = NULL;
/* no signals on final draining anymore */
mainloop_destroy_signal(SIGCHLD);
crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
{
int lpc = 0;
while((g_main_context_pending(ctx) && lpc < 10)) {
lpc++;
crm_trace("Iteration %d", lpc);
g_main_context_dispatch(ctx);
}
}
crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
g_main_loop_quit(mloop);
/* Won't do anything yet, since we're inside it now */
g_main_loop_unref(mloop);
} else {
mainloop_destroy_signal(SIGCHLD);
}
cib_delete(fsa_cib_conn);
fsa_cib_conn = NULL;
throttle_fini();
/* Graceful */
crm_trace("Done preparing for exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
return exit_code;
}
/* A_EXIT_0, A_EXIT_1 */
void
do_exit(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
int log_level = LOG_INFO;
const char *exit_type = "gracefully";
if (action & A_EXIT_1) {
log_level = LOG_ERR;
exit_type = "forcefully";
exit_code = CRM_EX_ERROR;
}
verify_stopped(cur_state, LOG_ERR);
do_crm_log(log_level, "Performing %s - %s exiting the controller",
fsa_action2string(action), exit_type);
crm_info("[%s] stopped (%d)", crm_system_name, exit_code);
crmd_exit(exit_code);
}
static void sigpipe_ignore(int nsig) { return; }
/* A_STARTUP */
void
do_startup(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
int was_error = 0;
crm_debug("Registering Signal Handlers");
mainloop_add_signal(SIGTERM, crm_shutdown);
mainloop_add_signal(SIGPIPE, sigpipe_ignore);
fsa_source = mainloop_add_trigger(G_PRIORITY_HIGH, crm_fsa_trigger, NULL);
config_read = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL);
transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL);
crm_debug("Creating CIB manager and executor objects");
fsa_cib_conn = cib_new();
lrm_state_init_local();
/* set up the timers */
transition_timer = calloc(1, sizeof(fsa_timer_t));
integration_timer = calloc(1, sizeof(fsa_timer_t));
finalization_timer = calloc(1, sizeof(fsa_timer_t));
election_trigger = calloc(1, sizeof(fsa_timer_t));
shutdown_escalation_timer = calloc(1, sizeof(fsa_timer_t));
wait_timer = calloc(1, sizeof(fsa_timer_t));
recheck_timer = calloc(1, sizeof(fsa_timer_t));
if (election_trigger != NULL) {
election_trigger->source_id = 0;
election_trigger->period_ms = -1;
election_trigger->fsa_input = I_DC_TIMEOUT;
election_trigger->callback = crm_timer_popped;
election_trigger->log_error = FALSE;
} else {
was_error = TRUE;
}
if (transition_timer != NULL) {
transition_timer->source_id = 0;
transition_timer->period_ms = -1;
transition_timer->fsa_input = I_PE_CALC;
transition_timer->callback = crm_timer_popped;
transition_timer->log_error = FALSE;
} else {
was_error = TRUE;
}
if (integration_timer != NULL) {
integration_timer->source_id = 0;
integration_timer->period_ms = -1;
integration_timer->fsa_input = I_INTEGRATED;
integration_timer->callback = crm_timer_popped;
integration_timer->log_error = TRUE;
} else {
was_error = TRUE;
}
if (finalization_timer != NULL) {
finalization_timer->source_id = 0;
finalization_timer->period_ms = -1;
finalization_timer->fsa_input = I_FINALIZED;
finalization_timer->callback = crm_timer_popped;
finalization_timer->log_error = FALSE;
/* for possible enabling... a bug in the join protocol left
* a slave in S_PENDING while we think it's in S_NOT_DC
*
* raising I_FINALIZED put us into a transition loop which is
* never resolved.
* in this loop we continually send probes which the node
* NACK's because it's in S_PENDING
*
* if we have nodes where the cluster layer is active but the
* CRM is not... then this will be handled in the
* integration phase
*/
finalization_timer->fsa_input = I_ELECTION;
} else {
was_error = TRUE;
}
if (shutdown_escalation_timer != NULL) {
shutdown_escalation_timer->source_id = 0;
shutdown_escalation_timer->period_ms = -1;
shutdown_escalation_timer->fsa_input = I_STOP;
shutdown_escalation_timer->callback = crm_timer_popped;
shutdown_escalation_timer->log_error = TRUE;
} else {
was_error = TRUE;
}
if (wait_timer != NULL) {
wait_timer->source_id = 0;
wait_timer->period_ms = 2000;
wait_timer->fsa_input = I_NULL;
wait_timer->callback = crm_timer_popped;
wait_timer->log_error = FALSE;
} else {
was_error = TRUE;
}
if (recheck_timer != NULL) {
recheck_timer->source_id = 0;
recheck_timer->period_ms = -1;
recheck_timer->fsa_input = I_PE_CALC;
recheck_timer->callback = crm_timer_popped;
recheck_timer->log_error = FALSE;
} else {
was_error = TRUE;
}
if (was_error) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
static int32_t
crmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
crmd_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
static int32_t
crmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *msg = crm_ipcs_recv(client, data, size, &id, &flags);
crm_trace("Invoked: %s", crm_client_name(client));
crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg == NULL) {
return 0;
}
#if ENABLE_ACL
CRM_ASSERT(client->user != NULL);
crm_acl_get_set_user(msg, F_CRM_USER, client->user);
#endif
crm_trace("Processing msg from %s", crm_client_name(client));
crm_log_xml_trace(msg, "controller[inbound]");
crm_xml_add(msg, F_CRM_SYS_FROM, client->id);
if (crmd_authorize_message(msg, client, NULL)) {
route_message(C_IPC_MESSAGE, msg);
}
trigger_fsa(fsa_source);
free_xml(msg);
return 0;
}
static int32_t
crmd_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client) {
crm_trace("Disconnecting %sregistered client %s (%p/%p)",
(client->userdata? "" : "un"), crm_client_name(client),
c, client);
free(client->userdata);
crm_client_destroy(client);
trigger_fsa(fsa_source);
}
return 0;
}
static void
crmd_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
crmd_ipc_closed(c);
}
/* A_STOP */
void
do_stop(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs); ipcs = NULL;
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* A_STARTED */
void
do_started(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static struct qb_ipcs_service_handlers crmd_callbacks = {
.connection_accept = crmd_ipc_accept,
.connection_created = crmd_ipc_created,
.msg_process = crmd_ipc_dispatch,
.connection_closed = crmd_ipc_closed,
.connection_destroyed = crmd_ipc_destroy
};
if (cur_state != S_STARTING) {
crm_err("Start cancelled... %s", fsa_state2string(cur_state));
return;
} else if (is_set(fsa_input_register, R_MEMBERSHIP) == FALSE) {
crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_LRM_CONNECTED) == FALSE) {
crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) {
crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_READ_CONFIG) == FALSE) {
crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_PEER_DATA) == FALSE) {
crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA);
crmd_fsa_stall(TRUE);
return;
}
crm_debug("Init server comms");
ipcs = crmd_ipc_server_init(&crmd_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
} else {
crm_notice("Pacemaker controller successfully started and accepting connections");
}
controld_trigger_fencer_connect();
clear_bit(fsa_input_register, R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
}
/* A_RECOVER */
void
do_recover(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
set_bit(fsa_input_register, R_IN_RECOVERY);
crm_warn("Fast-tracking shutdown in response to errors");
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* *INDENT-OFF* */
static pe_cluster_option crmd_opts[] = {
/* name, old-name, validate, values, default, short description, long description */
{ "dc-version", NULL, "string", NULL, "none", NULL,
"Version of Pacemaker on the cluster's DC.",
"Includes the hash which identifies the exact changeset it was built from. Used for diagnostic purposes."
},
{ "cluster-infrastructure", NULL, "string", NULL, "corosync", NULL,
"The messaging stack on which Pacemaker is currently running.",
"Used for informational and diagnostic purposes." },
+ { "cluster-name", NULL, "string", NULL, NULL, NULL,
+ "An arbitrary name for the cluster",
+ "This optional value is mostly for users' convenience as desired "
+ "in administration, but may also be used in Pacemaker configuration "
+ "rules via the #cluster-name node attribute, and by higher-level tools "
+ "and resource agents."
+ },
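/* Annotation (illustrative, not part of this patch): as documented above,
 * the value would typically be set with something like
 *   crm_attribute --type crmconfig --name cluster-name --update mycluster
 * ("mycluster" being a hypothetical name), after which configuration rules
 * can match it via the #cluster-name node attribute.
 */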
{ XML_CONFIG_ATTR_DC_DEADTIME, NULL, "time", NULL, "20s", &check_time,
"How long to wait for a response from other nodes during startup.",
"The \"correct\" value will depend on the speed/load of your network and the type of switches used."
},
{ XML_CONFIG_ATTR_RECHECK, NULL, "time",
"Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified. eg. 5min)",
"15min", &check_timer,
"Polling interval for time based changes to options, resource parameters and constraints.",
"The Cluster is primarily event driven, however the configuration can have elements that change based on time."
" To ensure these changes take effect, we can optionally poll the cluster's status for changes."
},
{ "load-threshold", NULL, "percentage", NULL, "80%", &check_utilization,
"The maximum amount of system resources that should be used by nodes in the cluster",
"The cluster will slow down its recovery process when the amount of system resources used"
" (currently CPU) approaches this limit",
},
{ "node-action-limit", NULL, "integer", NULL, "0", &check_number,
"The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"},
+ { XML_CONFIG_ATTR_FENCE_REACTION, NULL, "string", NULL, "stop", NULL,
+ "How a cluster node should react if notified of its own fencing",
+ "A cluster node may receive notification of its own fencing if fencing "
+ "is misconfigured, or if fabric fencing is in use that doesn't cut "
+ "cluster communication. Allowed values are \"stop\" to attempt to "
+ "immediately stop pacemaker and stay stopped, or \"panic\" to attempt "
+ "to immediately reboot the local node, falling back to stop on failure."
+ },
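/* Annotation (illustrative, not part of this patch): assuming
 * XML_CONFIG_ATTR_FENCE_REACTION names the "fence-reaction" cluster
 * property, the panic behavior would be selected in the CIB along the
 * lines of
 *   <nvpair id="opts-fence-reaction" name="fence-reaction" value="panic"/>
 * which config_query_callback() below feeds to set_fence_reaction().
 */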
{ XML_CONFIG_ATTR_ELECTION_FAIL, NULL, "time", NULL, "2min", &check_timer,
"*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug."
},
{ XML_CONFIG_ATTR_FORCE_QUIT, NULL, "time", NULL, "20min", &check_timer,
"*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug."
},
{
"join-integration-timeout", "crmd-integration-timeout",
"time", NULL, "3min", &check_timer,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates the presence of a bug"
},
{
"join-finalization-timeout", "crmd-finalization-timeout",
"time", NULL, "30min", &check_timer,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates the presence of a bug"
},
{
"transition-delay", "crmd-transition-delay",
"time", NULL, "0s", &check_timer,
"*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions",
"Delay cluster recovery for the configured interval to allow for additional/related events to occur.\n"
"Useful if your configuration is sensitive to the order in which ping updates arrive."
},
{ "stonith-watchdog-timeout", NULL, "time", NULL, NULL, &check_sbd_timeout,
"How long to wait before we can assume nodes are safely down", NULL
},
{ "stonith-max-attempts",NULL,"integer",NULL,"10",&check_positive_number,
"How many times stonith can fail before it will no longer be attempted on a target"
},
{ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL },
};
/* *INDENT-ON* */
void
crmd_metadata(void)
{
config_metadata("pacemaker-controld", "1.0",
"controller properties",
"Cluster properties used by Pacemaker's controller,"
" formerly known as crmd",
crmd_opts, DIMOF(crmd_opts));
}
static void
verify_crmd_options(GHashTable * options)
{
verify_all_options(options, crmd_opts, DIMOF(crmd_opts));
}
static const char *
crmd_pref(GHashTable * options, const char *name)
{
return get_cluster_pref(options, crmd_opts, DIMOF(crmd_opts), name);
}
static void
config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
const char *value = NULL;
GHashTable *config_hash = NULL;
crm_time_t *now = crm_time_new(NULL);
xmlNode *crmconfig = NULL;
xmlNode *alerts = NULL;
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
crm_err("The cluster is mis-configured - shutting down and staying down");
set_bit(fsa_input_register, R_STAYDOWN);
}
goto bail;
}
crmconfig = output;
if ((crmconfig) &&
(crm_element_name(crmconfig)) &&
(strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) {
crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG);
}
if (!crmconfig) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
goto bail;
}
crm_debug("Call %d : Parsing CIB options", call_id);
config_hash = crm_str_table_new();
unpack_instance_attributes(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL, config_hash,
CIB_OPTIONS_FIRST, FALSE, now);
verify_crmd_options(config_hash);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME);
election_trigger->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "node-action-limit"); /* Also checks migration-limit */
throttle_update_job_max(value);
value = crmd_pref(config_hash, "load-threshold");
if(value) {
throttle_set_load_target(strtof(value, NULL) / 100.0);
}
value = crmd_pref(config_hash, "no-quorum-policy");
if (safe_str_eq(value, "suicide") && pcmk_locate_sbd()) {
no_quorum_suicide_escalation = TRUE;
}
+ set_fence_reaction(crmd_pref(config_hash, XML_CONFIG_ATTR_FENCE_REACTION));
+
value = crmd_pref(config_hash,"stonith-max-attempts");
update_stonith_max_attempts(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_FORCE_QUIT);
shutdown_escalation_timer->period_ms = crm_get_msec(value);
/* How long to declare an election over - even if not everyone voted */
crm_debug("Shutdown escalation occurs after: %dms", shutdown_escalation_timer->period_ms);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_ELECTION_FAIL);
controld_set_election_period(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_RECHECK);
recheck_timer->period_ms = crm_get_msec(value);
crm_debug("Checking for expired actions every %dms", recheck_timer->period_ms);
value = crmd_pref(config_hash, "transition-delay");
transition_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-integration-timeout");
integration_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-finalization-timeout");
finalization_timer->period_ms = crm_get_msec(value);
free(fsa_cluster_name);
fsa_cluster_name = NULL;
value = g_hash_table_lookup(config_hash, "cluster-name");
if (value) {
fsa_cluster_name = strdup(value);
}
alerts = first_named_child(output, XML_CIB_TAG_ALERTS);
crmd_unpack_alerts(alerts);
set_bit(fsa_input_register, R_READ_CONFIG);
crm_trace("Triggering FSA: %s", __FUNCTION__);
mainloop_set_trigger(fsa_source);
g_hash_table_destroy(config_hash);
bail:
crm_time_free(now);
}
gboolean
crm_read_options(gpointer user_data)
{
int call_id =
fsa_cib_conn->cmds->query(fsa_cib_conn,
"//" XML_CIB_TAG_CRMCONFIG " | //" XML_CIB_TAG_ALERTS,
NULL, cib_xpath | cib_scope_local);
fsa_register_cib_callback(call_id, FALSE, NULL, config_query_callback);
crm_trace("Querying the CIB... call %d", call_id);
return TRUE;
}
/* A_READCONFIG */
void
do_read_config(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
throttle_init();
mainloop_set_trigger(config_read);
}
void
crm_shutdown(int nsig)
{
if (crmd_mainloop != NULL && g_main_loop_is_running(crmd_mainloop)) {
if (is_set(fsa_input_register, R_SHUTDOWN)) {
crm_err("Escalating the shutdown");
register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL);
} else {
set_bit(fsa_input_register, R_SHUTDOWN);
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
if (shutdown_escalation_timer->period_ms < 1) {
const char *value = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT);
int msec = crm_get_msec(value);
crm_debug("Using default shutdown escalation: %dms", msec);
shutdown_escalation_timer->period_ms = msec;
}
/* can't rely on this... */
crm_notice("Shutting down cluster resource manager " CRM_XS
" limit=%dms", shutdown_escalation_timer->period_ms);
crm_timer_start(shutdown_escalation_timer);
}
} else {
crm_info("exit from shutdown");
crmd_exit(CRM_EX_OK);
}
}
diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c
index afb862a17a..0d6d703d63 100644
--- a/daemons/controld/controld_fencing.c
+++ b/daemons/controld/controld_fencing.c
@@ -1,928 +1,952 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
static void
tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event);
/*
* stonith failure counting
*
* We don't want to get stuck in a permanent fencing loop. Keep track of the
* number of fencing failures for each target node, and the most we'll restart a
* transition for.
*/
struct st_fail_rec {
int count;
};
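/* Annotation (illustrative, not part of this patch): with the default
 * stonith-max-attempts of 10, a target whose fail count reaches 10 makes
 * too_many_st_failures() return TRUE, and abort_for_stonith_failure()
 * then downgrades a tg_restart abort to tg_stop, so no new transition is
 * started for that target's fencing.
 */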
+static bool fence_reaction_panic = FALSE;
static unsigned long int stonith_max_attempts = 10;
static GHashTable *stonith_failures = NULL;
void
update_stonith_max_attempts(const char *value)
{
if (safe_str_eq(value, CRM_INFINITY_S)) {
stonith_max_attempts = CRM_SCORE_INFINITY;
} else {
stonith_max_attempts = crm_int_helper(value, NULL);
}
}
+void
+set_fence_reaction(const char *reaction_s)
+{
+ if (safe_str_eq(reaction_s, "panic")) {
+ fence_reaction_panic = TRUE;
+
+ } else {
+ if (safe_str_neq(reaction_s, "stop")) {
+ crm_warn("Invalid value '%s' for %s, using 'stop'",
+ reaction_s, XML_CONFIG_ATTR_FENCE_REACTION);
+ }
+ fence_reaction_panic = FALSE;
+ }
+}
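/* Annotation (illustrative, not part of this patch): config_query_callback()
 * in controld_control.c passes the configured value straight through here via
 *   set_fence_reaction(crmd_pref(config_hash, XML_CONFIG_ATTR_FENCE_REACTION));
 * and since the option defaults to "stop", anything other than "panic" keeps
 * fence_reaction_panic FALSE, warning on unrecognized values.
 */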
+
static gboolean
too_many_st_failures(const char *target)
{
GHashTableIter iter;
const char *key = NULL;
struct st_fail_rec *value = NULL;
if (stonith_failures == NULL) {
return FALSE;
}
if (target == NULL) {
g_hash_table_iter_init(&iter, stonith_failures);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &value)) {
if (value->count >= stonith_max_attempts) {
target = (const char*)key;
goto too_many;
}
}
} else {
value = g_hash_table_lookup(stonith_failures, target);
if ((value != NULL) && (value->count >= stonith_max_attempts)) {
goto too_many;
}
}
return FALSE;
too_many:
crm_warn("Too many failures (%d) to fence %s, giving up",
value->count, target);
return TRUE;
}
/*!
* \internal
* \brief Reset a stonith fail count
*
* \param[in] target Name of node to reset, or NULL for all
*/
void
st_fail_count_reset(const char *target)
{
if (stonith_failures == NULL) {
return;
}
if (target) {
struct st_fail_rec *rec = NULL;
rec = g_hash_table_lookup(stonith_failures, target);
if (rec) {
rec->count = 0;
}
} else {
GHashTableIter iter;
const char *key = NULL;
struct st_fail_rec *rec = NULL;
g_hash_table_iter_init(&iter, stonith_failures);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &rec)) {
rec->count = 0;
}
}
}
static void
st_fail_count_increment(const char *target)
{
struct st_fail_rec *rec = NULL;
if (stonith_failures == NULL) {
stonith_failures = crm_str_table_new();
}
rec = g_hash_table_lookup(stonith_failures, target);
if (rec) {
rec->count++;
} else {
rec = malloc(sizeof(struct st_fail_rec));
if(rec == NULL) {
return;
}
rec->count = 1;
g_hash_table_insert(stonith_failures, strdup(target), rec);
}
}
/* end stonith fail count functions */
static void
cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output,
void *user_data)
{
if (rc < pcmk_ok) {
crm_err("Fencing update %d for %s: failed - %s (%d)",
call_id, (char *)user_data, pcmk_strerror(rc), rc);
crm_log_xml_warn(msg, "Failed update");
abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL);
} else {
crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data);
}
}
static void
send_stonith_update(crm_action_t *action, const char *target, const char *uuid)
{
int rc = pcmk_ok;
crm_node_t *peer = NULL;
/* We (usually) rely on the membership layer to do node_update_cluster,
* and the peer status callback to do node_update_peer, because the node
* might have already rejoined before we get the stonith result here.
*/
int flags = node_update_join | node_update_expected;
/* zero out the node-status & remove all LRM status info */
xmlNode *node_state = NULL;
CRM_CHECK(target != NULL, return);
CRM_CHECK(uuid != NULL, return);
/* Make sure the membership and join caches are accurate */
peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY);
CRM_CHECK(peer != NULL, return);
if (peer->state == NULL) {
/* Usually, we rely on the membership layer to update the cluster state
* in the CIB. However, if the node has never been seen, do it here, so
* the node is not considered unclean.
*/
flags |= node_update_cluster;
}
if (peer->uuid == NULL) {
crm_info("Recording uuid '%s' for node '%s'", uuid, target);
peer->uuid = strdup(uuid);
}
crmd_peer_down(peer, TRUE);
/* Generate a node state update for the CIB */
node_state = create_node_state_update(peer, flags, NULL, __FUNCTION__);
/* we have to mark whether or not remote nodes have already been fenced */
if (peer->flags & crm_remote_node) {
time_t now = time(NULL);
char *now_s = crm_itoa(now);
crm_xml_add(node_state, XML_NODE_IS_FENCED, now_s);
free(now_s);
}
/* Force our known ID */
crm_xml_add(node_state, XML_ATTR_UUID, uuid);
rc = fsa_cib_conn->cmds->update(fsa_cib_conn, XML_CIB_TAG_STATUS, node_state,
cib_quorum_override | cib_scope_local | cib_can_create);
/* Delay processing the trigger until the update completes */
crm_debug("Sending fencing update %d for %s", rc, target);
fsa_register_cib_callback(rc, FALSE, strdup(target), cib_fencing_updated);
/* Make sure it sticks */
/* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local); */
erase_status_tag(peer->uname, XML_CIB_TAG_LRM, cib_scope_local);
erase_status_tag(peer->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
free_xml(node_state);
return;
}
/*!
* \internal
* \brief Abort transition due to stonith failure
*
* \param[in] abort_action Whether to restart or stop transition
* \param[in] target Don't restart if this (NULL for any) has too many failures
* \param[in] reason Log this stonith action XML as abort reason (or NULL)
*/
static void
abort_for_stonith_failure(enum transition_action abort_action,
const char *target, xmlNode *reason)
{
/* If stonith repeatedly fails, we eventually give up on starting a new
* transition for that reason.
*/
if ((abort_action != tg_stop) && too_many_st_failures(target)) {
abort_action = tg_stop;
}
abort_transition(INFINITY, abort_action, "Stonith failed", reason);
}
/*
* stonith cleanup list
*
* If the DC is shot, proper notifications might not go out.
* The stonith cleanup list allows the cluster to (re-)send
* notifications once a new DC is elected.
*/
static GListPtr stonith_cleanup_list = NULL;
/*!
* \internal
* \brief Add a node to the stonith cleanup list
*
* \param[in] target Name of node to add
*/
void
add_stonith_cleanup(const char *target) {
stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target));
}
/*!
* \internal
* \brief Remove a node from the stonith cleanup list
*
* \param[in] target Name of node to remove
*/
void
remove_stonith_cleanup(const char *target)
{
GListPtr iter = stonith_cleanup_list;
while (iter != NULL) {
GListPtr tmp = iter;
char *iter_name = tmp->data;
iter = iter->next;
if (safe_str_eq(target, iter_name)) {
crm_trace("Removing %s from the cleanup list", iter_name);
stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp);
free(iter_name);
}
}
}
/*!
* \internal
* \brief Purge all entries from the stonith cleanup list
*/
void
purge_stonith_cleanup()
{
if (stonith_cleanup_list) {
GListPtr iter = NULL;
for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
char *target = iter->data;
crm_info("Purging %s from stonith cleanup list", target);
free(target);
}
g_list_free(stonith_cleanup_list);
stonith_cleanup_list = NULL;
}
}
/*!
* \internal
* \brief Send stonith updates for all entries in cleanup list, then purge it
*/
void
execute_stonith_cleanup()
{
GListPtr iter;
for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
char *target = iter->data;
crm_node_t *target_node = crm_get_peer(0, target);
const char *uuid = crm_peer_uuid(target_node);
crm_notice("Marking %s, target of a previous stonith action, as clean", target);
send_stonith_update(NULL, target, uuid);
free(target);
}
g_list_free(stonith_cleanup_list);
stonith_cleanup_list = NULL;
}
/* end stonith cleanup list functions */
/* stonith API client
*
* Functions that need to interact directly with the fencer via its API
*/
static stonith_t *stonith_api = NULL;
static crm_trigger_t *stonith_reconnect = NULL;
static char *te_client_id = NULL;
static gboolean
fail_incompletable_stonith(crm_graph_t *graph)
{
GListPtr lpc = NULL;
const char *task = NULL;
xmlNode *last_action = NULL;
if (graph == NULL) {
return FALSE;
}
for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
GListPtr lpc2 = NULL;
synapse_t *synapse = (synapse_t *) lpc->data;
if (synapse->confirmed) {
continue;
}
for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) {
crm_action_t *action = (crm_action_t *) lpc2->data;
if (action->type != action_type_crm || action->confirmed) {
continue;
}
task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
if (task && safe_str_eq(task, CRM_OP_FENCE)) {
action->failed = TRUE;
last_action = action->xml;
update_graph(graph, action);
crm_notice("Failing action %d (%s): fencer terminated",
action->id, ID(action->xml));
}
}
}
if (last_action != NULL) {
crm_warn("Fencer failure resulted in unrunnable actions");
abort_for_stonith_failure(tg_restart, NULL, last_action);
return TRUE;
}
return FALSE;
}
static void
tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e)
{
te_cleanup_stonith_history_sync(st, FALSE);
if (is_set(fsa_input_register, R_ST_REQUIRED)) {
crm_crit("Fencing daemon connection failed");
mainloop_set_trigger(stonith_reconnect);
} else {
crm_info("Fencing daemon disconnected");
}
if (stonith_api) {
/* the client API won't properly reconnect notifications
* if they are still in the table - so remove them
*/
if (stonith_api->state != stonith_disconnected) {
stonith_api->cmds->disconnect(st);
}
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
}
if (AM_I_DC) {
fail_incompletable_stonith(transition_graph);
trigger_graph();
}
}
static void
tengine_stonith_notify(stonith_t *st, stonith_event_t *st_event)
{
if (te_client_id == NULL) {
te_client_id = crm_strdup_printf("%s.%lu", crm_system_name,
(unsigned long) getpid());
}
if (st_event == NULL) {
crm_err("Notify data not found");
return;
}
crmd_alert_fencing_op(st_event);
if ((st_event->result == pcmk_ok) && safe_str_eq("on", st_event->action)) {
crm_notice("%s was successfully unfenced by %s (at the request of %s)",
st_event->target,
st_event->executioner? st_event->executioner : "",
st_event->origin);
/* TODO: Hook up st_event->device */
return;
} else if (safe_str_eq("on", st_event->action)) {
crm_err("Unfencing of %s by %s failed: %s (%d)",
st_event->target,
st_event->executioner? st_event->executioner : "",
pcmk_strerror(st_event->result), st_event->result);
return;
} else if ((st_event->result == pcmk_ok)
&& crm_str_eq(st_event->target, fsa_our_uname, TRUE)) {
/* We were notified of our own fencing. Most likely, either fencing was
* misconfigured, or fabric fencing that doesn't cut cluster
* communication is in use.
*
* Either way, shutting down the local host is a good idea, to require
* administrator intervention. Also, other nodes would otherwise likely
* set our status to lost because of the fencing callback and discard
* our subsequent election votes as "not part of our cluster".
*/
crm_crit("We were allegedly just fenced by %s for %s!",
st_event->executioner? st_event->executioner : "the cluster",
st_event->origin); /* Dumps blackbox if enabled */
- pcmk_panic(__FUNCTION__);
+ if (fence_reaction_panic) {
+ pcmk_panic(__FUNCTION__);
+ } else {
+ crm_exit(CRM_EX_FATAL);
+ }
return;
}
/* Update the count of stonith failures for this target, in case we become
* DC later. The current DC has already updated its fail count in
* tengine_stonith_callback().
*/
if (!AM_I_DC && safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) {
if (st_event->result == pcmk_ok) {
st_fail_count_reset(st_event->target);
} else {
st_fail_count_increment(st_event->target);
}
}
crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s: %s "
CRM_XS " initiator=%s ref=%s",
st_event->target, st_event->result == pcmk_ok ? "" : " not",
st_event->action,
st_event->executioner ? st_event->executioner : "",
(st_event->client_origin? st_event->client_origin : ""),
pcmk_strerror(st_event->result),
st_event->origin, st_event->id);
if (st_event->result == pcmk_ok) {
crm_node_t *peer = crm_find_known_peer_full(0, st_event->target, CRM_GET_PEER_ANY);
const char *uuid = NULL;
gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname);
if (peer == NULL) {
return;
}
uuid = crm_peer_uuid(peer);
crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc);
if(AM_I_DC) {
/* The DC always sends updates */
send_stonith_update(NULL, st_event->target, uuid);
/* @TODO Ideally, at this point, we'd check whether the fenced node
* hosted any guest nodes, and call remote_node_down() for them.
* Unfortunately, the controller doesn't have a simple, reliable way
* to map hosts to guests. It might be possible to track this in the
* peer cache via crm_remote_peer_cache_refresh(). For now, we rely
* on the PE creating fence pseudo-events for the guests.
*/
if (st_event->client_origin
&& safe_str_neq(st_event->client_origin, te_client_id)) {
/* Abort the current transition graph if it wasn't us
* that invoked stonith to fence someone
*/
crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target);
abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL);
}
/* Assume it was our leader if we don't currently have one */
} else if (((fsa_our_dc == NULL) || safe_str_eq(fsa_our_dc, st_event->target))
&& is_not_set(peer->flags, crm_remote_node)) {
crm_notice("Target %s our leader %s (recorded: %s)",
fsa_our_dc ? "was" : "may have been", st_event->target,
fsa_our_dc ? fsa_our_dc : "");
/* Given the CIB resyncing that occurs around elections,
* have one node update the CIB now and, if the new DC is different,
* have them do so too after the election
*/
if (we_are_executioner) {
send_stonith_update(NULL, st_event->target, uuid);
}
add_stonith_cleanup(st_event->target);
}
/* If the target is a remote node, and we host its connection,
* immediately fail all monitors so it can be recovered quickly.
* The connection won't necessarily drop when a remote node is fenced,
* so the failure might not otherwise be detected until the next poke.
*/
if (is_set(peer->flags, crm_remote_node)) {
remote_ra_fail(st_event->target);
}
crmd_peer_down(peer, TRUE);
}
}
/*!
* \brief Connect to fencer
*
* \param[in] user_data If NULL, retry failures now, otherwise retry in main loop
*
* \return TRUE
* \note If user_data is NULL, this will wait 2s between attempts, for up to
* 30 attempts, meaning the controller could be blocked as long as 58s
* (29 two-second waits between the 30 attempts).
*/
static gboolean
te_connect_stonith(gpointer user_data)
{
int rc = pcmk_ok;
if (stonith_api == NULL) {
stonith_api = stonith_api_new();
+ if (stonith_api == NULL) {
+ crm_err("Could not connect to fencer: API memory allocation failed");
+ return TRUE;
+ }
}
if (stonith_api->state != stonith_disconnected) {
crm_trace("Already connected to fencer, no need to retry");
return TRUE;
}
if (user_data == NULL) {
// Blocking (retry failures now until successful)
rc = stonith_api_connect_retry(stonith_api, crm_system_name, 30);
if (rc != pcmk_ok) {
crm_err("Could not connect to fencer in 30 attempts: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
}
} else {
// Non-blocking (retry failures later in main loop)
rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL);
if (rc != pcmk_ok) {
if (is_set(fsa_input_register, R_ST_REQUIRED)) {
crm_err("Fencer connection failed (will retry): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
mainloop_set_trigger(stonith_reconnect);
} else {
crm_info("Fencer connection failed (ignoring because no longer required): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
}
return TRUE;
}
}
if (rc == pcmk_ok) {
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_DISCONNECT,
tengine_stonith_connection_destroy);
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_FENCE,
tengine_stonith_notify);
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_HISTORY_SYNCED,
tengine_stonith_history_synced);
te_trigger_stonith_history_sync(TRUE);
crm_notice("Fencer successfully connected");
}
return TRUE;
}
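/* Usage patterns (illustrative, based on callers visible in this file):
 * te_fence_node() calls te_connect_stonith(NULL) to block until a fencer
 * connection exists before requesting fencing, while
 * controld_trigger_fencer_connect() passes a non-NULL pointer via the
 * main-loop trigger so connection failures are retried asynchronously
 * instead of blocking the controller for up to 58s.
 */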
/*!
\internal
\brief Schedule fencer connection attempt in main loop
*/
void
controld_trigger_fencer_connect()
{
if (stonith_reconnect == NULL) {
stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW,
te_connect_stonith,
GINT_TO_POINTER(TRUE));
}
set_bit(fsa_input_register, R_ST_REQUIRED);
mainloop_set_trigger(stonith_reconnect);
}
void
controld_disconnect_fencer(bool destroy)
{
if (stonith_api) {
// Prevent fencer connection from coming up again
clear_bit(fsa_input_register, R_ST_REQUIRED);
if (stonith_api->state != stonith_disconnected) {
stonith_api->cmds->disconnect(stonith_api);
}
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
}
if (destroy) {
if (stonith_api) {
stonith_api->cmds->free(stonith_api);
stonith_api = NULL;
}
if (stonith_reconnect) {
mainloop_destroy_trigger(stonith_reconnect);
stonith_reconnect = NULL;
}
if (te_client_id) {
free(te_client_id);
te_client_id = NULL;
}
}
}
static gboolean
do_stonith_history_sync(gpointer user_data)
{
if (stonith_api && (stonith_api->state != stonith_disconnected)) {
stonith_history_t *history = NULL;
te_cleanup_stonith_history_sync(stonith_api, FALSE);
stonith_api->cmds->history(stonith_api,
st_opt_sync_call | st_opt_broadcast,
NULL, &history, 5);
stonith_history_free(history);
return TRUE;
} else {
crm_info("Skip triggering stonith history-sync as stonith is disconnected");
return FALSE;
}
}
static void
tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data)
{
char *uuid = NULL;
int stonith_id = -1;
int transition_id = -1;
crm_action_t *action = NULL;
int call_id = data->call_id;
int rc = data->rc;
char *userdata = data->userdata;
CRM_CHECK(userdata != NULL, return);
crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata,
pcmk_strerror(rc), rc);
if (AM_I_DC == FALSE) {
return;
}
/* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */
/* op->call_id, op->optype, op->node_name, op->op_result, */
/* (char *)op->node_list, op->private_data); */
/* filter out old STONITH actions */
CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, NULL),
goto bail);
if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid)
|| transition_graph->id != transition_id) {
crm_info("Ignoring STONITH action initiated outside of the current transition");
goto bail;
}
action = controld_get_action(stonith_id);
if (action == NULL) {
crm_err("Stonith action not matched");
goto bail;
}
stop_te_timer(action->timer);
if (rc == pcmk_ok) {
const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
const char *op = crm_meta_value(action->params, "stonith_action");
crm_info("Stonith operation %d for %s passed", call_id, target);
if (action->confirmed == FALSE) {
te_action_confirmed(action, NULL);
if (safe_str_eq("on", op)) {
const char *value = NULL;
char *now = crm_itoa(time(NULL));
update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE);
free(now);
value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL);
update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE);
value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE);
update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE);
} else if (action->sent_update == FALSE) {
send_stonith_update(action, target, uuid);
action->sent_update = TRUE;
}
}
st_fail_count_reset(target);
} else {
const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
enum transition_action abort_action = tg_restart;
action->failed = TRUE;
crm_notice("Stonith operation %d for %s failed (%s): aborting transition.",
call_id, target, pcmk_strerror(rc));
/* If no fence devices were available, there's no use in immediately
* checking again, so don't start a new transition in that case.
*/
if (rc == -ENODEV) {
crm_warn("No devices found in cluster to fence %s, giving up",
target);
abort_action = tg_stop;
}
/* Increment the fail count now, so abort_for_stonith_failure() can
* check it. Non-DC nodes will increment it in tengine_stonith_notify().
*/
st_fail_count_increment(target);
abort_for_stonith_failure(abort_action, target, NULL);
}
update_graph(transition_graph, action);
trigger_graph();
bail:
free(userdata);
free(uuid);
return;
}
gboolean
te_fence_node(crm_graph_t *graph, crm_action_t *action)
{
int rc = 0;
const char *id = NULL;
const char *uuid = NULL;
const char *target = NULL;
const char *type = NULL;
gboolean invalid_action = FALSE;
enum stonith_call_options options = st_opt_none;
id = ID(action->xml);
target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
type = crm_meta_value(action->params, "stonith_action");
CRM_CHECK(id != NULL, invalid_action = TRUE);
CRM_CHECK(uuid != NULL, invalid_action = TRUE);
CRM_CHECK(type != NULL, invalid_action = TRUE);
CRM_CHECK(target != NULL, invalid_action = TRUE);
if (invalid_action) {
crm_log_xml_warn(action->xml, "BadAction");
return FALSE;
}
crm_notice("Requesting fencing (%s) of node %s "
CRM_XS " action=%s timeout=%d",
type, target, id, transition_graph->stonith_timeout);
/* Passing NULL means block until we can connect... */
te_connect_stonith(NULL);
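/* If we are the only confirmed cluster member, the fencing target may be
 * ourselves, so allow self-fencing (assumed rationale for the flag below) */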
if (crmd_join_phase_count(crm_join_confirmed) == 1) {
options |= st_opt_allow_suicide;
}
rc = stonith_api->cmds->fence(stonith_api, options, target, type,
transition_graph->stonith_timeout / 1000, 0);
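/* rc is the fencer call ID for this request (or a negative errno);
 * registering the callback against it ties tengine_stonith_callback()
 * to this specific fencing operation */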
stonith_api->cmds->register_callback(stonith_api, rc, transition_graph->stonith_timeout / 1000,
st_opt_timeout_updates,
generate_transition_key(transition_graph->id, action->id,
0, te_uuid),
"tengine_stonith_callback", tengine_stonith_callback);
return TRUE;
}
/* end stonith API client functions */
/*
* stonith history synchronization
*
* Each node's fencer keeps track of a cluster-wide fencing history. When a node
* joins or leaves, we need to synchronize the history across all nodes.
*/
static crm_trigger_t *stonith_history_sync_trigger = NULL;
static mainloop_timer_t *stonith_history_sync_timer_short = NULL;
static mainloop_timer_t *stonith_history_sync_timer_long = NULL;
void
te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers)
{
if (free_timers) {
mainloop_timer_del(stonith_history_sync_timer_short);
stonith_history_sync_timer_short = NULL;
mainloop_timer_del(stonith_history_sync_timer_long);
stonith_history_sync_timer_long = NULL;
} else {
mainloop_timer_stop(stonith_history_sync_timer_short);
mainloop_timer_stop(stonith_history_sync_timer_long);
}
if (st) {
st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED);
}
}
static void
tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event)
{
te_cleanup_stonith_history_sync(st, FALSE);
crm_debug("Fence-history synced - cancel all timers");
}
static gboolean
stonith_history_sync_set_trigger(gpointer user_data)
{
mainloop_set_trigger(stonith_history_sync_trigger);
return FALSE;
}
void
te_trigger_stonith_history_sync(bool long_timeout)
{
/* Trigger a sync in 5s, to give more nodes a chance to show up and avoid
 * creating unnecessary stonith-history-sync traffic.
 *
 * The long 30s timeout is a fallback: after a successful connection to the
 * fencer, we wait up to 30s for the DC to trigger a history sync. If that
 * doesn't happen (e.g. the fencer segfaulted and was restarted by
 * pacemakerd), we trigger a sync locally.
 */
/* Since do_stonith_history_sync() checks the fencer connection before doing
 * anything, it is safe to leave stonith_history_sync_trigger and the sync
 * timers allocated between uses.
 */
if (stonith_history_sync_trigger == NULL) {
stonith_history_sync_trigger =
mainloop_add_trigger(G_PRIORITY_LOW,
do_stonith_history_sync, NULL);
}
if (long_timeout) {
if(stonith_history_sync_timer_long == NULL) {
stonith_history_sync_timer_long =
mainloop_timer_add("history_sync_long", 30000,
FALSE, stonith_history_sync_set_trigger,
NULL);
}
crm_info("Fence history will be synchronized cluster-wide within 30 seconds");
mainloop_timer_start(stonith_history_sync_timer_long);
} else {
if(stonith_history_sync_timer_short == NULL) {
stonith_history_sync_timer_short =
mainloop_timer_add("history_sync_short", 5000,
FALSE, stonith_history_sync_set_trigger,
NULL);
}
crm_info("Fence history will be synchronized cluster-wide within 5 seconds");
mainloop_timer_start(stonith_history_sync_timer_short);
}
}
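/* Example timeline (illustrative): after reconnecting to the fencer, every
 * node arms the 30s fallback via te_trigger_stonith_history_sync(TRUE). If
 * the DC broadcasts a sync first, tengine_stonith_history_synced() cancels
 * both timers; otherwise the fallback fires, sets the trigger, and
 * do_stonith_history_sync() broadcasts a history query itself.
 */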
/* end stonith history synchronization functions */
diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h
index 2fe6d88d4a..d0ecc82349 100644
--- a/daemons/controld/controld_fencing.h
+++ b/daemons/controld/controld_fencing.h
@@ -1,35 +1,38 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CONTROLD_FENCING__H
# define CONTROLD_FENCING__H
#include <stdbool.h> // bool
#include <crm/transition.h> // crm_graph_t, crm_action_t
+// reaction to notification of local node being fenced
+void set_fence_reaction(const char *reaction_s);
+
// stonith fail counts
void st_fail_count_reset(const char * target);
void update_stonith_max_attempts(const char* value);
// stonith API client
void controld_trigger_fencer_connect(void);
void controld_disconnect_fencer(bool destroy);
gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action);
// stonith cleanup list
void add_stonith_cleanup(const char *target);
void remove_stonith_cleanup(const char *target);
void purge_stonith_cleanup(void);
void execute_stonith_cleanup(void);
// stonith history synchronization
void te_trigger_stonith_history_sync(bool long_timeout);
void te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers);
#endif
diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c
index 84ab9f4c43..20fbfac1b2 100644
--- a/daemons/execd/execd_commands.c
+++ b/daemons/execd/execd_commands.c
@@ -1,1837 +1,1841 @@
/*
* Copyright 2012-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-execd.h"
#ifdef HAVE_SYS_TIMEB_H
# include <sys/timeb.h>
#endif
#define EXIT_REASON_MAX_LEN 128
GHashTable *rsc_list = NULL;
typedef struct lrmd_cmd_s {
int timeout;
guint interval_ms;
int start_delay;
int timeout_orig;
int call_id;
int exec_rc;
int lrmd_op_status;
int call_opts;
/* Timer ids, must be removed on cmd destruction. */
int delay_id;
int stonith_recurring_id;
int rsc_deleted;
int service_flags;
char *client_id;
char *origin;
char *rsc_id;
char *action;
char *real_action;
char *exit_reason;
char *output;
char *userdata_str;
#ifdef HAVE_SYS_TIMEB_H
/* Recurring and systemd operations may involve more than one executor
* command per operation, so they need info about the original and the most
* recent.
*/
struct timeb t_first_run; /* Timestamp of when op first ran */
struct timeb t_run; /* Timestamp of when op most recently ran */
struct timeb t_first_queue; /* Timestamp of when op first was queued */
struct timeb t_queue; /* Timestamp of when op most recently was queued */
struct timeb t_rcchange; /* Timestamp of last rc change */
#endif
int first_notify_sent;
int last_notify_rc;
int last_notify_op_status;
int last_pid;
GHashTable *params;
} lrmd_cmd_t;
static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc);
static gboolean lrmd_rsc_dispatch(gpointer user_data);
static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id);
static void
log_finished(lrmd_cmd_t * cmd, int exec_time, int queue_time)
{
char pid_str[32] = { 0, };
int log_level = LOG_INFO;
if (cmd->last_pid) {
snprintf(pid_str, 32, "%d", cmd->last_pid);
}
if (safe_str_eq(cmd->action, "monitor")) {
log_level = LOG_DEBUG;
}
#ifdef HAVE_SYS_TIMEB_H
do_crm_log(log_level,
"finished - rsc:%s action:%s call_id:%d %s%s exit-code:%d exec-time:%dms queue-time:%dms",
cmd->rsc_id, cmd->action, cmd->call_id, cmd->last_pid ? "pid:" : "", pid_str,
cmd->exec_rc, exec_time, queue_time);
#else
do_crm_log(log_level, "finished - rsc:%s action:%s call_id:%d %s%s exit-code:%d",
cmd->rsc_id,
cmd->action, cmd->call_id, cmd->last_pid ? "pid:" : "", pid_str, cmd->exec_rc);
#endif
}
static void
log_execute(lrmd_cmd_t * cmd)
{
int log_level = LOG_INFO;
if (safe_str_eq(cmd->action, "monitor")) {
log_level = LOG_DEBUG;
}
do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d",
cmd->rsc_id, cmd->action, cmd->call_id);
}
static const char *
normalize_action_name(lrmd_rsc_t * rsc, const char *action)
{
if (safe_str_eq(action, "monitor") &&
is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) {
return "status";
}
return action;
}
static lrmd_rsc_t *
build_rsc_from_xml(xmlNode * msg)
{
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR);
lrmd_rsc_t *rsc = NULL;
rsc = calloc(1, sizeof(lrmd_rsc_t));
crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts);
rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID);
rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS);
rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER);
rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE);
rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_rsc_dispatch, rsc);
rsc->st_probe_rc = -ENODEV; // if stonith, initialize to "not running"
return rsc;
}
static lrmd_cmd_t *
create_lrmd_cmd(xmlNode * msg, crm_client_t * client)
{
int call_options = 0;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR);
lrmd_cmd_t *cmd = NULL;
cmd = calloc(1, sizeof(lrmd_cmd_t));
crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options);
cmd->call_opts = call_options;
cmd->client_id = strdup(client->id);
crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id);
crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval_ms);
crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout);
crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay);
cmd->timeout_orig = cmd->timeout;
cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN);
cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION);
cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR);
cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID);
cmd->params = xml2list(rsc_xml);
if (safe_str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block")) {
crm_debug("Setting flag to leave pid group on timeout and only kill action pid for " CRM_OP_FMT,
cmd->rsc_id, cmd->action, cmd->interval_ms);
cmd->service_flags |= SVC_ACTION_LEAVE_GROUP;
}
return cmd;
}
static void
stop_recurring_timer(lrmd_cmd_t *cmd)
{
if (cmd) {
if (cmd->stonith_recurring_id) {
g_source_remove(cmd->stonith_recurring_id);
}
cmd->stonith_recurring_id = 0;
}
}
static void
free_lrmd_cmd(lrmd_cmd_t * cmd)
{
stop_recurring_timer(cmd);
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->params) {
g_hash_table_destroy(cmd->params);
}
free(cmd->origin);
free(cmd->action);
free(cmd->real_action);
free(cmd->userdata_str);
free(cmd->rsc_id);
free(cmd->output);
free(cmd->exit_reason);
free(cmd->client_id);
free(cmd);
}
static gboolean
stonith_recurring_op_helper(gpointer data)
{
lrmd_cmd_t *cmd = data;
lrmd_rsc_t *rsc;
cmd->stonith_recurring_id = 0;
if (!cmd->rsc_id) {
return FALSE;
}
rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id);
CRM_ASSERT(rsc != NULL);
/* take it out of recurring_ops list, and put it in the pending ops
* to be executed */
rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd);
rsc->pending_ops = g_list_append(rsc->pending_ops, cmd);
#ifdef HAVE_SYS_TIMEB_H
ftime(&cmd->t_queue);
if (cmd->t_first_queue.time == 0) {
cmd->t_first_queue = cmd->t_queue;
}
#endif
mainloop_set_trigger(rsc->work);
return FALSE;
}
static inline void
start_recurring_timer(lrmd_cmd_t *cmd)
{
if (cmd && (cmd->interval_ms > 0)) {
cmd->stonith_recurring_id = g_timeout_add(cmd->interval_ms,
stonith_recurring_op_helper,
cmd);
}
}
static gboolean
start_delay_helper(gpointer data)
{
lrmd_cmd_t *cmd = data;
lrmd_rsc_t *rsc = NULL;
cmd->delay_id = 0;
rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL;
if (rsc) {
mainloop_set_trigger(rsc->work);
}
return FALSE;
}
static gboolean
merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
GListPtr gIter = NULL;
lrmd_cmd_t * dup = NULL;
gboolean dup_pending = FALSE;
if (cmd->interval_ms == 0) {
return 0;
}
for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) {
dup = gIter->data;
if (safe_str_eq(cmd->action, dup->action)
&& (cmd->interval_ms == dup->interval_ms)) {
dup_pending = TRUE;
goto merge_dup;
}
}
/* If dup is in the recurring_ops list, it has already executed and is in the
 * interval loop; we can't simply remove it in that case. */
for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) {
dup = gIter->data;
if (safe_str_eq(cmd->action, dup->action)
&& (cmd->interval_ms == dup->interval_ms)) {
goto merge_dup;
}
}
return FALSE;
merge_dup:
/* This should not occur. If it does, we need to investigate how something
* like this is possible in the controller.
*/
crm_warn("Duplicate recurring op entry detected (" CRM_OP_FMT "), merging with previous op entry",
rsc->rsc_id,
normalize_action_name(rsc, dup->action),
dup->interval_ms);
/* merge */
dup->first_notify_sent = 0;
free(dup->userdata_str);
dup->userdata_str = cmd->userdata_str;
cmd->userdata_str = NULL;
dup->call_id = cmd->call_id;
if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
/* if we are waiting for the next interval, kick it off now */
if (dup_pending == TRUE) {
stop_recurring_timer(cmd);
stonith_recurring_op_helper(cmd);
}
} else if (dup_pending == FALSE) {
/* if we've already handed this to the service lib, kick off an early execution */
services_action_kick(rsc->rsc_id,
normalize_action_name(rsc, dup->action),
dup->interval_ms);
}
free_lrmd_cmd(cmd);
return TRUE;
}
static void
schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
gboolean dup_processed = FALSE;
CRM_CHECK(cmd != NULL, return);
CRM_CHECK(rsc != NULL, return);
crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id);
dup_processed = merge_recurring_duplicate(rsc, cmd);
if (dup_processed) {
/* duplicate recurring cmd found, cmds merged */
return;
}
/* The controller expects the executor to automatically cancel
* recurring operations before a resource stops.
*/
if (safe_str_eq(cmd->action, "stop")) {
cancel_all_recurring(rsc, NULL);
}
rsc->pending_ops = g_list_append(rsc->pending_ops, cmd);
#ifdef HAVE_SYS_TIMEB_H
ftime(&cmd->t_queue);
if (cmd->t_first_queue.time == 0) {
cmd->t_first_queue = cmd->t_queue;
}
#endif
mainloop_set_trigger(rsc->work);
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
}
static xmlNode *
create_lrmd_reply(const char *origin, int rc, int call_id)
{
xmlNode *reply = create_xml_node(NULL, T_LRMD_REPLY);
crm_xml_add(reply, F_LRMD_ORIGIN, origin);
crm_xml_add_int(reply, F_LRMD_RC, rc);
crm_xml_add_int(reply, F_LRMD_CALLID, call_id);
return reply;
}
static void
send_client_notify(gpointer key, gpointer value, gpointer user_data)
{
xmlNode *update_msg = user_data;
crm_client_t *client = value;
int rc;
int log_level = LOG_WARNING;
const char *msg = NULL;
CRM_CHECK(client != NULL, return);
if (client->name == NULL) {
crm_trace("Skipping notification to client without name");
return;
}
rc = lrmd_server_send_notify(client, update_msg);
if (rc > 0) {
return; // success
}
switch (rc) {
case 0:
msg = "no data sent";
break;
case -ENOTCONN:
case -EPIPE: // Client exited without waiting for notification
log_level = LOG_INFO;
msg = "Disconnected";
break;
default:
msg = pcmk_strerror(rc);
break;
}
do_crm_log(log_level,
"Could not notify client %s/%s: %s " CRM_XS " rc=%d",
client->name, client->id, msg, rc);
}
#ifdef HAVE_SYS_TIMEB_H
/*!
* \internal
* \brief Return difference between two times in milliseconds
*
* \param[in] now More recent time (or NULL to use current time)
* \param[in] old Earlier time
*
* \return milliseconds difference (or 0 if old is NULL or has time zero)
*/
static int
time_diff_ms(struct timeb *now, struct timeb *old)
{
struct timeb local_now = { 0, };
if (now == NULL) {
ftime(&local_now);
now = &local_now;
}
if ((old == NULL) || (old->time == 0)) {
return 0;
}
return difftime(now->time, old->time) * 1000 + now->millitm - old->millitm;
}
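/* Example: now = {time = 10, millitm = 250} and old = {time = 9,
 * millitm = 900} yields (10 - 9) * 1000 + 250 - 900 = 350ms. */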
/*!
* \internal
* \brief Reset a command's operation times to their original values.
*
* Reset a command's run and queued timestamps to the timestamps of the original
* command, so we report the entire time since then and not just the time since
* the most recent command (for recurring and systemd operations).
*
* \param[in] cmd Executor command object to reset
*
* \note It's not obvious what the queued time should be for a systemd
* start/stop operation, which might go like this:
* initial command queued 5ms, runs 3s
* monitor command queued 10ms, runs 10s
* monitor command queued 10ms, runs 10s
* Is the queued time for that operation 5ms, 10ms or 25ms? The current
* implementation will report 5ms. If it's 25ms, then we need to
* subtract 20ms from the total exec time so as not to count it twice.
* We can implement that later if it matters to anyone ...
*/
static void
cmd_original_times(lrmd_cmd_t * cmd)
{
cmd->t_run = cmd->t_first_run;
cmd->t_queue = cmd->t_first_queue;
}
#endif
static void
send_cmd_complete_notify(lrmd_cmd_t * cmd)
{
int exec_time = 0;
int queue_time = 0;
xmlNode *notify = NULL;
#ifdef HAVE_SYS_TIMEB_H
exec_time = time_diff_ms(NULL, &cmd->t_run);
queue_time = time_diff_ms(&cmd->t_run, &cmd->t_queue);
#endif
log_finished(cmd, exec_time, queue_time);
/* If the first notify result for a command has already been sent, and the
 * option to send notifications only on result changes is set, check whether
 * the new result matches the last one sent; if so, suppress this update. */
if (cmd->first_notify_sent && (cmd->call_opts & lrmd_opt_notify_changes_only)) {
if (cmd->last_notify_rc == cmd->exec_rc &&
cmd->last_notify_op_status == cmd->lrmd_op_status) {
/* only send changes */
return;
}
}
cmd->first_notify_sent = 1;
cmd->last_notify_rc = cmd->exec_rc;
cmd->last_notify_op_status = cmd->lrmd_op_status;
notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout);
crm_xml_add_ms(notify, F_LRMD_RSC_INTERVAL, cmd->interval_ms);
crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay);
crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->exec_rc);
crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->lrmd_op_status);
crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id);
crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted);
#ifdef HAVE_SYS_TIMEB_H
crm_xml_add_int(notify, F_LRMD_RSC_RUN_TIME, cmd->t_run.time);
crm_xml_add_int(notify, F_LRMD_RSC_RCCHANGE_TIME, cmd->t_rcchange.time);
crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time);
crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time);
#endif
crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC);
crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id);
if(cmd->real_action) {
crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action);
} else {
crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action);
}
crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str);
crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->output);
crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, cmd->exit_reason);
if (cmd->params) {
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS);
g_hash_table_iter_init(&iter, cmd->params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
hash2smartfield((gpointer) key, (gpointer) value, args);
}
}
if (cmd->client_id && (cmd->call_opts & lrmd_opt_notify_orig_only)) {
crm_client_t *client = crm_client_get_by_id(cmd->client_id);
if (client) {
send_client_notify(client->id, client, notify);
}
} else if (client_connections != NULL) {
g_hash_table_foreach(client_connections, send_client_notify, notify);
}
free_xml(notify);
}
static void
send_generic_notify(int rc, xmlNode * request)
{
if (client_connections != NULL) {
int call_id = 0;
xmlNode *notify = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
const char *op = crm_element_value(request, F_LRMD_OPERATION);
crm_element_value_int(request, F_LRMD_CALLID, &call_id);
notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add_int(notify, F_LRMD_RC, rc);
crm_xml_add_int(notify, F_LRMD_CALLID, call_id);
crm_xml_add(notify, F_LRMD_OPERATION, op);
crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id);
g_hash_table_foreach(client_connections, send_client_notify, notify);
free_xml(notify);
}
}
static void
cmd_reset(lrmd_cmd_t * cmd)
{
cmd->lrmd_op_status = 0;
cmd->last_pid = 0;
#ifdef HAVE_SYS_TIMEB_H
memset(&cmd->t_run, 0, sizeof(cmd->t_run));
memset(&cmd->t_queue, 0, sizeof(cmd->t_queue));
#endif
free(cmd->exit_reason);
cmd->exit_reason = NULL;
free(cmd->output);
cmd->output = NULL;
}
static void
cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc)
{
crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action,
rsc ? rsc->active : NULL, cmd);
if (rsc && (rsc->active == cmd)) {
rsc->active = NULL;
mainloop_set_trigger(rsc->work);
}
if (!rsc) {
cmd->rsc_deleted = 1;
}
/* reset original timeout so client notification has correct information */
cmd->timeout = cmd->timeout_orig;
send_cmd_complete_notify(cmd);
if (cmd->interval_ms && (cmd->lrmd_op_status == PCMK_LRM_OP_CANCELLED)) {
if (rsc) {
rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd);
rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd);
}
free_lrmd_cmd(cmd);
} else if (cmd->interval_ms == 0) {
if (rsc) {
rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd);
}
free_lrmd_cmd(cmd);
} else {
/* Clear all the values pertaining just to the last iteration of a recurring op. */
cmd_reset(cmd);
}
}
static int
ocf2uniform_rc(int rc)
{
if (rc < 0 || rc > PCMK_OCF_FAILED_MASTER) {
return PCMK_OCF_UNKNOWN_ERROR;
}
return rc;
}
static int
stonith2uniform_rc(const char *action, int rc)
{
switch (rc) {
case pcmk_ok:
rc = PCMK_OCF_OK;
break;
case -ENODEV:
/* This should be possible only for probes in practice, but
* interpret for all actions to be safe.
*/
if (safe_str_eq(action, "monitor")) {
rc = PCMK_OCF_NOT_RUNNING;
} else if (safe_str_eq(action, "stop")) {
rc = PCMK_OCF_OK;
} else {
rc = PCMK_OCF_NOT_INSTALLED;
}
break;
case -EOPNOTSUPP:
rc = PCMK_OCF_UNIMPLEMENT_FEATURE;
break;
case -ETIME:
case -ETIMEDOUT:
rc = PCMK_OCF_TIMEOUT;
break;
default:
rc = PCMK_OCF_UNKNOWN_ERROR;
break;
}
return rc;
}
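/* Example: a probe (non-recurring monitor) of a fence device that the fencer
 * does not have registered returns -ENODEV, which maps to
 * PCMK_OCF_NOT_RUNNING above -- this is why build_rsc_from_xml() initializes
 * st_probe_rc to -ENODEV for stonith resources. */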
#if SUPPORT_NAGIOS
static int
nagios2uniform_rc(const char *action, int rc)
{
if (rc < 0) {
return PCMK_OCF_UNKNOWN_ERROR;
}
switch (rc) {
case NAGIOS_STATE_OK:
return PCMK_OCF_OK;
case NAGIOS_INSUFFICIENT_PRIV:
return PCMK_OCF_INSUFFICIENT_PRIV;
case NAGIOS_NOT_INSTALLED:
return PCMK_OCF_NOT_INSTALLED;
case NAGIOS_STATE_WARNING:
case NAGIOS_STATE_CRITICAL:
case NAGIOS_STATE_UNKNOWN:
case NAGIOS_STATE_DEPENDENT:
default:
return PCMK_OCF_UNKNOWN_ERROR;
}
return PCMK_OCF_UNKNOWN_ERROR;
}
#endif
static int
get_uniform_rc(const char *standard, const char *action, int rc)
{
if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_OCF)) {
return ocf2uniform_rc(rc);
} else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
return stonith2uniform_rc(action, rc);
} else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD)) {
return rc;
} else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART)) {
return rc;
#if SUPPORT_NAGIOS
} else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS)) {
return nagios2uniform_rc(action, rc);
#endif
} else {
return services_get_ocf_exitcode(action, rc);
}
}
static int
action_get_uniform_rc(svc_action_t * action)
{
lrmd_cmd_t *cmd = action->cb_data;
return get_uniform_rc(action->standard, cmd->action, action->rc);
}
void
notify_of_new_client(crm_client_t *new_client)
{
crm_client_t *client = NULL;
GHashTableIter iter;
xmlNode *notify = NULL;
char *key = NULL;
notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT);
g_hash_table_iter_init(&iter, client_connections);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & client)) {
if (safe_str_eq(client->id, new_client->id)) {
continue;
}
send_client_notify((gpointer) key, (gpointer) client, (gpointer) notify);
}
free_xml(notify);
}
static char *
parse_exit_reason(const char *output)
{
const char *cur = NULL;
const char *last = NULL;
static int cookie_len = 0;
char *eol = NULL;
size_t reason_len = EXIT_REASON_MAX_LEN;
if (output == NULL) {
return NULL;
}
if (!cookie_len) {
cookie_len = strlen(PCMK_OCF_REASON_PREFIX);
}
cur = strstr(output, PCMK_OCF_REASON_PREFIX);
for (; cur != NULL; cur = strstr(cur, PCMK_OCF_REASON_PREFIX)) {
/* skip over the cookie delimiter string */
cur += cookie_len;
last = cur;
}
if (last == NULL) {
return NULL;
}
// Truncate everything after a newline, and limit the reason string size
eol = strchr(last, '\n');
if (eol) {
reason_len = QB_MIN(reason_len, eol - last);
}
return strndup(last, reason_len);
}
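/* Example (illustrative): if agent stderr contains
 *     ERROR: something failed
 *     <reason-prefix>Configuration parameter 'foo' is missing
 * where <reason-prefix> stands for the PCMK_OCF_REASON_PREFIX cookie, this
 * returns the text after the *last* prefix occurrence, truncated at the
 * first newline and capped at EXIT_REASON_MAX_LEN bytes. */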
void
client_disconnect_cleanup(const char *client_id)
{
GHashTableIter iter;
lrmd_rsc_t *rsc = NULL;
char *key = NULL;
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) {
if (rsc->call_opts & lrmd_opt_drop_recurring) {
/* This client is disconnecting, drop any recurring operations
* it may have initiated on the resource */
cancel_all_recurring(rsc, client_id);
}
}
}
static void
action_complete(svc_action_t * action)
{
lrmd_rsc_t *rsc;
lrmd_cmd_t *cmd = action->cb_data;
const char *rclass = NULL;
bool goagain = false;
if (!cmd) {
crm_err("Completed executor action (%s) does not match any known operations",
action->id);
return;
}
#ifdef HAVE_SYS_TIMEB_H
if (cmd->exec_rc != action->rc) {
ftime(&cmd->t_rcchange);
}
#endif
cmd->last_pid = action->pid;
cmd->exec_rc = action_get_uniform_rc(action);
cmd->lrmd_op_status = action->status;
rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL;
if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE)) {
rclass = resources_find_service_class(rsc->type);
} else if(rsc) {
rclass = rsc->class;
}
if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD)) {
if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->action, "start")) {
/* systemd I curse thee!
*
* systemd returns from start actions after the start _begins_,
* not after it completes.
*
* So we have to jump through a few hoops so that we don't
* report 'complete' to the rest of pacemaker until, you know,
* it's actually done.
*/
goagain = true;
cmd->real_action = cmd->action;
cmd->action = strdup("monitor");
} else if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->action, "stop")) {
goagain = true;
cmd->real_action = cmd->action;
cmd->action = strdup("monitor");
} else if(cmd->real_action) {
/* Ok, so this is the follow up monitor action to check if start actually completed */
if(cmd->lrmd_op_status == PCMK_LRM_OP_DONE && cmd->exec_rc == PCMK_OCF_PENDING) {
goagain = true;
} else if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->real_action, "stop")) {
goagain = true;
} else {
#ifdef HAVE_SYS_TIMEB_H
int time_sum = time_diff_ms(NULL, &cmd->t_first_run);
int timeout_left = cmd->timeout_orig - time_sum;
crm_debug("%s %s is now complete (elapsed=%dms, remaining=%dms): %s (%d)",
cmd->rsc_id, cmd->real_action, time_sum, timeout_left, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc);
cmd_original_times(cmd);
#endif
// Monitors may return "not running", but start/stop shouldn't
if ((cmd->lrmd_op_status == PCMK_LRM_OP_DONE)
&& (cmd->exec_rc == PCMK_OCF_NOT_RUNNING)) {
if (safe_str_eq(cmd->real_action, "start")) {
cmd->exec_rc = PCMK_OCF_UNKNOWN_ERROR;
} else if (safe_str_eq(cmd->real_action, "stop")) {
cmd->exec_rc = PCMK_OCF_OK;
}
}
}
}
}
#if SUPPORT_NAGIOS
if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS)) {
if (safe_str_eq(cmd->action, "monitor") &&
(cmd->interval_ms == 0) && cmd->exec_rc == PCMK_OCF_OK) {
/* Successfully executed --version for the nagios plugin */
cmd->exec_rc = PCMK_OCF_NOT_RUNNING;
} else if (safe_str_eq(cmd->action, "start") && cmd->exec_rc != PCMK_OCF_OK) {
goagain = true;
}
}
#endif
/* Wrapping this section in ifdef implies that systemd resources are not
* fully supported on platforms without sys/timeb.h. Since timeb is
* obsolete, we should eventually prefer a clock_gettime() implementation
* (wrapped in its own ifdef) with timeb as a fallback.
*/
if(goagain) {
#ifdef HAVE_SYS_TIMEB_H
int time_sum = time_diff_ms(NULL, &cmd->t_first_run);
int timeout_left = cmd->timeout_orig - time_sum;
int delay = cmd->timeout_orig / 10;
if(delay >= timeout_left && timeout_left > 20) {
delay = timeout_left/2;
}
delay = QB_MIN(2000, delay);
if (delay < timeout_left) {
cmd->start_delay = delay;
cmd->timeout = timeout_left;
if(cmd->exec_rc == PCMK_OCF_OK) {
crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay);
} else if(cmd->exec_rc == PCMK_OCF_PENDING) {
crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->action, time_sum, timeout_left, delay);
} else {
crm_notice("%s %s failed '%s' (%d): re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->action, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc, time_sum, timeout_left, delay);
}
cmd_reset(cmd);
if(rsc) {
rsc->active = NULL;
}
schedule_lrmd_cmd(rsc, cmd);
/* Don't finalize cmd, we're not done with it yet */
return;
} else {
crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)",
cmd->rsc_id, cmd->real_action?cmd->real_action:cmd->action, cmd->exec_rc, time_sum, timeout_left);
cmd->lrmd_op_status = PCMK_LRM_OP_TIMEOUT;
cmd->exec_rc = PCMK_OCF_TIMEOUT;
cmd_original_times(cmd);
}
#endif
}
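/* Example of the rescheduling math above (illustrative): with
 * timeout_orig = 100000ms and 95000ms elapsed, timeout_left = 5000ms;
 * delay starts at 10000ms, is halved to 2500ms because it exceeds
 * timeout_left, then capped at 2000ms. Since 2000 < 5000, the command is
 * re-queued with start_delay = 2000ms and timeout = 5000ms. */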
if (action->stderr_data) {
cmd->output = strdup(action->stderr_data);
cmd->exit_reason = parse_exit_reason(action->stderr_data);
} else if (action->stdout_data) {
cmd->output = strdup(action->stdout_data);
}
cmd_finalize(cmd, rsc);
}
/*!
* \internal
* \brief Determine operation status of a stonith operation
*
* Non-stonith resource operations get their operation status directly from the
* service library, but the fencer does not have an equivalent, so we must infer
* an operation status from the fencer API's return code.
*
* \param[in] action Name of action performed on stonith resource
* \param[in] interval_ms Action interval
* \param[in] rc Action result from fencer
*
* \return Operation status corresponding to fencer API return code
*/
static int
stonith_rc2status(const char *action, guint interval_ms, int rc)
{
int status = PCMK_LRM_OP_DONE;
switch (rc) {
case pcmk_ok:
break;
case -EOPNOTSUPP:
case -EPROTONOSUPPORT:
status = PCMK_LRM_OP_NOTSUPPORTED;
break;
case -ETIME:
case -ETIMEDOUT:
status = PCMK_LRM_OP_TIMEOUT;
break;
case -ENOTCONN:
case -ECOMM:
// Couldn't talk to fencer
status = PCMK_LRM_OP_ERROR;
break;
case -ENODEV:
// The device is not registered with the fencer
status = PCMK_LRM_OP_ERROR;
break;
default:
break;
}
return status;
}
static void
stonith_action_complete(lrmd_cmd_t * cmd, int rc)
{
// This can be NULL if resource was removed before command completed
lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id);
cmd->exec_rc = stonith2uniform_rc(cmd->action, rc);
/* This function may be called with status already set to cancelled, if a
* pending action was aborted. Otherwise, we need to determine status from
* the fencer return code.
*/
if (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED) {
cmd->lrmd_op_status = stonith_rc2status(cmd->action, cmd->interval_ms,
rc);
// Certain successful actions change the known state of the resource
if (rsc && (cmd->exec_rc == PCMK_OCF_OK)) {
if (safe_str_eq(cmd->action, "start")) {
rsc->st_probe_rc = pcmk_ok; // maps to PCMK_OCF_OK
} else if (safe_str_eq(cmd->action, "stop")) {
rsc->st_probe_rc = -ENODEV; // maps to PCMK_OCF_NOT_RUNNING
}
}
}
/* The recurring timer should not be running at this point in any case, but
* as a failsafe, stop it if it is.
*/
stop_recurring_timer(cmd);
/* Reschedule this command if appropriate. If a recurring command is *not*
* rescheduled, its status must be PCMK_LRM_OP_CANCELLED, otherwise it will
* not be removed from recurring_ops by cmd_finalize().
*/
if (rsc && (cmd->interval_ms > 0)
&& (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED)) {
start_recurring_timer(cmd);
}
cmd_finalize(cmd, rsc);
}
static void
lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data)
{
stonith_action_complete(data->userdata, data->rc);
}
void
stonith_connection_failed(void)
{
GHashTableIter iter;
GList *cmd_list = NULL;
GList *cmd_iter = NULL;
lrmd_rsc_t *rsc = NULL;
char *key = NULL;
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) {
if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
- /* This will cause future probes to return PCMK_OCF_UNKNOWN_ERROR
- * until the resource is stopped or started successfully. This is
- * especially important if the controller also went away (possibly
- * due to a cluster layer restart) and won't receive our client
- * notification of any monitors finalized below.
+ /* If we registered this fence device, we don't know whether the
+ * fencer still has the registration or not. Cause future probes to
+ * return PCMK_OCF_UNKNOWN_ERROR until the resource is stopped or
+ * started successfully. This is especially important if the
+ * controller also went away (possibly due to a cluster layer
+ * restart) and won't receive our client notification of any
+ * monitors finalized below.
*/
- rsc->st_probe_rc = pcmk_err_generic;
+ if (rsc->st_probe_rc == pcmk_ok) {
+ rsc->st_probe_rc = pcmk_err_generic;
+ }
if (rsc->active) {
cmd_list = g_list_append(cmd_list, rsc->active);
}
if (rsc->recurring_ops) {
cmd_list = g_list_concat(cmd_list, rsc->recurring_ops);
}
if (rsc->pending_ops) {
cmd_list = g_list_concat(cmd_list, rsc->pending_ops);
}
rsc->pending_ops = rsc->recurring_ops = NULL;
}
}
if (!cmd_list) {
return;
}
crm_err("Connection to fencer failed, finalizing %d pending operations",
g_list_length(cmd_list));
for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) {
stonith_action_complete(cmd_iter->data, -ENOTCONN);
}
g_list_free(cmd_list);
}
/*!
* \internal
* \brief Execute a stonith resource "start" action
*
* Start a stonith resource by registering it with the fencer.
* (Stonith agents don't have a start command.)
*
* \param[in] stonith_api Connection to fencer
* \param[in] rsc Stonith resource to start
* \param[in] cmd Start command to execute
*
* \return pcmk_ok on success, -errno otherwise
*/
static int
execd_stonith_start(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
{
char *key = NULL;
char *value = NULL;
stonith_key_value_t *device_params = NULL;
int rc = pcmk_ok;
// Convert command parameters to stonith API key/values
if (cmd->params) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, cmd->params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
device_params = stonith_key_value_add(device_params, key, value);
}
}
/* The fencer will automatically register devices via CIB notifications
* when the CIB changes, but to avoid a possible race condition between
* the fencer receiving the notification and the executor requesting that
* resource, the executor registers the device as well. The fencer knows how
* to handle duplicate registrations.
*/
rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call,
cmd->rsc_id, rsc->provider,
rsc->type, device_params);
stonith_key_value_freeall(device_params, 1, 1);
return rc;
}
/*!
* \internal
* \brief Execute a stonith resource "stop" action
*
* Stop a stonith resource by unregistering it with the fencer.
* (Stonith agents don't have a stop command.)
*
* \param[in] stonith_api Connection to fencer
* \param[in] rsc Stonith resource to stop
*
* \return pcmk_ok on success, -errno otherwise
*/
static inline int
execd_stonith_stop(stonith_t *stonith_api, const lrmd_rsc_t *rsc)
{
/* @TODO Failure would indicate a problem communicating with fencer;
* perhaps we should try reconnecting and retrying a few times?
*/
return stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call,
rsc->rsc_id);
}
/*!
* \internal
* \brief Initiate a stonith resource agent recurring "monitor" action
*
* \param[in] stonith_api Connection to fencer
* \param[in] rsc Stonith resource to monitor
* \param[in] cmd Monitor command being executed
*
* \return pcmk_ok if monitor was successfully initiated, -errno otherwise
*/
static inline int
execd_stonith_monitor(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
{
int rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id,
cmd->timeout / 1000);
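/* rc is now the fencer call ID (or a negative errno); register_callback()
 * returns TRUE only if the callback was successfully attached to that call */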
rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd,
"lrmd_stonith_callback",
lrmd_stonith_callback);
if (rc == TRUE) {
rsc->active = cmd;
rc = pcmk_ok;
} else {
rc = -pcmk_err_generic;
}
return rc;
}
static void
lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
int rc = 0;
bool do_monitor = FALSE;
stonith_t *stonith_api = get_stonith_connection();
if (!stonith_api) {
rc = -ENOTCONN;
} else if (safe_str_eq(cmd->action, "start")) {
rc = execd_stonith_start(stonith_api, rsc, cmd);
if (rc == 0) {
do_monitor = TRUE;
}
} else if (safe_str_eq(cmd->action, "stop")) {
rc = execd_stonith_stop(stonith_api, rsc);
} else if (safe_str_eq(cmd->action, "monitor")) {
if (cmd->interval_ms > 0) {
do_monitor = TRUE;
} else {
rc = rsc->st_probe_rc;
}
}
if (do_monitor) {
rc = execd_stonith_monitor(stonith_api, rsc, cmd);
if (rc == pcmk_ok) {
// Don't clean up yet; we will find out the result of the monitor later
return;
}
}
stonith_action_complete(cmd, rc);
}
static int
lrmd_rsc_execute_service_lib(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
svc_action_t *action = NULL;
GHashTable *params_copy = NULL;
CRM_ASSERT(rsc);
CRM_ASSERT(cmd);
crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s",
rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type);
#if SUPPORT_NAGIOS
/* Recurring operations are cancelled anyway for a stop operation */
if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS)
&& safe_str_eq(cmd->action, "stop")) {
cmd->exec_rc = PCMK_OCF_OK;
goto exec_done;
}
#endif
params_copy = crm_str_table_dup(cmd->params);
action = resources_action_create(rsc->rsc_id, rsc->class, rsc->provider,
rsc->type,
normalize_action_name(rsc, cmd->action),
cmd->interval_ms, cmd->timeout,
params_copy, cmd->service_flags);
if (!action) {
crm_err("Failed to create action, action:%s on resource %s", cmd->action, rsc->rsc_id);
cmd->lrmd_op_status = PCMK_LRM_OP_ERROR;
goto exec_done;
}
action->cb_data = cmd;
/* 'cmd' may not be valid after this point if
* services_action_async() returned TRUE
*
* Upstart and systemd both synchronously determine monitor/status
* results and call action_complete (which may free 'cmd') if necessary.
*/
if (services_action_async(action, action_complete)) {
return TRUE;
}
cmd->exec_rc = action->rc;
if(action->status != PCMK_LRM_OP_DONE) {
cmd->lrmd_op_status = action->status;
} else {
cmd->lrmd_op_status = PCMK_LRM_OP_ERROR;
}
services_action_free(action);
action = NULL;
exec_done:
cmd_finalize(cmd, rsc);
return TRUE;
}
static gboolean
lrmd_rsc_execute(lrmd_rsc_t * rsc)
{
lrmd_cmd_t *cmd = NULL;
CRM_CHECK(rsc != NULL, return FALSE);
if (rsc->active) {
crm_trace("%s is still active", rsc->rsc_id);
return TRUE;
}
if (rsc->pending_ops) {
GList *first = rsc->pending_ops;
cmd = first->data;
if (cmd->delay_id) {
crm_trace
("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms",
cmd->rsc_id, cmd->action, cmd->start_delay);
return TRUE;
}
rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first);
g_list_free_1(first);
#ifdef HAVE_SYS_TIMEB_H
if (cmd->t_first_run.time == 0) {
ftime(&cmd->t_first_run);
}
ftime(&cmd->t_run);
#endif
}
if (!cmd) {
crm_trace("Nothing further to do for %s", rsc->rsc_id);
return TRUE;
}
rsc->active = cmd; /* only one op at a time for a rsc */
if (cmd->interval_ms) {
rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd);
}
log_execute(cmd);
if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
lrmd_rsc_execute_stonith(rsc, cmd);
} else {
lrmd_rsc_execute_service_lib(rsc, cmd);
}
return TRUE;
}
static gboolean
lrmd_rsc_dispatch(gpointer user_data)
{
return lrmd_rsc_execute(user_data);
}
void
free_rsc(gpointer data)
{
GListPtr gIter = NULL;
lrmd_rsc_t *rsc = data;
int is_stonith = safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH);
gIter = rsc->pending_ops;
while (gIter != NULL) {
GListPtr next = gIter->next;
lrmd_cmd_t *cmd = gIter->data;
/* command was never executed */
cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED;
cmd_finalize(cmd, NULL);
gIter = next;
}
/* frees list, but not list elements. */
g_list_free(rsc->pending_ops);
gIter = rsc->recurring_ops;
while (gIter != NULL) {
GListPtr next = gIter->next;
lrmd_cmd_t *cmd = gIter->data;
if (is_stonith) {
cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED;
/* If a stonith command is in-flight, just mark it as cancelled;
* it is not safe to finalize/free the cmd until the stonith api
* says it has either completed or timed out.
*/
if (rsc->active != cmd) {
cmd_finalize(cmd, NULL);
}
} else {
/* This command is already handed off to service library,
* let service library cancel it and tell us via the callback
* when it is cancelled. The rsc can be safely destroyed
* even if we are waiting for the cancel result */
services_action_cancel(rsc->rsc_id,
normalize_action_name(rsc, cmd->action),
cmd->interval_ms);
}
gIter = next;
}
/* frees list, but not list elements. */
g_list_free(rsc->recurring_ops);
free(rsc->rsc_id);
free(rsc->class);
free(rsc->provider);
free(rsc->type);
mainloop_destroy_trigger(rsc->work);
free(rsc);
}
static xmlNode *
process_lrmd_signon(crm_client_t *client, xmlNode *request, int call_id)
{
xmlNode *reply = NULL;
int rc = pcmk_ok;
const char *is_ipc_provider = crm_element_value(request, F_LRMD_IS_IPC_PROVIDER);
const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION);
if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) {
crm_err("Cluster API version must be greater than or equal to %s, not %s",
LRMD_MIN_PROTOCOL_VERSION, protocol_version);
rc = -EPROTO;
}
reply = create_lrmd_reply(__FUNCTION__, rc, call_id);
crm_xml_add(reply, F_LRMD_OPERATION, CRM_OP_REGISTER);
crm_xml_add(reply, F_LRMD_CLIENTID, client->id);
crm_xml_add(reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
if (crm_is_true(is_ipc_provider)) {
// This is a remote connection from a cluster node's controller
#ifdef SUPPORT_REMOTE
ipc_proxy_add_provider(client);
#endif
}
return reply;
}
static int
process_lrmd_rsc_register(crm_client_t * client, uint32_t id, xmlNode * request)
{
int rc = pcmk_ok;
lrmd_rsc_t *rsc = build_rsc_from_xml(request);
lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id);
if (dup &&
safe_str_eq(rsc->class, dup->class) &&
safe_str_eq(rsc->provider, dup->provider) && safe_str_eq(rsc->type, dup->type)) {
crm_notice("Ignoring duplicate registration of '%s'", rsc->rsc_id);
free_rsc(rsc);
return rc;
}
g_hash_table_replace(rsc_list, rsc->rsc_id, rsc);
crm_info("Cached agent information for '%s'", rsc->rsc_id);
return rc;
}
static xmlNode *
process_lrmd_get_rsc_info(xmlNode *request, int call_id)
{
int rc = pcmk_ok;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
xmlNode *reply = NULL;
lrmd_rsc_t *rsc = NULL;
if (rsc_id == NULL) {
rc = -ENODEV;
} else {
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Agent information for '%s' not in cache", rsc_id);
rc = -ENODEV;
}
}
reply = create_lrmd_reply(__FUNCTION__, rc, call_id);
if (rsc) {
crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id);
crm_xml_add(reply, F_LRMD_CLASS, rsc->class);
crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider);
crm_xml_add(reply, F_LRMD_TYPE, rsc->type);
}
return reply;
}
static int
process_lrmd_rsc_unregister(crm_client_t * client, uint32_t id, xmlNode * request)
{
int rc = pcmk_ok;
lrmd_rsc_t *rsc = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
if (!rsc_id) {
return -ENODEV;
}
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Ignoring unregistration of resource '%s', which is not registered",
rsc_id);
return pcmk_ok;
}
if (rsc->active) {
/* let the caller know there are still active ops on this rsc to watch for */
crm_trace("Operation (0x%p) still in progress for unregistered resource %s",
rsc->active, rsc_id);
rc = -EINPROGRESS;
}
g_hash_table_remove(rsc_list, rsc_id);
return rc;
}
static int
process_lrmd_rsc_exec(crm_client_t * client, uint32_t id, xmlNode * request)
{
lrmd_rsc_t *rsc = NULL;
lrmd_cmd_t *cmd = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
int call_id;
if (!rsc_id) {
return -EINVAL;
}
if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) {
crm_info("Resource '%s' not found (%d active resources)",
rsc_id, g_hash_table_size(rsc_list));
return -ENODEV;
}
cmd = create_lrmd_cmd(request, client);
call_id = cmd->call_id;
/* Don't reference cmd after handing it off to be scheduled.
* The cmd could get merged and freed. */
schedule_lrmd_cmd(rsc, cmd);
return call_id;
}
static int
cancel_op(const char *rsc_id, const char *action, guint interval_ms)
{
GListPtr gIter = NULL;
lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id);
/* How to cancel an action:
 * 1. Check the pending ops list; if the op hasn't been handed off to the
 *    service library or the stonith recurring list yet, removing it there
 *    stops it.
 * 2. Otherwise, it is a recurring op in either the stonith recurring list
 *    or the service library's recurring list; stop it there.
 * 3. If it is in neither list, it either already executed (and is not a
 *    recurring operation) or never existed.
 */
if (!rsc) {
return -ENODEV;
}
for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) {
lrmd_cmd_t *cmd = gIter->data;
if (safe_str_eq(cmd->action, action)
&& (cmd->interval_ms == interval_ms)) {
cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED;
cmd_finalize(cmd, rsc);
return pcmk_ok;
}
}
if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) {
/* The service library does not handle stonith operations.
* We have to handle recurring stonith operations ourselves. */
for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) {
lrmd_cmd_t *cmd = gIter->data;
if (safe_str_eq(cmd->action, action)
&& (cmd->interval_ms == interval_ms)) {
cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED;
if (rsc->active != cmd) {
cmd_finalize(cmd, rsc);
}
return pcmk_ok;
}
}
} else if (services_action_cancel(rsc_id,
normalize_action_name(rsc, action),
interval_ms) == TRUE) {
/* The service library will tell the action_complete callback function
* this action was cancelled, which will destroy the cmd and remove
* it from the recurring_op list. Do not do that in this function
* if the service library says it cancelled it. */
return pcmk_ok;
}
return -EOPNOTSUPP;
}
static void
cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id)
{
GList *cmd_list = NULL;
GList *cmd_iter = NULL;
/* Note that we iterate over copies of recurring_ops and pending_ops, made
 * with g_list_copy() before concatenation. cancel_op() may modify the
 * original lists, which would otherwise invalidate our cmd_list iteration. */
if (rsc->recurring_ops) {
cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops));
}
if (rsc->pending_ops) {
cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops));
}
if (!cmd_list) {
return;
}
for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) {
lrmd_cmd_t *cmd = cmd_iter->data;
if (cmd->interval_ms == 0) {
continue;
}
if (client_id && safe_str_neq(cmd->client_id, client_id)) {
continue;
}
cancel_op(rsc->rsc_id, cmd->action, cmd->interval_ms);
}
/* frees only the copied list data, not the cmds */
g_list_free(cmd_list);
}
static int
process_lrmd_rsc_cancel(crm_client_t * client, uint32_t id, xmlNode * request)
{
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION);
guint interval_ms = 0;
crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &interval_ms);
if (!rsc_id || !action) {
return -EINVAL;
}
return cancel_op(rsc_id, action, interval_ms);
}
static void
add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc)
{
xmlNode *rsc_xml = create_xml_node(reply, F_LRMD_RSC);
crm_xml_add(rsc_xml, F_LRMD_RSC_ID, rsc->rsc_id);
for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) {
lrmd_cmd_t *cmd = item->data;
xmlNode *op_xml = create_xml_node(rsc_xml, T_LRMD_RSC_OP);
crm_xml_add(op_xml, F_LRMD_RSC_ACTION,
(cmd->real_action? cmd->real_action : cmd->action));
crm_xml_add_ms(op_xml, F_LRMD_RSC_INTERVAL, cmd->interval_ms);
crm_xml_add_int(op_xml, F_LRMD_TIMEOUT, cmd->timeout_orig);
}
}
static xmlNode *
process_lrmd_get_recurring(xmlNode *request, int call_id)
{
int rc = pcmk_ok;
const char *rsc_id = NULL;
lrmd_rsc_t *rsc = NULL;
xmlNode *reply = NULL;
xmlNode *rsc_xml = NULL;
// Resource ID is optional
rsc_xml = first_named_child(request, F_LRMD_CALLDATA);
if (rsc_xml) {
rsc_xml = first_named_child(rsc_xml, F_LRMD_RSC);
}
if (rsc_xml) {
rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
}
// If resource ID is specified, resource must exist
if (rsc_id != NULL) {
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Resource '%s' not found (%d active resources)",
rsc_id, g_hash_table_size(rsc_list));
rc = -ENODEV;
}
}
reply = create_lrmd_reply(__FUNCTION__, rc, call_id);
// If resource ID is not specified, check all resources
if (rsc_id == NULL) {
GHashTableIter iter;
char *key = NULL;
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &rsc)) {
add_recurring_op_xml(reply, rsc);
}
} else if (rsc) {
add_recurring_op_xml(reply, rsc);
}
return reply;
}
void
process_lrmd_message(crm_client_t * client, uint32_t id, xmlNode * request)
{
int rc = pcmk_ok;
int call_id = 0;
const char *op = crm_element_value(request, F_LRMD_OPERATION);
int do_reply = 0;
int do_notify = 0;
xmlNode *reply = NULL;
crm_trace("Processing %s operation from %s", op, client->id);
crm_element_value_int(request, F_LRMD_CALLID, &call_id);
if (crm_str_eq(op, CRM_OP_IPC_FWD, TRUE)) {
#ifdef SUPPORT_REMOTE
ipc_proxy_forward_client(client, request);
#endif
do_reply = 1;
} else if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
reply = process_lrmd_signon(client, request, call_id);
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_RSC_REG, TRUE)) {
rc = process_lrmd_rsc_register(client, id, request);
do_notify = 1;
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_RSC_INFO, TRUE)) {
reply = process_lrmd_get_rsc_info(request, call_id);
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_RSC_UNREG, TRUE)) {
rc = process_lrmd_rsc_unregister(client, id, request);
/* don't notify anyone about failed un-registers */
if (rc == pcmk_ok || rc == -EINPROGRESS) {
do_notify = 1;
}
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_RSC_EXEC, TRUE)) {
rc = process_lrmd_rsc_exec(client, id, request);
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_RSC_CANCEL, TRUE)) {
rc = process_lrmd_rsc_cancel(client, id, request);
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_POKE, TRUE)) {
do_notify = 1;
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_CHECK, TRUE)) {
xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA);
const char *timeout = crm_element_value(data, F_LRMD_WATCHDOG);
CRM_LOG_ASSERT(data != NULL);
check_sbd_timeout(timeout);
} else if (crm_str_eq(op, LRMD_OP_ALERT_EXEC, TRUE)) {
rc = process_lrmd_alert_exec(client, id, request);
do_reply = 1;
} else if (crm_str_eq(op, LRMD_OP_GET_RECURRING, TRUE)) {
reply = process_lrmd_get_recurring(request, call_id);
do_reply = 1;
} else {
rc = -EOPNOTSUPP;
do_reply = 1;
crm_err("Unknown %s from %s", op, client->name);
crm_log_xml_warn(request, "UnknownOp");
}
crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d",
op, client->id, rc, do_reply, do_notify);
if (do_reply) {
int send_rc = pcmk_ok;
if (reply == NULL) {
reply = create_lrmd_reply(__FUNCTION__, rc, call_id);
}
send_rc = lrmd_server_send_reply(client, id, reply);
free_xml(reply);
if (send_rc < 0) {
crm_warn("Reply to client %s failed: %s " CRM_XS " %d",
client->name, pcmk_strerror(send_rc), send_rc);
}
}
if (do_notify) {
send_generic_notify(rc, request);
}
}
diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c
index f4bb86ef65..be4fd5e4cd 100644
--- a/daemons/execd/pacemaker-execd.c
+++ b/daemons/execd/pacemaker-execd.c
@@ -1,631 +1,635 @@
/*
* Copyright 2012-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-execd.h"
#if defined(HAVE_GNUTLS_GNUTLS_H) && defined(SUPPORT_REMOTE)
# define ENABLE_PCMK_REMOTE
// Hidden in liblrmd
extern int lrmd_tls_send_msg(crm_remote_t *session, xmlNode *msg, uint32_t id,
const char *msg_type);
#endif
static GMainLoop *mainloop = NULL;
static qb_ipcs_service_t *ipcs = NULL;
static stonith_t *stonith_api = NULL;
int lrmd_call_id = 0;
#ifdef ENABLE_PCMK_REMOTE
/* whether shutdown request has been sent */
static sig_atomic_t shutting_down = FALSE;
/* timer for waiting for acknowledgment of shutdown request */
static guint shutdown_ack_timer = 0;
static gboolean lrmd_exit(gpointer data);
#endif
static void
stonith_connection_destroy_cb(stonith_t * st, stonith_event_t * e)
{
stonith_api->state = stonith_disconnected;
crm_err("Connection to fencer lost");
stonith_connection_failed();
}
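/*!
 * \internal
 * \brief Get a connection to the fencer, establishing one if needed
 *
 * \return Active fencer connection, or NULL if one could not be established
 *
 * \note Callers must check the return value for NULL.
 */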
stonith_t *
get_stonith_connection(void)
{
if (stonith_api && stonith_api->state == stonith_disconnected) {
stonith_api_delete(stonith_api);
stonith_api = NULL;
}
if (stonith_api == NULL) {
int rc = pcmk_ok;
stonith_api = stonith_api_new();
+ if (stonith_api == NULL) {
+ crm_err("Could not connect to fencer: API memory allocation failed");
+ return NULL;
+ }
rc = stonith_api_connect_retry(stonith_api, crm_system_name, 10);
if (rc != pcmk_ok) {
crm_err("Could not connect to fencer in 10 attempts: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
stonith_api_delete(stonith_api);
stonith_api = NULL;
} else {
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_DISCONNECT,
stonith_connection_destroy_cb);
}
}
return stonith_api;
}
static int32_t
lrmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
lrmd_ipc_created(qb_ipcs_connection_t * c)
{
crm_client_t *new_client = crm_client_get(c);
crm_trace("Connection %p", c);
CRM_ASSERT(new_client != NULL);
/* Now that the connection is officially established, alert
 * the other clients that a new connection exists. */
notify_of_new_client(new_client);
}
static int32_t
lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *request = crm_ipcs_recv(client, data, size, &id, &flags);
CRM_CHECK(client != NULL, crm_err("Invalid client");
return FALSE);
CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client);
return FALSE);
CRM_CHECK(flags & crm_ipc_client_response, crm_err("Invalid client request: %p", client);
return FALSE);
if (!request) {
return 0;
}
if (!client->name) {
const char *value = crm_element_value(request, F_LRMD_CLIENTNAME);
if (value == NULL) {
client->name = crm_itoa(crm_ipcs_client_pid(c));
} else {
client->name = strdup(value);
}
}
lrmd_call_id++;
if (lrmd_call_id < 1) {
lrmd_call_id = 1;
}
crm_xml_add(request, F_LRMD_CLIENTID, client->id);
crm_xml_add(request, F_LRMD_CLIENTNAME, client->name);
crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id);
process_lrmd_message(client, id, request);
free_xml(request);
return 0;
}
/*!
* \internal
* \brief Free a client connection, and exit if appropriate
*
* \param[in] client Client connection to free
*/
void
lrmd_client_destroy(crm_client_t *client)
{
crm_client_destroy(client);
#ifdef ENABLE_PCMK_REMOTE
/* If we were waiting to shut down, we can now safely do so
* if there are no more proxied IPC providers
*/
if (shutting_down && (ipc_proxy_get_provider() == NULL)) {
lrmd_exit(NULL);
}
#endif
}
static int32_t
lrmd_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
client_disconnect_cleanup(client->id);
#ifdef ENABLE_PCMK_REMOTE
ipc_proxy_remove_provider(client);
#endif
lrmd_client_destroy(client);
return 0;
}
static void
lrmd_ipc_destroy(qb_ipcs_connection_t * c)
{
lrmd_ipc_closed(c);
crm_trace("Connection %p", c);
}
static struct qb_ipcs_service_handlers lrmd_ipc_callbacks = {
.connection_accept = lrmd_ipc_accept,
.connection_created = lrmd_ipc_created,
.msg_process = lrmd_ipc_dispatch,
.connection_closed = lrmd_ipc_closed,
.connection_destroyed = lrmd_ipc_destroy
};
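/*!
 * \internal
 * \brief Send a reply to an executor client
 *
 * \param[in] client  Client to send the reply to
 * \param[in] id      Message ID the reply is for
 * \param[in] reply   Reply XML
 *
 * \return Result of the send, or -ENOTCONN if the client type is unknown
 */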
int
lrmd_server_send_reply(crm_client_t * client, uint32_t id, xmlNode * reply)
{
crm_trace("Sending reply (%d) to client (%s)", id, client->id);
switch (client->kind) {
case CRM_CLIENT_IPC:
return crm_ipcs_send(client, id, reply, FALSE);
#ifdef ENABLE_PCMK_REMOTE
case CRM_CLIENT_TLS:
return lrmd_tls_send_msg(client->remote, reply, id, "reply");
#endif
default:
crm_err("Could not send reply: unknown client type %d",
client->kind);
}
return -ENOTCONN;
}
int
lrmd_server_send_notify(crm_client_t * client, xmlNode * msg)
{
crm_trace("Sending notification to client (%s)", client->id);
switch (client->kind) {
case CRM_CLIENT_IPC:
if (client->ipcs == NULL) {
crm_trace("Could not notify local client: disconnected");
return -ENOTCONN;
}
return crm_ipcs_send(client, 0, msg, crm_ipc_server_event);
#ifdef ENABLE_PCMK_REMOTE
case CRM_CLIENT_TLS:
if (client->remote == NULL) {
crm_trace("Could not notify remote client: disconnected");
return -ENOTCONN;
}
return lrmd_tls_send_msg(client->remote, msg, 0, "notify");
#endif
default:
crm_err("Could not notify client: unknown type %d", client->kind);
}
return -ENOTCONN;
}
/*!
* \internal
* \brief Clean up and exit immediately
*
* \param[in] data Ignored
*
* \return Doesn't return
* \note This can be used as a timer callback.
*/
static gboolean
lrmd_exit(gpointer data)
{
crm_info("Terminating with %d clients",
crm_hash_table_size(client_connections));
if (stonith_api) {
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
stonith_api->cmds->disconnect(stonith_api);
stonith_api_delete(stonith_api);
}
if (ipcs) {
mainloop_del_ipc_server(ipcs);
}
#ifdef ENABLE_PCMK_REMOTE
lrmd_tls_server_destroy();
ipc_proxy_cleanup();
#endif
crm_client_cleanup();
g_hash_table_destroy(rsc_list);
if (mainloop) {
lrmd_drain_alerts(mainloop);
}
crm_exit(CRM_EX_OK);
return FALSE;
}
/*!
* \internal
* \brief Request cluster shutdown if appropriate, otherwise exit immediately
*
* \param[in] nsig Signal that caused invocation (ignored)
*/
static void
lrmd_shutdown(int nsig)
{
#ifdef ENABLE_PCMK_REMOTE
crm_client_t *ipc_proxy = ipc_proxy_get_provider();
/* If there are active proxied IPC providers, then we may be running
* resources, so notify the cluster that we wish to shut down.
*/
if (ipc_proxy) {
if (shutting_down) {
crm_notice("Waiting for cluster to stop resources before exiting");
return;
}
crm_info("Sending shutdown request to cluster");
if (ipc_proxy_shutdown_req(ipc_proxy) < 0) {
crm_crit("Shutdown request failed, exiting immediately");
} else {
/* We requested a shutdown. Now, we need to wait for an
* acknowledgement from the proxy host (which ensures the proxy host
* supports shutdown requests), then wait for all proxy hosts to
* disconnect (which ensures that all resources have been stopped).
*/
shutting_down = TRUE;
/* Stop accepting new proxy connections */
lrmd_tls_server_destroy();
/* Older controller versions will never acknowledge our request, so
* set a fairly short timeout to exit quickly in that case. If we
* get the ack, we'll defuse this timer.
*/
shutdown_ack_timer = g_timeout_add_seconds(20, lrmd_exit, NULL);
/* Currently, we let the OS kill us if the clients don't disconnect
* in a reasonable time. We could instead set a long timer here
* (shorter than what the OS is likely to use) and exit immediately
* if it pops.
*/
return;
}
}
#endif
lrmd_exit(NULL);
}
/*!
* \internal
* \brief Defuse short exit timer if shutting down
*/
void handle_shutdown_ack()
{
#ifdef ENABLE_PCMK_REMOTE
if (shutting_down) {
crm_info("Received shutdown ack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = 0;
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown ack");
}
/*!
* \internal
* \brief Make short exit timer fire immediately
*/
void handle_shutdown_nack()
{
#ifdef ENABLE_PCMK_REMOTE
if (shutting_down) {
crm_info("Received shutdown nack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = g_timeout_add(0, lrmd_exit, NULL);
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown nack");
}
static pid_t main_pid = 0;
static void
sigdone(void)
{
exit(CRM_EX_OK);
}
static void
sigreap(void)
{
pid_t pid = 0;
int status;
do {
/*
* Opinions seem to differ as to what to put here:
* -1, any child process
* 0, any child process whose process group ID is equal to that of the calling process
*/
pid = waitpid(-1, &status, WNOHANG);
if(pid == main_pid) {
/* Exit when pacemaker-remote exits and use the same return code */
if (WIFEXITED(status)) {
exit(WEXITSTATUS(status));
}
exit(CRM_EX_ERROR);
}
} while (pid > 0);
}
static struct {
int sig;
void (*handler)(void);
} sigmap[] = {
{ SIGCHLD, sigreap },
{ SIGINT, sigdone },
};
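/*!
 * \internal
 * \brief When running as PID 1 inside a container, fork the real daemon
 *
 * Does nothing unless the current process is PID 1. Otherwise, the child
 * continues as pacemaker-remoted while the parent remains behind to reap
 * zombie processes and handle signals.
 */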
static void spawn_pidone(int argc, char **argv, char **envp)
{
sigset_t set;
if (getpid() != 1) {
return;
}
/* Containers can be expected to have /var/log, but they may not have
* /var/log/pacemaker, so use a different default if no value has been
* explicitly configured in the container's environment.
*/
if (daemon_option("logfile") == NULL) {
set_daemon_option("logfile", "/var/log/pcmk-init.log");
}
sigfillset(&set);
sigprocmask(SIG_BLOCK, &set, 0);
main_pid = fork();
switch (main_pid) {
case 0:
sigprocmask(SIG_UNBLOCK, &set, NULL);
setsid();
setpgid(0, 0);
/* Child remains as pacemaker-remoted */
return;
case -1:
perror("fork");
}
/* Parent becomes the reaper of zombie processes */
/* Safe to initialize logging now if needed */
#ifdef HAVE___PROGNAME
/* Differentiate ourselves in the 'ps' output */
{
char *p;
int i, maxlen;
char *LastArgv = NULL;
const char *name = "pcmk-init";
for(i = 0; i < argc; i++) {
if(!i || (LastArgv + 1 == argv[i]))
LastArgv = argv[i] + strlen(argv[i]);
}
for(i = 0; envp[i] != NULL; i++) {
if((LastArgv + 1) == envp[i]) {
LastArgv = envp[i] + strlen(envp[i]);
}
}
maxlen = (LastArgv - argv[0]) - 2;
i = strlen(name);
/* We can overwrite individual argv[] arguments */
snprintf(argv[0], maxlen, "%s", name);
/* Now zero out everything else */
p = &argv[0][i];
while(p < LastArgv)
*p++ = '\0';
argv[1] = NULL;
}
#endif /* HAVE___PROGNAME */
while (1) {
int sig;
size_t i;
sigwait(&set, &sig);
for (i = 0; i < DIMOF(sigmap); i++) {
if (sigmap[i].sig == sig) {
sigmap[i].handler();
break;
}
}
}
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"logfile", 1, 0, 'l', "\tSend logs to the additional named logfile"},
#ifdef ENABLE_PCMK_REMOTE
{"port", 1, 0, 'p', "\tPort to listen on"},
#endif
{0, 0, 0, 0}
};
/* *INDENT-ON* */
#ifdef ENABLE_PCMK_REMOTE
# define EXECD_TYPE "remote"
#else
# define EXECD_TYPE "local"
#endif
int
main(int argc, char **argv, char **envp)
{
int flag = 0;
int index = 0;
int bump_log_num = 0;
const char *option = NULL;
/* If necessary, create PID1 now before any FDs are opened */
spawn_pidone(argc, argv, envp);
#ifndef ENABLE_PCMK_REMOTE
crm_log_preinit("pacemaker-execd", argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Resource agent executor daemon for cluster nodes");
#else
crm_log_preinit("pacemaker-remoted", argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Resource agent executor daemon for Pacemaker Remote nodes");
#endif
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1) {
break;
}
switch (flag) {
case 'l':
crm_add_logfile(optarg);
break;
case 'p':
setenv("PCMK_remote_port", optarg, 1);
break;
case 'V':
bump_log_num++;
break;
case '?':
case '$':
crm_help(flag, CRM_EX_OK);
break;
default:
crm_help('?', CRM_EX_USAGE);
break;
}
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
while (bump_log_num > 0) {
crm_bump_log_level(argc, argv);
bump_log_num--;
}
option = daemon_option("logfacility");
if (option && safe_str_neq(option, "none")
&& safe_str_neq(option, "/dev/null")) {
setenv("HA_LOGFACILITY", option, 1); /* Used by the ocf_log/ha_log OCF macro */
}
option = daemon_option("logfile");
if(option && safe_str_neq(option, "none")) {
setenv("HA_LOGFILE", option, 1); /* Used by the ocf_log/ha_log OCF macro */
if (daemon_option_enabled(crm_system_name, "debug")) {
setenv("HA_DEBUGLOG", option, 1); /* Used by the ocf_log/ha_debug OCF macro */
}
}
crm_notice("Starting Pacemaker " EXECD_TYPE " executor");
/* The presence of this variable allegedly controls whether child
 * processes such as httpd will try to use systemd's sd_notify
 * API
 */
unsetenv("NOTIFY_SOCKET");
/* Used by RAs - Leave owned by root */
crm_build_path(CRM_RSCTMP_DIR, 0755);
rsc_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_rsc);
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_LRMD, QB_IPC_SHM, &lrmd_ipc_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
crm_exit(CRM_EX_FATAL);
}
#ifdef ENABLE_PCMK_REMOTE
if (lrmd_init_remote_tls_server() < 0) {
crm_err("Failed to create TLS listener: shutting down and staying down");
crm_exit(CRM_EX_FATAL);
}
ipc_proxy_init();
#endif
mainloop_add_signal(SIGTERM, lrmd_shutdown);
mainloop = g_main_loop_new(NULL, FALSE);
crm_notice("Pacemaker " EXECD_TYPE " executor successfully started and accepting connections");
g_main_loop_run(mainloop);
/* should never get here */
lrmd_exit(NULL);
return CRM_EX_OK;
}
diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c
index 4552fc1178..8fd27c80c5 100644
--- a/daemons/fenced/cts-fence-helper.c
+++ b/daemons/fenced/cts-fence-helper.c
@@ -1,654 +1,657 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
static GMainLoop *mainloop = NULL;
static crm_trigger_t *trig = NULL;
static int mainloop_iter = 0;
static int callback_rc = 0;
typedef void (*mainloop_test_iteration_cb) (int check_event);
#define MAINLOOP_DEFAULT_TIMEOUT 2 // seconds
#define mainloop_test_done(pass) \
if (pass) { \
crm_info("SUCCESS - %s", __FUNCTION__); \
mainloop_iter++; \
mainloop_set_trigger(trig); \
} else { \
crm_err("FAILURE = %s async_callback %d", __FUNCTION__, callback_rc); \
crm_exit(CRM_EX_ERROR); \
} \
callback_rc = 0; \
/* *INDENT-OFF* */
enum test_modes {
test_standard = 0, // test using a specific developer environment
test_passive, // watch notifications only
test_api_sanity, // sanity-test stonith client API using fence_dummy
test_api_mainloop, // sanity-test mainloop code with async responses
};
static struct crm_option long_options[] = {
{"verbose", 0, 0, 'V'},
{"version", 0, 0, '$'},
{"help", 0, 0, '?'},
{"passive", 0, 0, 'p'},
{"api_test", 0, 0, 't'},
{"mainloop_api_test", 0, 0, 'm'},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
static stonith_t *st = NULL;
static struct pollfd pollfd;
static int st_opts = st_opt_sync_call;
static int expected_notifications = 0;
static int verbose = 0;
static void
dispatch_helper(int timeout)
{
int rc;
crm_debug("Looking for notification");
pollfd.events = POLLIN;
while (true) {
rc = poll(&pollfd, 1, timeout); /* wait up to the given timeout in ms (-1 would wait forever) */
if (rc > 0) {
if (!stonith_dispatch(st)) {
break;
}
} else {
break;
}
}
}
static void
st_callback(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
crm_exit(CRM_EX_DISCONNECT);
}
crm_notice("Operation %s requested by %s %s for peer %s. %s reported: %s (ref=%s)",
e->operation, e->origin, e->result == pcmk_ok ? "completed" : "failed",
e->target, e->executioner ? e->executioner : "",
pcmk_strerror(e->result), e->id);
if (expected_notifications) {
expected_notifications--;
}
}
static void
st_global_callback(stonith_t * stonith, stonith_callback_data_t * data)
{
crm_notice("Call id %d completed with rc %d", data->call_id, data->rc);
}
static void
passive_test(void)
{
int rc = 0;
rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
if (rc != pcmk_ok) {
stonith_api_delete(st);
crm_exit(CRM_EX_DISCONNECT);
}
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
st_global_callback);
dispatch_helper(600 * 1000);
}
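/* Run a fencer API command, optionally wait for notifications, and exit
 * with an error if the return code or the number of notifications received
 * does not match what was expected
 */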
#define single_test(cmd, str, num_notifications, expected_rc) \
{ \
int rc = 0; \
rc = cmd; \
expected_notifications = 0; \
if (num_notifications) { \
expected_notifications = num_notifications; \
dispatch_helper(500); \
} \
if (rc != expected_rc) { \
crm_err("FAILURE - expected rc %d != %d(%s) for cmd - %s", expected_rc, rc, pcmk_strerror(rc), str); \
crm_exit(CRM_EX_ERROR); \
} else if (expected_notifications) { \
crm_err("FAILURE - expected %d notifications, got only %d for cmd - %s", \
num_notifications, num_notifications - expected_notifications, str); \
crm_exit(CRM_EX_ERROR); \
} else { \
if (verbose) { \
crm_info("SUCCESS - %s: %d", str, rc); \
} else { \
crm_debug("SUCCESS - %s: %d", str, rc); \
} \
} \
}\
static void
run_fence_failure_test(void)
{
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
params = stonith_key_value_add(params, "mode", "fail");
single_test(st->
cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
"Register device1 for failure test", 1, 0);
single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
"Fence failure results off", 1, -pcmk_err_generic);
single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0),
"Fence failure results reboot", 1, -pcmk_err_generic);
single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
"Remove device1 for failure test", 1, 0);
stonith_key_value_freeall(params, 1, 1);
}
static void
run_fence_failure_rollover_test(void)
{
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
params = stonith_key_value_add(params, "mode", "fail");
single_test(st->
cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
"Register device1 for rollover test", 1, 0);
stonith_key_value_freeall(params, 1, 1);
params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
params = stonith_key_value_add(params, "mode", "pass");
single_test(st->
cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_dummy", params),
"Register device2 for rollover test", 1, 0);
single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
"Fence rollover results off", 1, 0);
/* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0),
"Fence rollover results on", 1, -ENODEV);
single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
"Remove device1 for rollover tests", 1, 0);
single_test(st->cmds->remove_device(st, st_opts, "test-id2"),
"Remove device2 for rollover tests", 1, 0);
stonith_key_value_freeall(params, 1, 1);
}
static void
run_standard_test(void)
{
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
params = stonith_key_value_add(params, "mode", "pass");
params = stonith_key_value_add(params, "mock_dynamic_hosts", "false_1_node1 false_1_node2");
single_test(st->
cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_dummy", params),
"Register", 1, 0);
stonith_key_value_freeall(params, 1, 1);
params = NULL;
single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0);
single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0);
single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node2", 1),
"Status false_1_node2", 1, 0);
single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1),
"Status false_1_node1", 1, 0);
single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0),
"Fence unknown-host (expected failure)", 0, -ENODEV);
single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0),
"Fence false_1_node1", 1, 0);
/* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0),
"Unfence false_1_node1", 1, -ENODEV);
/* Confirm that an invalid level index is rejected */
single_test(st->cmds->register_level(st, st_opts, "node1", 999, params),
"Attempt to register an invalid level index", 0, -EINVAL);
single_test(st->cmds->remove_device(st, st_opts, "test-id"), "Remove test-id", 1, 0);
stonith_key_value_freeall(params, 1, 1);
}
static void
sanity_tests(void)
{
int rc = 0;
rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
if (rc != pcmk_ok) {
stonith_api_delete(st);
crm_exit(CRM_EX_DISCONNECT);
}
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
st_global_callback);
crm_info("Starting API Sanity Tests");
run_standard_test();
run_fence_failure_test();
run_fence_failure_rollover_test();
crm_info("Sanity Tests Passed");
}
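/* Exercise the API against a specific developer environment; this registers
 * fence_xvm, so it is only meaningful on a suitably configured test machine
 */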
static void
standard_dev_test(void)
{
int rc = 0;
char *tmp = NULL;
stonith_key_value_t *params = NULL;
rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
if (rc != pcmk_ok) {
stonith_api_delete(st);
crm_exit(CRM_EX_DISCONNECT);
}
params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4");
rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params);
crm_debug("Register: %d", rc);
rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10);
crm_debug("List: %d output: %s", rc, tmp ? tmp : "");
rc = st->cmds->monitor(st, st_opts, "test-id", 10);
crm_debug("Monitor: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node2", 10);
crm_debug("Status false_1_node2: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0);
crm_debug("Fence unknown-host: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0);
crm_debug("Fence false_1_node1: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
crm_debug("Unfence false_1_node1: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0);
crm_debug("Fence alias: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10);
crm_debug("Status alias: %d", rc);
rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
crm_debug("Unfence false_1_node1: %d", rc);
rc = st->cmds->remove_device(st, st_opts, "test-id");
crm_debug("Remove test-id: %d", rc);
stonith_key_value_freeall(params, 1, 1);
}
static void
iterate_mainloop_tests(gboolean event_ready);
static void
mainloop_callback(stonith_t * stonith, stonith_callback_data_t * data)
{
callback_rc = data->rc;
iterate_mainloop_tests(TRUE);
}
static int
register_callback_helper(int callid)
{
return st->cmds->register_callback(st,
callid,
MAINLOOP_DEFAULT_TIMEOUT,
st_opt_timeout_updates, NULL, "callback", mainloop_callback);
}
static void
test_async_fence_pass(int check_event)
{
int rc = 0;
if (check_event) {
if (callback_rc != 0) {
mainloop_test_done(FALSE);
} else {
mainloop_test_done(TRUE);
}
return;
}
rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(FALSE);
}
register_callback_helper(rc);
/* wait for event */
}
#define CUSTOM_TIMEOUT_ADDITION 10
static void
test_async_fence_custom_timeout(int check_event)
{
int rc = 0;
static time_t begin = 0;
if (check_event) {
uint32_t diff = (time(NULL) - begin);
if (callback_rc != -ETIME) {
mainloop_test_done(FALSE);
} else if (diff < CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT) {
crm_err
("Custom timeout test failed, callback expiration should be updated to %d, actual timeout was %d",
CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT, diff);
mainloop_test_done(FALSE);
} else {
mainloop_test_done(TRUE);
}
return;
}
begin = time(NULL);
rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(FALSE);
}
register_callback_helper(rc);
/* wait for event */
}
static void
test_async_fence_timeout(int check_event)
{
int rc = 0;
if (check_event) {
if (callback_rc != -ENODEV) {
mainloop_test_done(FALSE);
} else {
mainloop_test_done(TRUE);
}
return;
}
rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(FALSE);
}
register_callback_helper(rc);
/* wait for event */
}
static void
test_async_monitor(int check_event)
{
int rc = 0;
if (check_event) {
if (callback_rc) {
mainloop_test_done(FALSE);
} else {
mainloop_test_done(TRUE);
}
return;
}
rc = st->cmds->monitor(st, 0, "false_1", MAINLOOP_DEFAULT_TIMEOUT);
if (rc < 0) {
crm_err("monitor failed with rc %d", rc);
mainloop_test_done(FALSE);
}
register_callback_helper(rc);
/* wait for event */
}
static void
test_register_async_devices(int check_event)
{
char buf[16] = { 0, };
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2");
params = stonith_key_value_add(params, "mode", "fail");
st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_dummy", params);
stonith_key_value_freeall(params, 1, 1);
params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2");
params = stonith_key_value_add(params, "mode", "pass");
st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_dummy", params);
stonith_key_value_freeall(params, 1, 1);
params = NULL;
params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2");
params = stonith_key_value_add(params, "mode", "fail");
params = stonith_key_value_add(params, "delay", "1000");
snprintf(buf, sizeof(buf) - 1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION);
params = stonith_key_value_add(params, "pcmk_off_timeout", buf);
st->cmds->register_device(st, st_opts, "false_custom_timeout", "stonith-ng", "fence_dummy",
params);
stonith_key_value_freeall(params, 1, 1);
mainloop_test_done(TRUE);
}
static void
try_mainloop_connect(int check_event)
{
int rc = stonith_api_connect_retry(st, crm_system_name, 10);
if (rc == pcmk_ok) {
mainloop_test_done(TRUE);
return;
}
crm_err("API CONNECTION FAILURE");
mainloop_test_done(FALSE);
}
static void
iterate_mainloop_tests(gboolean event_ready)
{
static mainloop_test_iteration_cb callbacks[] = {
try_mainloop_connect,
test_register_async_devices,
test_async_monitor,
test_async_fence_pass,
test_async_fence_timeout,
test_async_fence_custom_timeout,
};
if (mainloop_iter == (sizeof(callbacks) / sizeof(mainloop_test_iteration_cb))) {
/* all tests ran, everything passed */
crm_info("ALL MAINLOOP TESTS PASSED!");
crm_exit(CRM_EX_OK);
}
callbacks[mainloop_iter] (event_ready);
}
static gboolean
trigger_iterate_mainloop_tests(gpointer user_data)
{
iterate_mainloop_tests(FALSE);
return TRUE;
}
static void
test_shutdown(int nsig)
{
int rc = 0;
if (st) {
rc = st->cmds->disconnect(st);
crm_info("Disconnect: %d", rc);
crm_debug("Destroy");
stonith_api_delete(st);
}
if (rc) {
crm_exit(CRM_EX_ERROR);
}
}
static void
mainloop_tests(void)
{
trig = mainloop_add_trigger(G_PRIORITY_HIGH, trigger_iterate_mainloop_tests, NULL);
mainloop_set_trigger(trig);
mainloop_add_signal(SIGTERM, test_shutdown);
crm_info("Starting");
mainloop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(mainloop);
}
int
main(int argc, char **argv)
{
int argerr = 0;
int flag;
int option_index = 0;
enum test_modes mode = test_standard;
crm_log_cli_init("cts-fence-helper");
crm_set_options(NULL, "mode [options]", long_options,
"Tool for testing the fencer (pacemaker-fenced) and its client API."
"\n\nUse --passive, --api_test, or --mainloop_api_test to select a test mode.\n");
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1) {
break;
}
switch (flag) {
case 'V':
verbose = 1;
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
case 'p':
mode = test_passive;
break;
case 't':
mode = test_api_sanity;
break;
case 'm':
mode = test_api_mainloop;
break;
default:
++argerr;
break;
}
}
crm_log_init(NULL, LOG_INFO, TRUE, (verbose? TRUE : FALSE), argc, argv,
FALSE);
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
- crm_debug("Create");
st = stonith_api_new();
+ if (st == NULL) {
+ crm_err("Could not connect to fencer: API memory allocation failed");
+ crm_exit(CRM_EX_DISCONNECT);
+ }
switch (mode) {
case test_standard:
standard_dev_test();
break;
case test_passive:
passive_test();
break;
case test_api_sanity:
sanity_tests();
break;
case test_api_mainloop:
mainloop_tests();
break;
}
test_shutdown(0);
return CRM_EX_OK;
}
diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index d816466262..f3ace2277d 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -1,2702 +1,2708 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if SUPPORT_CIBSECRETS
# include
#endif
#include
GHashTable *device_list = NULL;
GHashTable *topology = NULL;
GList *cmd_list = NULL;
struct device_search_s {
/* target of fence action */
char *host;
/* requested fence action */
char *action;
/* timeout to use if a device is queried dynamically for possible targets */
int per_device_timeout;
/* number of registered fencing devices at time of request */
int replies_needed;
/* number of device replies received so far */
int replies_received;
/* whether the target may run the requested action (or "off") on itself,
 * i.e. whether fencing suicide is allowed */
bool allow_suicide;
/* private data to pass to search callback function */
void *user_data;
/* function to call when all replies have been received */
void (*callback) (GList * devices, void *user_data);
/* devices capable of performing requested action (or off if remapping) */
GListPtr capable;
};
static gboolean stonith_device_dispatch(gpointer user_data);
static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data);
static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
const char *client_id);
static void search_devices_record_result(struct device_search_s *search, const char *device,
gboolean can_fence);
typedef struct async_command_s {
int id;
int pid;
int fd_stdout;
int options;
int default_timeout; /* seconds */
int timeout; /* seconds */
int start_delay; /* milliseconds */
int delay_id;
char *op;
char *origin;
char *client;
char *client_name;
char *remote_op_id;
char *victim;
uint32_t victim_nodeid;
char *action;
char *device;
char *mode;
GListPtr device_list;
GListPtr device_next;
void *internal_user_data;
void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data);
guint timer_sigterm;
guint timer_sigkill;
/*! If the operation timed out, this is the last signal
* we sent to the process to get it to terminate */
int last_timeout_signo;
stonith_device_t *active_on;
stonith_device_t *activating_on;
} async_command_t;
static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output,
xmlNode * data, int rc);
static gboolean
is_action_required(const char *action, stonith_device_t *device)
{
return device && device->automatic_unfencing && safe_str_eq(action, "on");
}
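/*!
 * \internal
 * \brief Get a device's configured maximum fencing delay for an action
 *
 * \param[in] device  Fencing device to check
 * \param[in] action  Action name
 *
 * \return Configured delay maximum in milliseconds (0 if unconfigured or
 *         if the action is neither "off" nor "reboot")
 */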
static int
get_action_delay_max(stonith_device_t * device, const char * action)
{
const char *value = NULL;
int delay_max_ms = 0;
if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) {
return 0;
}
value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_MAX);
if (value) {
delay_max_ms = crm_get_msec(value);
}
return delay_max_ms;
}
static int
get_action_delay_base(stonith_device_t * device, const char * action)
{
const char *value = NULL;
int delay_base_ms = 0;
if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) {
return 0;
}
value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_BASE);
if (value) {
delay_base_ms = crm_get_msec(value);
}
return delay_base_ms;
}
/*!
* \internal
* \brief Override STONITH timeout with pcmk_*_timeout if available
*
* \param[in] device STONITH device to use
* \param[in] action STONITH action name
* \param[in] default_timeout Timeout to use if device does not have
* a pcmk_*_timeout parameter for action
*
* \return Value of pcmk_(action)_timeout if available, otherwise default_timeout
* \note For consistency, it would be nice if reboot/off/on timeouts could be
* set the same way as start/stop/monitor timeouts, i.e. with an
* entry in the fencing resource configuration. However that
* is insufficient because fencing devices may be registered directly via
* the fencer's register_device() API instead of going through the CIB
* (e.g. stonith_admin uses it for its -R option, and the executor uses it
* to ensure a device is registered when a command is issued). As device
* properties, pcmk_*_timeout parameters can be grabbed by the fencer when
* the device is registered, whether by CIB change or API call.
*/
static int
get_action_timeout(stonith_device_t * device, const char *action, int default_timeout)
{
if (action && device && device->params) {
char buffer[64] = { 0, };
const char *value = NULL;
/* If "reboot" was requested but the device does not support it,
* we will remap to "off", so check timeout for "off" instead
*/
if (safe_str_eq(action, "reboot")
&& is_not_set(device->flags, st_device_supports_reboot)) {
crm_trace("%s doesn't support reboot, using timeout for off instead",
device->id);
action = "off";
}
/* If the device config specified an action-specific timeout, use it */
snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action);
value = g_hash_table_lookup(device->params, buffer);
if (value) {
return atoi(value);
}
}
return default_timeout;
}
static void
free_async_command(async_command_t * cmd)
{
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
cmd_list = g_list_remove(cmd_list, cmd);
g_list_free_full(cmd->device_list, free);
free(cmd->device);
free(cmd->action);
free(cmd->victim);
free(cmd->remote_op_id);
free(cmd->client);
free(cmd->client_name);
free(cmd->origin);
free(cmd->mode);
free(cmd->op);
free(cmd);
}
static async_command_t *
create_async_command(xmlNode * msg)
{
async_command_t *cmd = NULL;
xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
const char *action = crm_element_value(op, F_STONITH_ACTION);
CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL);
crm_log_xml_trace(msg, "Command");
cmd = calloc(1, sizeof(async_command_t));
crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id));
crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options));
crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout));
cmd->timeout = cmd->default_timeout;
cmd->origin = crm_element_value_copy(msg, F_ORIG);
cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID);
cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID);
cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME);
cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION);
cmd->action = strdup(action);
cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET);
cmd->mode = crm_element_value_copy(op, F_STONITH_MODE);
cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE);
CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL);
CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient"));
cmd->done_cb = st_child_done;
cmd_list = g_list_append(cmd_list, cmd);
return cmd;
}
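/*!
 * \internal
 * \brief Get a device's concurrent-action limit
 *
 * \param[in] device  Fencing device to check
 *
 * \return Configured action limit (a negative value means no limit; a
 *         configured value of 0 is invalid and treated as 1)
 */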
static int
get_action_limit(stonith_device_t * device)
{
const char *value = NULL;
int action_limit = 1;
value = g_hash_table_lookup(device->params, STONITH_ATTR_ACTION_LIMIT);
if (value) {
action_limit = crm_parse_int(value, "1");
if (action_limit == 0) {
/* pcmk_action_limit should not be 0. Enforce it to be 1. */
action_limit = 1;
}
}
return action_limit;
}
static int
get_active_cmds(stonith_device_t * device)
{
int counter = 0;
GListPtr gIter = NULL;
GListPtr gIterNext = NULL;
CRM_CHECK(device != NULL, return 0);
for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
async_command_t *cmd = gIter->data;
gIterNext = gIter->next;
if (cmd->active_on == device) {
counter++;
}
}
return counter;
}
static void
fork_cb(GPid pid, gpointer user_data)
{
async_command_t *cmd = (async_command_t *) user_data;
stonith_device_t * device =
/* in case of a retry we've done the move from
activating_on to active_on already
*/
cmd->activating_on?cmd->activating_on:cmd->active_on;
CRM_ASSERT(device);
crm_debug("Operation %s%s%s on %s now running with pid=%d, timeout=%ds",
cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
device->id, pid, cmd->timeout);
cmd->active_on = device;
cmd->activating_on = NULL;
}
static gboolean
stonith_device_execute(stonith_device_t * device)
{
int exec_rc = 0;
const char *action_str = NULL;
async_command_t *cmd = NULL;
stonith_action_t *action = NULL;
int active_cmds = 0;
int action_limit = 0;
CRM_CHECK(device != NULL, return FALSE);
active_cmds = get_active_cmds(device);
action_limit = get_action_limit(device);
if (action_limit > -1 && active_cmds >= action_limit) {
crm_trace("%s is over its action limit of %d (%u active action%s)",
device->id, action_limit, active_cmds, active_cmds > 1 ? "s" : "");
return TRUE;
}
if (device->pending_ops) {
GList *first = device->pending_ops;
cmd = first->data;
if (cmd && cmd->delay_id) {
crm_trace
("Operation %s%s%s on %s was asked to run too early, waiting for start_delay timeout of %dms",
cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
device->id, cmd->start_delay);
return TRUE;
}
device->pending_ops = g_list_remove_link(device->pending_ops, first);
g_list_free_1(first);
}
if (cmd == NULL) {
crm_trace("Nothing further to do for %s", device->id);
return TRUE;
}
if(safe_str_eq(device->agent, STONITH_WATCHDOG_AGENT)) {
if(safe_str_eq(cmd->action, "reboot")) {
pcmk_panic(__FUNCTION__);
return TRUE;
} else if(safe_str_eq(cmd->action, "off")) {
pcmk_panic(__FUNCTION__);
return TRUE;
} else {
crm_info("Faking success for %s watchdog operation", cmd->action);
cmd->done_cb(0, 0, NULL, cmd);
return TRUE;
}
}
#if SUPPORT_CIBSECRETS
if (replace_secret_params(device->id, device->params) < 0) {
/* replacing secrets failed! */
if (safe_str_eq(cmd->action,"stop")) {
/* don't fail on stop! */
crm_info("proceeding with the stop operation for %s", device->id);
} else {
crm_err("failed to get secrets for %s, "
"considering resource not configured", device->id);
exec_rc = PCMK_OCF_NOT_CONFIGURED;
cmd->done_cb(0, exec_rc, NULL, cmd);
return TRUE;
}
}
#endif
action_str = cmd->action;
if (safe_str_eq(cmd->action, "reboot") && is_not_set(device->flags, st_device_supports_reboot)) {
crm_warn("Agent '%s' does not advertise support for 'reboot', performing 'off' action instead", device->agent);
action_str = "off";
}
action = stonith_action_create(device->agent,
action_str,
cmd->victim,
cmd->victim_nodeid,
cmd->timeout, device->params, device->aliases);
/* For async execution, exec_rc is negative only on an early error exit;
 * otherwise, success and errors are handled via the callbacks */
cmd->activating_on = device;
exec_rc = stonith_action_execute_async(action, (void *)cmd,
cmd->done_cb, fork_cb);
if (exec_rc < 0) {
crm_warn("Operation %s%s%s on %s failed: %s (%d)",
cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
device->id, pcmk_strerror(exec_rc), exec_rc);
cmd->activating_on = NULL;
cmd->done_cb(0, exec_rc, NULL, cmd);
}
return TRUE;
}
static gboolean
stonith_device_dispatch(gpointer user_data)
{
return stonith_device_execute(user_data);
}
static gboolean
start_delay_helper(gpointer data)
{
async_command_t *cmd = data;
stonith_device_t *device = NULL;
cmd->delay_id = 0;
device = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
if (device) {
mainloop_set_trigger(device->work);
}
return FALSE;
}
static void
schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
{
int delay_max = 0;
int delay_base = 0;
CRM_CHECK(cmd != NULL, return);
CRM_CHECK(device != NULL, return);
if (cmd->device) {
free(cmd->device);
}
if (device->include_nodeid && cmd->victim) {
crm_node_t *node = crm_get_peer(0, cmd->victim);
cmd->victim_nodeid = node->id;
}
cmd->device = strdup(device->id);
cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
if (cmd->remote_op_id) {
crm_debug("Scheduling %s on %s for remote peer %s with op id (%s) (timeout=%ds)",
cmd->action, device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
} else {
crm_debug("Scheduling %s on %s for %s (timeout=%ds)",
cmd->action, device->id, cmd->client, cmd->timeout);
}
device->pending_ops = g_list_append(device->pending_ops, cmd);
mainloop_set_trigger(device->work);
delay_max = get_action_delay_max(device, cmd->action);
delay_base = get_action_delay_base(device, cmd->action);
if (delay_max == 0) {
delay_max = delay_base;
}
if (delay_max < delay_base) {
crm_warn("Base-delay (%dms) is larger than max-delay (%dms) "
"for %s on %s - limiting to max-delay",
delay_base, delay_max, cmd->action, device->id);
delay_base = delay_max;
}
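/* Delay the operation by a random number of milliseconds between
 * delay_base and delay_max, which can be used to reduce the chance of
 * nodes fencing each other simultaneously
 */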
if (delay_max > 0) {
// coverity[dont_call] We're not using rand() for security
cmd->start_delay =
((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0)
+ delay_base;
crm_notice("Delaying %s on %s for %dms (timeout=%ds, base=%dms, "
"max=%dms)",
cmd->action, device->id, cmd->start_delay, cmd->timeout,
delay_base, delay_max);
cmd->delay_id =
g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
}
static void
free_device(gpointer data)
{
GListPtr gIter = NULL;
stonith_device_t *device = data;
g_hash_table_destroy(device->params);
g_hash_table_destroy(device->aliases);
for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
async_command_t *cmd = gIter->data;
crm_warn("Removal of device '%s' purged operation %s", device->id, cmd->action);
cmd->done_cb(0, -ENODEV, NULL, cmd);
}
g_list_free(device->pending_ops);
g_list_free_full(device->targets, free);
mainloop_destroy_trigger(device->work);
free_xml(device->agent_metadata);
free(device->namespace);
free(device->on_target_actions);
free(device->agent);
free(device->id);
free(device);
}
void free_device_list()
{
if (device_list != NULL) {
g_hash_table_destroy(device_list);
device_list = NULL;
}
}
void
init_device_list()
{
if (device_list == NULL) {
device_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
free_device);
}
}
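/*!
 * \internal
 * \brief Parse a host map string into a table of node-to-port aliases
 *
 * \param[in]  hostmap  Mapping of hosts to ports (for example,
 *                      "node1=1,2 node2=3,4")
 * \param[out] targets  If not NULL, parsed host names are also appended here
 *
 * \return Newly allocated alias table (empty if hostmap is NULL)
 */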
static GHashTable *
build_port_aliases(const char *hostmap, GListPtr * targets)
{
char *name = NULL;
int last = 0, lpc = 0, max = 0, added = 0;
GHashTable *aliases = crm_strcase_table_new();
if (hostmap == NULL) {
return aliases;
}
max = strlen(hostmap);
for (; lpc <= max; lpc++) {
switch (hostmap[lpc]) {
/* Assignment chars */
case '=':
case ':':
if (lpc > last) {
free(name);
name = calloc(1, 1 + lpc - last);
memcpy(name, hostmap + last, lpc - last);
}
last = lpc + 1;
break;
/* Delimiter chars */
/* case ',': Potentially used to specify multiple ports */
case 0:
case ';':
case ' ':
case '\t':
if (name) {
char *value = NULL;
value = calloc(1, 1 + lpc - last);
memcpy(value, hostmap + last, lpc - last);
crm_debug("Adding alias '%s'='%s'", name, value);
g_hash_table_replace(aliases, name, value);
if (targets) {
*targets = g_list_append(*targets, strdup(value));
}
value = NULL;
name = NULL;
added++;
} else if (lpc > last) {
crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
}
last = lpc + 1;
break;
}
if (hostmap[lpc] == 0) {
break;
}
}
if (added == 0) {
crm_info("No host mappings detected in '%s'", hostmap);
}
free(name);
return aliases;
}
GHashTable *metadata_cache = NULL;
void
free_metadata_cache() {
if (metadata_cache != NULL) {
g_hash_table_destroy(metadata_cache);
metadata_cache = NULL;
}
}
static void
init_metadata_cache() {
if (metadata_cache == NULL) {
metadata_cache = crm_str_table_new();
}
}
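/*!
 * \internal
 * \brief Get a fence agent's metadata, caching the raw XML string
 *
 * \param[in] agent  Fence agent name
 *
 * \return Parsed metadata XML, or NULL on failure (the watchdog agent is
 *         never queried and always returns NULL)
 */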
static xmlNode *
get_agent_metadata(const char *agent)
{
xmlNode *xml = NULL;
char *buffer = NULL;
init_metadata_cache();
buffer = g_hash_table_lookup(metadata_cache, agent);
if(safe_str_eq(agent, STONITH_WATCHDOG_AGENT)) {
return NULL;
} else if(buffer == NULL) {
stonith_t *st = stonith_api_new();
- int rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10);
+ int rc;
+ if (st == NULL) {
+ crm_warn("Could not get agent meta-data: "
+ "API memory allocation failed");
+ return NULL;
+ }
+ rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10);
stonith_api_delete(st);
if (rc || !buffer) {
crm_err("Could not retrieve metadata for fencing agent %s", agent);
return NULL;
}
g_hash_table_replace(metadata_cache, strdup(agent), buffer);
}
xml = string2xml(buffer);
return xml;
}
static gboolean
is_nodeid_required(xmlNode * xml)
{
xmlXPathObjectPtr xpath = NULL;
if (stand_alone) {
return FALSE;
}
if (!xml) {
return FALSE;
}
xpath = xpath_search(xml, "//parameter[@name='nodeid']");
if (numXpathResults(xpath) <= 0) {
freeXpathObject(xpath);
return FALSE;
}
freeXpathObject(xpath);
return TRUE;
}
#define MAX_ACTION_LEN 256
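/*!
 * \internal
 * \brief Append an action name to a space-separated list of actions
 *
 * \param[in,out] actions  Current list (a new buffer is allocated if NULL)
 * \param[in]     action   Action name to append
 *
 * \return List including the new action (at most MAX_ACTION_LEN bytes)
 */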
static char *
add_action(char *actions, const char *action)
{
int offset = 0;
if (actions == NULL) {
actions = calloc(1, MAX_ACTION_LEN);
} else {
offset = strlen(actions);
}
if (offset > 0) {
offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, " ");
}
offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, "%s", action);
return actions;
}
static void
read_action_metadata(stonith_device_t *device)
{
xmlXPathObjectPtr xpath = NULL;
int max = 0;
int lpc = 0;
if (device->agent_metadata == NULL) {
return;
}
xpath = xpath_search(device->agent_metadata, "//action");
max = numXpathResults(xpath);
if (max <= 0) {
freeXpathObject(xpath);
return;
}
for (lpc = 0; lpc < max; lpc++) {
const char *on_target = NULL;
const char *action = NULL;
xmlNode *match = getXpathResult(xpath, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match == NULL) { continue; };
on_target = crm_element_value(match, "on_target");
action = crm_element_value(match, "name");
if(safe_str_eq(action, "list")) {
set_bit(device->flags, st_device_supports_list);
} else if(safe_str_eq(action, "status")) {
set_bit(device->flags, st_device_supports_status);
} else if(safe_str_eq(action, "reboot")) {
set_bit(device->flags, st_device_supports_reboot);
} else if (safe_str_eq(action, "on")) {
/* "automatic" means the cluster will unfence node when it joins */
const char *automatic = crm_element_value(match, "automatic");
/* "required" is a deprecated synonym for "automatic" */
const char *required = crm_element_value(match, "required");
if (crm_is_true(automatic) || crm_is_true(required)) {
device->automatic_unfencing = TRUE;
}
}
if (action && crm_is_true(on_target)) {
device->on_target_actions = add_action(device->on_target_actions, action);
}
}
freeXpathObject(xpath);
}
/*!
* \internal
* \brief Set a pcmk_*_action parameter if not already set
*
* \param[in,out] params Device parameters
* \param[in] action Name of action
* \param[in] value Value to use if action is not already set
*/
static void
map_action(GHashTable *params, const char *action, const char *value)
{
char *key = crm_strdup_printf("pcmk_%s_action", action);
if (g_hash_table_lookup(params, key)) {
crm_warn("Ignoring %s='%s', see %s instead",
STONITH_ATTR_ACTION_OP, value, key);
free(key);
} else {
crm_warn("Mapping %s='%s' to %s='%s'",
STONITH_ATTR_ACTION_OP, value, key, value);
g_hash_table_insert(params, key, strdup(value));
}
}
/*!
* \internal
* \brief Create device parameter table from XML
*
* \param[in] name Device name (used for logging only)
* \param[in,out] params Device parameters
*/
static GHashTable *
xml2device_params(const char *name, xmlNode *dev)
{
GHashTable *params = xml2list(dev);
const char *value;
/* Action should never be specified in the device configuration,
* but we support it for users who are familiar with other software
* that worked that way.
*/
value = g_hash_table_lookup(params, STONITH_ATTR_ACTION_OP);
if (value != NULL) {
crm_warn("%s has '%s' parameter, which should never be specified in configuration",
name, STONITH_ATTR_ACTION_OP);
if (*value == '\0') {
crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP);
} else if (strcmp(value, "reboot") == 0) {
crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)",
STONITH_ATTR_ACTION_OP);
} else if (strcmp(value, "off") == 0) {
map_action(params, "reboot", value);
} else {
map_action(params, "off", value);
map_action(params, "reboot", value);
}
g_hash_table_remove(params, STONITH_ATTR_ACTION_OP);
}
return params;
}
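/*!
 * \internal
 * \brief Create a new fencing device record from an XML message
 *
 * \param[in] msg  XML message containing the device definition
 *
 * \return Newly allocated device, or NULL if it could not be created
 */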
static stonith_device_t *
build_device_from_xml(xmlNode * msg)
{
const char *value;
xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
stonith_device_t *device = NULL;
char *agent = crm_element_value_copy(dev, "agent");
CRM_CHECK(agent != NULL, return device);
device = calloc(1, sizeof(stonith_device_t));
CRM_CHECK(device != NULL, {free(agent); return device;});
device->id = crm_element_value_copy(dev, XML_ATTR_ID);
device->agent = agent;
device->namespace = crm_element_value_copy(dev, "namespace");
device->params = xml2device_params(device->id, dev);
value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST);
if (value) {
device->targets = stonith__parse_targets(value);
}
value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP);
device->aliases = build_port_aliases(value, &(device->targets));
device->agent_metadata = get_agent_metadata(device->agent);
read_action_metadata(device);
value = g_hash_table_lookup(device->params, "nodeid");
if (!value) {
device->include_nodeid = is_nodeid_required(device->agent_metadata);
}
value = crm_element_value(dev, "rsc_provides");
if (safe_str_eq(value, "unfencing")) {
device->automatic_unfencing = TRUE;
}
if (is_action_required("on", device)) {
crm_info("The fencing device '%s' requires unfencing", device->id);
}
if (device->on_target_actions) {
crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node",
device->id, device->on_target_actions);
}
device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
/* TODO: Hook up priority */
return device;
}
static const char *
target_list_type(stonith_device_t * dev)
{
const char *check_type = NULL;
check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK);
if (check_type == NULL) {
if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) {
check_type = "static-list";
} else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) {
check_type = "static-list";
} else if(is_set(dev->flags, st_device_supports_list)){
check_type = "dynamic-list";
} else if(is_set(dev->flags, st_device_supports_status)){
check_type = "status";
} else {
check_type = "none";
}
}
return check_type;
}
static void
schedule_internal_command(const char *origin,
stonith_device_t * device,
const char *action,
const char *victim,
int timeout,
void *internal_user_data,
void (*done_cb) (GPid pid, int rc, const char *output,
gpointer user_data))
{
async_command_t *cmd = NULL;
cmd = calloc(1, sizeof(async_command_t));
cmd->id = -1;
cmd->default_timeout = timeout ? timeout : 60;
cmd->timeout = cmd->default_timeout;
cmd->action = strdup(action);
cmd->victim = victim ? strdup(victim) : NULL;
cmd->device = strdup(device->id);
cmd->origin = strdup(origin);
cmd->client = strdup(crm_system_name);
cmd->client_name = strdup(crm_system_name);
cmd->internal_user_data = internal_user_data;
cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
schedule_stonith_command(cmd, device);
}
gboolean
string_in_list(GListPtr list, const char *item)
{
int lpc = 0;
int max = g_list_length(list);
for (lpc = 0; lpc < max; lpc++) {
const char *value = g_list_nth_data(list, lpc);
if (safe_str_eq(item, value)) {
return TRUE;
} else {
crm_trace("%d: '%s' != '%s'", lpc, item, value);
}
}
return FALSE;
}
static void
status_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
gboolean can = FALSE;
free_async_command(cmd);
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
if (rc == 1 /* unknown */ ) {
crm_trace("Host %s is not known by %s", search->host, dev->id);
} else if (rc == 0 /* active */ || rc == 2 /* inactive */ ) {
crm_trace("Host %s is known by %s", search->host, dev->id);
can = TRUE;
} else {
crm_notice("Unknown result when testing if %s can fence %s: rc=%d", dev->id, search->host,
rc);
}
search_devices_record_result(search, dev->id, can);
}
static void
dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
gboolean can_fence = FALSE;
free_async_command(cmd);
    /* A host/alias must appear in the agent's list output to be eligible for
     * fencing.
     *
     * This will cause problems if nodes that are down are not listed, or (for
     * virtual nodes) if a guest is still listed even though it has been moved
     * to another machine.
     */
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
/* If we successfully got the targets earlier, don't disable. */
if (rc != 0 && !dev->targets) {
crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output);
/* Fall back to status */
g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status"));
g_list_free_full(dev->targets, free);
dev->targets = NULL;
} else if (!rc) {
crm_info("Refreshing port list for %s", dev->id);
g_list_free_full(dev->targets, free);
dev->targets = stonith__parse_targets(output);
dev->targets_age = time(NULL);
}
if (dev->targets) {
const char *alias = g_hash_table_lookup(dev->aliases, search->host);
if (!alias) {
alias = search->host;
}
if (string_in_list(dev->targets, alias)) {
can_fence = TRUE;
}
}
search_devices_record_result(search, dev->id, can_fence);
}
/*!
 * \internal
 * \brief Compare two device parameter tables
 *
 * \return 1 if any key in \p first is missing from \p second or has a
 *         different value there, otherwise 0
 */
static int
device_params_diff(GHashTable *first, GHashTable *second) {
char *key = NULL;
char *value = NULL;
GHashTableIter gIter;
g_hash_table_iter_init(&gIter, first);
while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
if(strstr(key, "CRM_meta") == key) {
continue;
} else if(strcmp(key, "crm_feature_set") == 0) {
continue;
} else {
char *other_value = g_hash_table_lookup(second, key);
if (!other_value || safe_str_neq(other_value, value)) {
crm_trace("Different value for %s: %s != %s", key, other_value, value);
return 1;
}
}
}
return 0;
}
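/* Editor's note: device_params_diff() is deliberately one-directional (it
 * only sees keys that are missing or changed in the second table), which is
 * why device_has_duplicate() below calls it twice with the arguments swapped
 * to get a symmetric comparison.
 */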
/*!
* \internal
 * \brief Check whether a device with the same ID, agent, and parameters is already in device_list
*/
static stonith_device_t *
device_has_duplicate(stonith_device_t * device)
{
stonith_device_t *dup = g_hash_table_lookup(device_list, device->id);
if (!dup) {
crm_trace("No match for %s", device->id);
return NULL;
} else if (safe_str_neq(dup->agent, device->agent)) {
crm_trace("Different agent: %s != %s", dup->agent, device->agent);
return NULL;
}
/* Use calculate_operation_digest() here? */
if (device_params_diff(device->params, dup->params) ||
device_params_diff(dup->params, device->params)) {
return NULL;
}
crm_trace("Match");
return dup;
}
int
stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
{
stonith_device_t *dup = NULL;
stonith_device_t *device = build_device_from_xml(msg);
CRM_CHECK(device != NULL, return -ENOMEM);
dup = device_has_duplicate(device);
if (dup) {
crm_debug("Device '%s' already existed in device list (%d active devices)", device->id,
g_hash_table_size(device_list));
free_device(device);
device = dup;
} else {
stonith_device_t *old = g_hash_table_lookup(device_list, device->id);
if (from_cib && old && old->api_registered) {
            /* If the CIB is overwriting an entry that is also registered by a
             * stonith client, copy any pending ops from the old entry to the
             * new one; otherwise, the pending ops would be reported as
             * failures.
             */
crm_info("Overwriting an existing entry for %s from the cib", device->id);
device->pending_ops = old->pending_ops;
device->api_registered = TRUE;
old->pending_ops = NULL;
if (device->pending_ops) {
mainloop_set_trigger(device->work);
}
}
g_hash_table_replace(device_list, device->id, device);
crm_notice("Added '%s' to the device list (%d active devices)", device->id,
g_hash_table_size(device_list));
}
if (desc) {
*desc = device->id;
}
if (from_cib) {
device->cib_registered = TRUE;
} else {
device->api_registered = TRUE;
}
return pcmk_ok;
}
int
stonith_device_remove(const char *id, gboolean from_cib)
{
stonith_device_t *device = g_hash_table_lookup(device_list, id);
if (!device) {
crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list));
return pcmk_ok;
}
if (from_cib) {
device->cib_registered = FALSE;
} else {
device->verified = FALSE;
device->api_registered = FALSE;
}
if (!device->cib_registered && !device->api_registered) {
g_hash_table_remove(device_list, id);
crm_info("Removed '%s' from the device list (%d active devices)",
id, g_hash_table_size(device_list));
} else {
crm_trace("Not removing '%s' from the device list (%d active devices) "
"- still %s%s_registered", id, g_hash_table_size(device_list),
device->cib_registered?"cib":"", device->api_registered?"api":"");
}
return pcmk_ok;
}
/*!
* \internal
* \brief Return the number of stonith levels registered for a node
*
* \param[in] tp Node's topology table entry
*
* \return Number of non-NULL levels in topology entry
* \note This function is used only for log messages.
*/
static int
count_active_levels(stonith_topology_t * tp)
{
int lpc = 0;
int count = 0;
for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
if (tp->levels[lpc] != NULL) {
count++;
}
}
return count;
}
static void
free_topology_entry(gpointer data)
{
stonith_topology_t *tp = data;
int lpc = 0;
for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
if (tp->levels[lpc] != NULL) {
g_list_free_full(tp->levels[lpc], free);
}
}
free(tp->target);
free(tp->target_value);
free(tp->target_pattern);
free(tp->target_attribute);
free(tp);
}
void
free_topology_list()
{
if (topology != NULL) {
g_hash_table_destroy(topology);
topology = NULL;
}
}
void
init_topology_list()
{
if (topology == NULL) {
topology = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
free_topology_entry);
}
}
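/* Editor's sketch (illustrative): assuming the XML_ATTR_STONITH_TARGET*
 * macros map to the usual target / target-pattern / target-attribute /
 * target-value attribute names, stonith_level_key() below yields, for
 * example:
 *
 *   <... target="node1" .../>                        key "node1" (kind 0)
 *   <... target-pattern="node.*" .../>               key "node.*" (kind 1)
 *   <... target-attribute="rack" target-value="1"/>  key "rack=1" (kind 2)
 */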
char *stonith_level_key(xmlNode *level, int mode)
{
if(mode == -1) {
mode = stonith_level_kind(level);
}
switch(mode) {
case 0:
return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET);
case 1:
return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN);
case 2:
{
const char *name = crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE);
const char *value = crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE);
if(name && value) {
return crm_strdup_printf("%s=%s", name, value);
}
            }
            /* fall through if the attribute name or value is missing */
        default:
return crm_strdup_printf("Unknown-%d-%s", mode, ID(level));
}
}
int stonith_level_kind(xmlNode * level)
{
int mode = 0;
const char *target = crm_element_value(level, XML_ATTR_STONITH_TARGET);
if(target == NULL) {
mode++;
target = crm_element_value(level, XML_ATTR_STONITH_TARGET_PATTERN);
}
if(stand_alone == FALSE && target == NULL) {
mode++;
if(crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE) == NULL) {
mode++;
} else if(crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE) == NULL) {
mode++;
}
}
return mode;
}
static stonith_key_value_t *
parse_device_list(const char *devices)
{
int lpc = 0;
int max = 0;
int last = 0;
stonith_key_value_t *output = NULL;
if (devices == NULL) {
return output;
}
max = strlen(devices);
for (lpc = 0; lpc <= max; lpc++) {
if (devices[lpc] == ',' || devices[lpc] == 0) {
char *line = strndup(devices + last, lpc - last);
output = stonith_key_value_add(output, NULL, line);
free(line);
last = lpc + 1;
}
}
return output;
}
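/* Editor's note: the loop above deliberately runs through the terminating
 * NUL (lpc <= max) so the final entry is flushed; e.g. "disk1,disk2" yields
 * a two-entry stonith_key_value_t list containing "disk1" and "disk2".
 */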
/*!
* \internal
* \brief Register a STONITH level for a target
*
* Given an XML request specifying the target name, level index, and device IDs
* for the level, this will create an entry for the target in the global topology
* table if one does not already exist, then append the specified device IDs to
* the entry's device list for the specified level.
*
* \param[in] msg XML request for STONITH level registration
* \param[out] desc If not NULL, will be set to string representation ("TARGET[LEVEL]")
*
* \return pcmk_ok on success, -EINVAL if XML does not specify valid level index
*/
int
stonith_level_register(xmlNode *msg, char **desc)
{
int id = 0;
xmlNode *level;
int mode;
char *target;
stonith_topology_t *tp;
stonith_key_value_t *dIter = NULL;
stonith_key_value_t *devices = NULL;
/* Allow the XML here to point to the level tag directly, or wrapped in
* another tag. If directly, don't search by xpath, because it might give
* multiple hits (e.g. if the XML is the CIB).
*/
if (safe_str_eq(TYPE(msg), XML_TAG_FENCING_LEVEL)) {
level = msg;
} else {
level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR);
}
CRM_CHECK(level != NULL, return -EINVAL);
mode = stonith_level_kind(level);
target = stonith_level_key(level, mode);
crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id);
if (desc) {
*desc = crm_strdup_printf("%s[%d]", target, id);
}
/* Sanity-check arguments */
if (mode >= 3 || (id <= 0) || (id >= ST_LEVEL_MAX)) {
crm_trace("Could not add %s[%d] (%d) to the topology (%d active entries)", target, id, mode, g_hash_table_size(topology));
free(target);
crm_log_xml_err(level, "Bad topology");
return -EINVAL;
}
/* Find or create topology table entry */
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
tp = calloc(1, sizeof(stonith_topology_t));
tp->kind = mode;
tp->target = target;
tp->target_value = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_VALUE);
tp->target_pattern = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN);
tp->target_attribute = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE);
g_hash_table_replace(topology, tp->target, tp);
crm_trace("Added %s (%d) to the topology (%d active entries)",
target, mode, g_hash_table_size(topology));
} else {
free(target);
}
if (tp->levels[id] != NULL) {
crm_info("Adding to the existing %s[%d] topology entry",
tp->target, id);
}
devices = parse_device_list(crm_element_value(level, XML_ATTR_STONITH_DEVICES));
for (dIter = devices; dIter; dIter = dIter->next) {
const char *device = dIter->value;
crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id);
tp->levels[id] = g_list_append(tp->levels[id], strdup(device));
}
stonith_key_value_freeall(devices, 1, 1);
crm_info("Target %s has %d active fencing levels",
tp->target, count_active_levels(tp));
return pcmk_ok;
}
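/* Editor's sketch (illustrative) for stonith_level_register() above:
 * assuming XML_ATTR_STONITH_INDEX and XML_ATTR_STONITH_DEVICES map to the
 * usual index / devices attributes, a request wrapping something like
 *
 *   <fencing-level id="fl1" index="1" target="node1" devices="ipmi1,ipmi2"/>
 *
 * would create (or extend) the topology entry for "node1" with both devices
 * registered at level 1.
 */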
int
stonith_level_remove(xmlNode *msg, char **desc)
{
int id = 0;
stonith_topology_t *tp;
char *target;
/* Unlike additions, removal requests should always have one level tag */
xmlNode *level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR);
CRM_CHECK(level != NULL, return -EINVAL);
target = stonith_level_key(level, -1);
crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id);
if (desc) {
*desc = crm_strdup_printf("%s[%d]", target, id);
}
/* Sanity-check arguments */
if (id >= ST_LEVEL_MAX) {
free(target);
return -EINVAL;
}
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
crm_info("Topology for %s not found (%d active entries)",
target, g_hash_table_size(topology));
} else if (id == 0 && g_hash_table_remove(topology, target)) {
crm_info("Removed all %s related entries from the topology (%d active entries)",
target, g_hash_table_size(topology));
} else if (id > 0 && tp->levels[id] != NULL) {
g_list_free_full(tp->levels[id], free);
tp->levels[id] = NULL;
crm_info("Removed level '%d' from topology for %s (%d active levels remaining)",
id, target, count_active_levels(tp));
}
free(target);
return pcmk_ok;
}
/*!
* \internal
* \brief Schedule an (asynchronous) action directly on a stonith device
*
* Handle a STONITH_OP_EXEC API message by scheduling a requested agent action
* directly on a specified device. Only list, monitor, and status actions are
* expected to use this call, though it should work with any agent command.
*
* \param[in] msg API message XML with desired action
* \param[out] output Unused
*
* \return -EINPROGRESS on success, -errno otherwise
* \note If the action is monitor, the device must be registered via the API
* (CIB registration is not sufficient), because monitor should not be
* possible unless the device is "started" (API registered).
*/
static int
stonith_device_action(xmlNode * msg, char **output)
{
xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
const char *id = crm_element_value(dev, F_STONITH_DEVICE);
const char *action = crm_element_value(op, F_STONITH_ACTION);
async_command_t *cmd = NULL;
stonith_device_t *device = NULL;
if ((id == NULL) || (action == NULL)) {
crm_info("Malformed API action request: device %s, action %s",
(id? id : "not specified"),
(action? action : "not specified"));
return -EPROTO;
}
device = g_hash_table_lookup(device_list, id);
if ((device == NULL)
|| (!device->api_registered && !strcmp(action, "monitor"))) {
// Monitors may run only on "started" (API-registered) devices
        crm_info("Ignoring API %s action request because device %s was not found or not started",
action, id);
return -ENODEV;
}
cmd = create_async_command(msg);
if (cmd == NULL) {
return -EPROTO;
}
schedule_stonith_command(cmd, device);
return -EINPROGRESS;
}
static void
search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
{
search->replies_received++;
if (can_fence && device) {
search->capable = g_list_append(search->capable, strdup(device));
}
if (search->replies_needed == search->replies_received) {
        crm_debug("Finished search: %d devices can perform action (%s) on node %s",
g_list_length(search->capable),
search->action ? search->action : "",
search->host ? search->host : "");
search->callback(search->capable, search->user_data);
free(search->host);
free(search->action);
free(search);
}
}
/*!
* \internal
* \brief Check whether the local host is allowed to execute a fencing action
*
* \param[in] device Fence device to check
* \param[in] action Fence action to check
* \param[in] target Hostname of fence target
* \param[in] allow_suicide Whether self-fencing is allowed for this operation
*
* \return TRUE if local host is allowed to execute action, FALSE otherwise
*/
static gboolean
localhost_is_eligible(const stonith_device_t *device, const char *action,
const char *target, gboolean allow_suicide)
{
gboolean localhost_is_target = safe_str_eq(target, stonith_our_uname);
if (device && action && device->on_target_actions
&& strstr(device->on_target_actions, action)) {
if (!localhost_is_target) {
            crm_trace("%s operation with %s can only be executed for localhost, not %s",
action, device->id, target);
return FALSE;
}
} else if (localhost_is_target && !allow_suicide) {
crm_trace("%s operation does not support self-fencing", action);
return FALSE;
}
return TRUE;
}
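/* Editor's note: for a device whose metadata marks "on" as an on-target
 * action (automatic unfencing), localhost_is_eligible() returns FALSE on
 * every node except the target itself; that is why the "reboot"
 * short-circuit in can_fence_host_with_device() below checks "on" with
 * allow_suicide set to FALSE, since the target is off at that point.
 */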
static void
can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search)
{
gboolean can = FALSE;
const char *check_type = NULL;
const char *host = search->host;
const char *alias = NULL;
CRM_LOG_ASSERT(dev != NULL);
if (dev == NULL) {
goto search_report_results;
} else if (host == NULL) {
can = TRUE;
goto search_report_results;
}
/* Short-circuit query if this host is not allowed to perform the action */
if (safe_str_eq(search->action, "reboot")) {
/* A "reboot" *might* get remapped to "off" then "on", so short-circuit
* only if all three are disallowed. If only one or two are disallowed,
* we'll report that with the results. We never allow suicide for
* remapped "on" operations because the host is off at that point.
*/
if (!localhost_is_eligible(dev, "reboot", host, search->allow_suicide)
&& !localhost_is_eligible(dev, "off", host, search->allow_suicide)
&& !localhost_is_eligible(dev, "on", host, FALSE)) {
goto search_report_results;
}
} else if (!localhost_is_eligible(dev, search->action, host,
search->allow_suicide)) {
goto search_report_results;
}
alias = g_hash_table_lookup(dev->aliases, host);
if (alias == NULL) {
alias = host;
}
check_type = target_list_type(dev);
if (safe_str_eq(check_type, "none")) {
can = TRUE;
} else if (safe_str_eq(check_type, "static-list")) {
        /* Presence in the host map is sufficient.
         * Use this only if every host on which the device can be active can
         * always fence all listed hosts.
         */
if (string_in_list(dev->targets, host)) {
can = TRUE;
} else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)
&& g_hash_table_lookup(dev->aliases, host)) {
can = TRUE;
}
} else if (safe_str_eq(check_type, "dynamic-list")) {
time_t now = time(NULL);
if (dev->targets == NULL || dev->targets_age + 60 < now) {
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev->id, search->host, search->action);
schedule_internal_command(__FUNCTION__, dev, "list", NULL,
search->per_device_timeout, search, dynamic_list_search_cb);
            /* We will respond to this search request asynchronously in the callback */
return;
}
if (string_in_list(dev->targets, alias)) {
can = TRUE;
}
} else if (safe_str_eq(check_type, "status")) {
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev->id, search->host, search->action);
schedule_internal_command(__FUNCTION__, dev, "status", search->host,
search->per_device_timeout, search, status_search_cb);
        /* We will respond to this search request asynchronously in the callback */
return;
} else {
crm_err("Invalid value for " STONITH_ATTR_HOSTCHECK ": %s", check_type);
check_type = "Invalid " STONITH_ATTR_HOSTCHECK;
}
if (safe_str_eq(host, alias)) {
crm_notice("%s is%s eligible to fence (%s) %s: %s",
dev->id, (can? "" : " not"), search->action, host,
check_type);
} else {
crm_notice("%s is%s eligible to fence (%s) %s (aka. '%s'): %s",
dev->id, (can? "" : " not"), search->action, host, alias,
check_type);
}
search_report_results:
search_devices_record_result(search, dev ? dev->id : NULL, can);
}
static void
search_devices(gpointer key, gpointer value, gpointer user_data)
{
stonith_device_t *dev = value;
struct device_search_s *search = user_data;
can_fence_host_with_device(dev, search);
}
#define DEFAULT_QUERY_TIMEOUT 20
static void
get_capable_devices(const char *host, const char *action, int timeout, bool suicide, void *user_data,
void (*callback) (GList * devices, void *user_data))
{
struct device_search_s *search;
int per_device_timeout = DEFAULT_QUERY_TIMEOUT;
int devices_needing_async_query = 0;
char *key = NULL;
const char *check_type = NULL;
GHashTableIter gIter;
stonith_device_t *device = NULL;
if (!g_hash_table_size(device_list)) {
callback(NULL, user_data);
return;
}
search = calloc(1, sizeof(struct device_search_s));
if (!search) {
callback(NULL, user_data);
return;
}
g_hash_table_iter_init(&gIter, device_list);
while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) {
check_type = target_list_type(device);
if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) {
devices_needing_async_query++;
}
}
/* If we have devices that require an async event in order to know what
* nodes they can fence, we have to give the events a timeout. The total
* query timeout is divided among those events. */
if (devices_needing_async_query) {
per_device_timeout = timeout / devices_needing_async_query;
if (!per_device_timeout) {
crm_err("STONITH timeout %ds is too low; using %ds, but consider raising to at least %ds",
timeout, DEFAULT_QUERY_TIMEOUT,
DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
per_device_timeout = DEFAULT_QUERY_TIMEOUT;
} else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) {
crm_notice("STONITH timeout %ds is low for the current configuration;"
" consider raising to at least %ds",
timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
}
}
search->host = host ? strdup(host) : NULL;
search->action = action ? strdup(action) : NULL;
search->per_device_timeout = per_device_timeout;
    /* We are guaranteed this many replies: even if a device somehow gets
     * unregistered during the async search, we will still get the correct
     * number of replies. */
search->replies_needed = g_hash_table_size(device_list);
search->allow_suicide = suicide;
search->callback = callback;
search->user_data = user_data;
/* kick off the search */
crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s",
search->replies_needed,
search->action ? search->action : "",
search->host ? search->host : "");
g_hash_table_foreach(device_list, search_devices, search);
}
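/* Editor's note: the per-device timeout above is the total query timeout
 * divided evenly among devices needing an async query; e.g. a 20s timeout
 * with four dynamic-list/status devices gives 5s per device and logs a
 * notice, while a division that reaches zero falls back to
 * DEFAULT_QUERY_TIMEOUT with an error.
 */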
struct st_query_data {
xmlNode *reply;
char *remote_peer;
char *client_id;
char *target;
char *action;
int call_options;
};
/*!
* \internal
* \brief Add action-specific attributes to query reply XML
*
* \param[in,out] xml XML to add attributes to
* \param[in] action Fence action
* \param[in] device Fence device
*/
static void
add_action_specific_attributes(xmlNode *xml, const char *action,
stonith_device_t *device)
{
int action_specific_timeout;
int delay_max;
int delay_base;
CRM_CHECK(xml && action && device, return);
if (is_action_required(action, device)) {
crm_trace("Action %s is required on %s", action, device->id);
crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1);
}
action_specific_timeout = get_action_timeout(device, action, 0);
if (action_specific_timeout) {
crm_trace("Action %s has timeout %dms on %s",
action, action_specific_timeout, device->id);
crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
}
delay_max = get_action_delay_max(device, action);
if (delay_max > 0) {
crm_trace("Action %s has maximum random delay %dms on %s",
action, delay_max, device->id);
crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max / 1000);
}
delay_base = get_action_delay_base(device, action);
if (delay_base > 0) {
crm_xml_add_int(xml, F_STONITH_DELAY_BASE, delay_base / 1000);
}
if ((delay_max > 0) && (delay_base == 0)) {
crm_trace("Action %s has maximum random delay %dms on %s",
action, delay_max, device->id);
} else if ((delay_max == 0) && (delay_base > 0)) {
crm_trace("Action %s has a static delay of %dms on %s",
action, delay_base, device->id);
} else if ((delay_max > 0) && (delay_base > 0)) {
crm_trace("Action %s has a minimum delay of %dms and a randomly chosen "
"maximum delay of %dms on %s",
action, delay_base, delay_max, device->id);
}
}
/*!
* \internal
* \brief Add "disallowed" attribute to query reply XML if appropriate
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_suicide Whether self-fencing is allowed
*/
static void
add_disallowed(xmlNode *xml, const char *action, stonith_device_t *device,
const char *target, gboolean allow_suicide)
{
if (!localhost_is_eligible(device, action, target, allow_suicide)) {
crm_trace("Action %s on %s is disallowed for local host",
action, device->id);
crm_xml_add(xml, F_STONITH_ACTION_DISALLOWED, XML_BOOLEAN_TRUE);
}
}
/*!
* \internal
* \brief Add child element with action-specific values to query reply XML
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_suicide Whether self-fencing is allowed
*/
static void
add_action_reply(xmlNode *xml, const char *action, stonith_device_t *device,
const char *target, gboolean allow_suicide)
{
xmlNode *child = create_xml_node(xml, F_STONITH_ACTION);
crm_xml_add(child, XML_ATTR_ID, action);
add_action_specific_attributes(child, action, device);
add_disallowed(child, action, device, target, allow_suicide);
}
static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
struct st_query_data *query = user_data;
int available_devices = 0;
xmlNode *dev = NULL;
xmlNode *list = NULL;
GListPtr lpc = NULL;
/* Pack the results into XML */
list = create_xml_node(NULL, __FUNCTION__);
crm_xml_add(list, F_STONITH_TARGET, query->target);
for (lpc = devices; lpc != NULL; lpc = lpc->next) {
stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
const char *action = query->action;
if (!device) {
/* It is possible the device got unregistered while
* determining who can fence the target */
continue;
}
available_devices++;
dev = create_xml_node(list, F_STONITH_DEVICE);
crm_xml_add(dev, XML_ATTR_ID, device->id);
crm_xml_add(dev, "namespace", device->namespace);
crm_xml_add(dev, "agent", device->agent);
crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
/* If the originating fencer wants to reboot the node, and we have a
* capable device that doesn't support "reboot", remap to "off" instead.
*/
if (is_not_set(device->flags, st_device_supports_reboot)
&& safe_str_eq(query->action, "reboot")) {
crm_trace("%s doesn't support reboot, using values for off instead",
device->id);
action = "off";
}
/* Add action-specific values if available */
add_action_specific_attributes(dev, action, device);
if (safe_str_eq(query->action, "reboot")) {
/* A "reboot" *might* get remapped to "off" then "on", so after
* sending the "reboot"-specific values in the main element, we add
* sub-elements for "off" and "on" values.
*
* We short-circuited earlier if "reboot", "off" and "on" are all
* disallowed for the local host. However if only one or two are
* disallowed, we send back the results and mark which ones are
* disallowed. If "reboot" is disallowed, this might cause problems
* with older fencer versions, which won't check for it. Older
* versions will ignore "off" and "on", so they are not a problem.
*/
add_disallowed(dev, action, device, query->target,
is_set(query->call_options, st_opt_allow_suicide));
add_action_reply(dev, "off", device, query->target,
is_set(query->call_options, st_opt_allow_suicide));
add_action_reply(dev, "on", device, query->target, FALSE);
}
/* A query without a target wants device parameters */
if (query->target == NULL) {
xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);
g_hash_table_foreach(device->params, hash2field, attrs);
}
}
crm_xml_add_int(list, F_STONITH_AVAILABLE_DEVICES, available_devices);
if (query->target) {
crm_debug("Found %d matching devices for '%s'", available_devices, query->target);
} else {
crm_debug("%d devices installed", available_devices);
}
if (list != NULL) {
crm_log_xml_trace(list, "Add query results");
add_message_xml(query->reply, F_STONITH_CALLDATA, list);
}
stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id);
free_xml(query->reply);
free(query->remote_peer);
free(query->client_id);
free(query->target);
free(query->action);
free(query);
free_xml(list);
g_list_free_full(devices, free);
}
static void
stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options)
{
struct st_query_data *query = NULL;
const char *action = NULL;
const char *target = NULL;
int timeout = 0;
xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_TRACE);
crm_element_value_int(msg, F_STONITH_TIMEOUT, &timeout);
if (dev) {
const char *device = crm_element_value(dev, F_STONITH_DEVICE);
target = crm_element_value(dev, F_STONITH_TARGET);
action = crm_element_value(dev, F_STONITH_ACTION);
if (device && safe_str_eq(device, "manual_ack")) {
/* No query or reply necessary */
return;
}
}
crm_log_xml_debug(msg, "Query");
query = calloc(1, sizeof(struct st_query_data));
query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok);
query->remote_peer = remote_peer ? strdup(remote_peer) : NULL;
query->client_id = client_id ? strdup(client_id) : NULL;
query->target = target ? strdup(target) : NULL;
query->action = action ? strdup(action) : NULL;
query->call_options = call_options;
get_capable_devices(target, action, timeout,
is_set(call_options, st_opt_allow_suicide),
query, stonith_query_capable_device_cb);
}
#define ST_LOG_OUTPUT_MAX 512
static void
log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output)
{
if (rc == 0) {
next = NULL;
}
if (cmd->victim != NULL) {
do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR,
"Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s",
cmd->action, pid, cmd->id, cmd->client_name, cmd->victim,
cmd->device, rc, pcmk_strerror(rc),
(next? ", retrying with " : ""), (next ? next : ""));
} else {
do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
"Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s",
cmd->action, pid, cmd->device, rc, pcmk_strerror(rc),
(next? ", retrying with " : ""), (next ? next : ""));
}
if (output) {
        /* Logging the whole string confuses syslog when the string is XML */
char *prefix = crm_strdup_printf("%s:%d", cmd->device, pid);
crm_log_output(rc == 0 ? LOG_DEBUG : LOG_WARNING, prefix, output);
free(prefix);
}
}
static void
stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid)
{
xmlNode *reply = NULL;
gboolean bcast = FALSE;
reply = stonith_construct_async_reply(cmd, output, NULL, rc);
if (safe_str_eq(cmd->action, "metadata")) {
/* Too verbose to log */
crm_trace("Metadata query for %s", cmd->device);
output = NULL;
} else if (crm_str_eq(cmd->action, "monitor", TRUE) ||
crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) {
crm_trace("Never broadcast %s replies", cmd->action);
} else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim) && safe_str_neq(cmd->action, "on")) {
crm_trace("Broadcast %s reply for %s", cmd->action, cmd->victim);
crm_xml_add(reply, F_SUBTYPE, "broadcast");
bcast = TRUE;
}
log_operation(cmd, rc, pid, NULL, output);
crm_log_xml_trace(reply, "Reply");
if (bcast) {
crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
} else if (cmd->origin) {
crm_trace("Directed reply to %s", cmd->origin);
send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE);
} else {
crm_trace("Directed local %ssync reply to %s",
(cmd->options & st_opt_sync_call) ? "" : "a-", cmd->client_name);
do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE);
}
if (stand_alone) {
/* Do notification with a clean data object */
xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
crm_xml_add_int(notify_data, F_STONITH_RC, rc);
crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim);
crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op);
crm_xml_add(notify_data, F_STONITH_DELEGATE, "localhost");
crm_xml_add(notify_data, F_STONITH_DEVICE, cmd->device);
crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client);
do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
}
free_xml(reply);
}
static void
cancel_stonith_command(async_command_t * cmd)
{
stonith_device_t *device;
CRM_CHECK(cmd != NULL, return);
if (!cmd->device) {
return;
}
device = g_hash_table_lookup(device_list, cmd->device);
if (device) {
crm_trace("Cancel scheduled %s on %s", cmd->action, device->id);
device->pending_ops = g_list_remove(device->pending_ops, cmd);
}
}
static void
st_child_done(GPid pid, int rc, const char *output, gpointer user_data)
{
stonith_device_t *device = NULL;
stonith_device_t *next_device = NULL;
async_command_t *cmd = user_data;
GListPtr gIter = NULL;
GListPtr gIterNext = NULL;
CRM_CHECK(cmd != NULL, return);
cmd->active_on = NULL;
/* The device is ready to do something else now */
device = g_hash_table_lookup(device_list, cmd->device);
if (device) {
if (!device->verified && (rc == pcmk_ok) &&
(safe_str_eq(cmd->action, "list") ||
safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) {
device->verified = TRUE;
}
mainloop_set_trigger(device->work);
}
crm_debug("Operation '%s' on '%s' completed with rc=%d (%d remaining)",
cmd->action, cmd->device, rc, g_list_length(cmd->device_next));
if (rc == 0) {
GListPtr iter;
/* see if there are any required devices left to execute for this op */
for (iter = cmd->device_next; iter != NULL; iter = iter->next) {
next_device = g_hash_table_lookup(device_list, iter->data);
if (next_device != NULL && is_action_required(cmd->action, next_device)) {
cmd->device_next = iter->next;
break;
}
next_device = NULL;
}
} else if (rc != 0 && cmd->device_next && (is_action_required(cmd->action, device) == FALSE)) {
/* if this device didn't work out, see if there are any others we can try.
* if the failed device was 'required', we can't pick another device. */
next_device = g_hash_table_lookup(device_list, cmd->device_next->data);
cmd->device_next = cmd->device_next->next;
}
/* this operation requires more fencing, hooray! */
if (next_device) {
log_operation(cmd, rc, pid, next_device->id, output);
schedule_stonith_command(cmd, next_device);
/* Prevent cmd from being freed */
cmd = NULL;
goto done;
}
stonith_send_async_reply(cmd, output, rc, pid);
if (rc != 0) {
goto done;
}
/* Check to see if any operations are scheduled to do the exact
* same thing that just completed. If so, rather than
* performing the same fencing operation twice, return the result
* of this operation for all pending commands it matches. */
for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
async_command_t *cmd_other = gIter->data;
gIterNext = gIter->next;
if (cmd == cmd_other) {
continue;
}
        /* A pending scheduled command matches the command that just finished if:
* 1. The client connections are different.
* 2. The node victim is the same.
* 3. The fencing action is the same.
* 4. The device scheduled to execute the action is the same.
*/
if (safe_str_eq(cmd->client, cmd_other->client) ||
safe_str_neq(cmd->victim, cmd_other->victim) ||
safe_str_neq(cmd->action, cmd_other->action) ||
safe_str_neq(cmd->device, cmd_other->device)) {
continue;
}
/* Duplicate merging will do the right thing for either type of remapped
* reboot. If the executing fencer remapped an unsupported reboot to
* off, then cmd->action will be reboot and will be merged with any
* other reboot requests. If the originating fencer remapped a
* topology reboot to off then on, we will get here once with
* cmd->action "off" and once with "on", and they will be merged
* separately with similar requests.
*/
crm_notice
("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s",
cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name);
cmd_list = g_list_remove_link(cmd_list, gIter);
stonith_send_async_reply(cmd_other, output, rc, pid);
cancel_stonith_command(cmd_other);
free_async_command(cmd_other);
g_list_free_1(gIter);
}
done:
free_async_command(cmd);
}
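/* Editor's note: the comparator below sorts in descending priority order, so
 * the g_list_sort() call in stonith_fence_get_devices_cb() tries the
 * highest-priority capable device first.
 */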
static gint
sort_device_priority(gconstpointer a, gconstpointer b)
{
const stonith_device_t *dev_a = a;
const stonith_device_t *dev_b = b;
if (dev_a->priority > dev_b->priority) {
return -1;
} else if (dev_a->priority < dev_b->priority) {
return 1;
}
return 0;
}
static void
stonith_fence_get_devices_cb(GList * devices, void *user_data)
{
async_command_t *cmd = user_data;
stonith_device_t *device = NULL;
crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim);
if (g_list_length(devices) > 0) {
/* Order based on priority */
devices = g_list_sort(devices, sort_device_priority);
device = g_hash_table_lookup(device_list, devices->data);
if (device) {
cmd->device_list = devices;
cmd->device_next = devices->next;
devices = NULL; /* list owned by cmd now */
}
}
/* we have a device, schedule it for fencing. */
if (device) {
schedule_stonith_command(cmd, device);
/* in progress */
return;
}
/* no device found! */
stonith_send_async_reply(cmd, NULL, -ENODEV, 0);
free_async_command(cmd);
g_list_free_full(devices, free);
}
static int
stonith_fence(xmlNode * msg)
{
const char *device_id = NULL;
stonith_device_t *device = NULL;
async_command_t *cmd = create_async_command(msg);
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
if (cmd == NULL) {
return -EPROTO;
}
device_id = crm_element_value(dev, F_STONITH_DEVICE);
if (device_id) {
device = g_hash_table_lookup(device_list, device_id);
if (device == NULL) {
crm_err("Requested device '%s' is not available", device_id);
return -ENODEV;
}
schedule_stonith_command(cmd, device);
} else {
const char *host = crm_element_value(dev, F_STONITH_TARGET);
if (cmd->options & st_opt_cs_nodeid) {
int nodeid = crm_atoi(host, NULL);
crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
if (node) {
host = node->uname;
}
}
/* If we get to here, then self-fencing is implicitly allowed */
get_capable_devices(host, cmd->action, cmd->default_timeout,
TRUE, cmd, stonith_fence_get_devices_cb);
}
return -EINPROGRESS;
}
xmlNode *
stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc)
{
int lpc = 0;
xmlNode *reply = NULL;
const char *name = NULL;
const char *value = NULL;
const char *names[] = {
F_STONITH_OPERATION,
F_STONITH_CALLID,
F_STONITH_CLIENTID,
F_STONITH_CLIENTNAME,
F_STONITH_REMOTE_OP_ID,
F_STONITH_CALLOPTS
};
crm_trace("Creating a basic reply");
reply = create_xml_node(NULL, T_STONITH_REPLY);
crm_xml_add(reply, "st_origin", __FUNCTION__);
crm_xml_add(reply, F_TYPE, T_STONITH_NG);
crm_xml_add(reply, "st_output", output);
crm_xml_add_int(reply, F_STONITH_RC, rc);
CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply);
for (lpc = 0; lpc < DIMOF(names); lpc++) {
name = names[lpc];
value = crm_element_value(request, name);
crm_xml_add(reply, name, value);
}
if (data != NULL) {
crm_trace("Attaching reply output");
add_message_xml(reply, F_STONITH_CALLDATA, data);
}
return reply;
}
static xmlNode *
stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc)
{
xmlNode *reply = NULL;
crm_trace("Creating a basic reply");
reply = create_xml_node(NULL, T_STONITH_REPLY);
crm_xml_add(reply, "st_origin", __FUNCTION__);
crm_xml_add(reply, F_TYPE, T_STONITH_NG);
crm_xml_add(reply, F_STONITH_OPERATION, cmd->op);
crm_xml_add(reply, F_STONITH_DEVICE, cmd->device);
crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client);
crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name);
crm_xml_add(reply, F_STONITH_TARGET, cmd->victim);
crm_xml_add(reply, F_STONITH_ACTION, cmd->op);
crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin);
crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id);
crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options);
crm_xml_add_int(reply, F_STONITH_RC, rc);
crm_xml_add(reply, "st_output", output);
if (data != NULL) {
crm_info("Attaching reply output");
add_message_xml(reply, F_STONITH_CALLDATA, data);
}
return reply;
}
bool fencing_peer_active(crm_node_t *peer)
{
if (peer == NULL) {
return FALSE;
} else if (peer->uname == NULL) {
return FALSE;
} else if (is_set(peer->processes, crm_get_cluster_proc())) {
return TRUE;
}
return FALSE;
}
/*!
 * \internal
 * \brief Determine whether an alternate node is needed to fence the target
 *
 * \param[in] target  Name of the node to be fenced
 *
 * \retval uname of an alternate host to use, if the target would otherwise
 *         have to run its own (topology-based) fencing
 * \retval NULL if no alternate host is needed or none is available
 */
static const char *
check_alternate_host(const char *target)
{
const char *alternate_host = NULL;
crm_trace("Checking if we (%s) can fence %s", stonith_our_uname, target);
if (find_topology_for_host(target) && safe_str_eq(target, stonith_our_uname)) {
GHashTableIter gIter;
crm_node_t *entry = NULL;
g_hash_table_iter_init(&gIter, crm_peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target);
if (fencing_peer_active(entry)
&& safe_str_neq(entry->uname, target)) {
alternate_host = entry->uname;
break;
}
}
if (alternate_host == NULL) {
            crm_err("No alternate host available to handle complex self-fencing request");
g_hash_table_iter_init(&gIter, crm_peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
crm_notice("Peer[%d] %s", entry->id, entry->uname);
}
}
}
return alternate_host;
}
static void
stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
const char *client_id)
{
if (remote_peer) {
send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE);
} else {
do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL);
}
}
static int
handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
const char *remote_peer)
{
int call_options = 0;
int rc = -EOPNOTSUPP;
xmlNode *data = NULL;
xmlNode *reply = NULL;
char *output = NULL;
const char *op = crm_element_value(request, F_STONITH_OPERATION);
const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
if (is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(client == NULL || client->request_id == id);
}
if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
xmlNode *reply = create_xml_node(NULL, "reply");
CRM_ASSERT(client);
crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER);
crm_xml_add(reply, F_STONITH_CLIENTID, client->id);
crm_ipcs_send(client, id, reply, flags);
client->request_id = 0;
free_xml(reply);
return 0;
} else if (crm_str_eq(op, STONITH_OP_EXEC, TRUE)) {
rc = stonith_device_action(request, &output);
} else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) {
const char *call_id = crm_element_value(request, F_STONITH_CALLID);
const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
int op_timeout = 0;
crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout);
do_stonith_async_timeout_update(client_id, call_id, op_timeout);
return 0;
} else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
if (remote_peer) {
create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */
}
stonith_query(request, remote_peer, client_id, call_options);
return 0;
} else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
const char *flag_name = NULL;
CRM_ASSERT(client);
flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE);
if (flag_name) {
crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id);
client->options |= get_stonith_flag(flag_name);
}
flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE);
if (flag_name) {
crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id);
            client->options &= ~get_stonith_flag(flag_name);
}
if (flags & crm_ipc_client_response) {
crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
}
return 0;
} else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) {
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s",
stonith_our_uname,
client ? client->name : remote_peer,
crm_element_value(dev, F_STONITH_ACTION),
crm_element_value(dev, F_STONITH_TARGET));
if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) {
rc = -EINPROGRESS;
}
} else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
if (remote_peer || stand_alone) {
rc = stonith_fence(request);
} else if (call_options & st_opt_manual_ack) {
remote_fencing_op_t *rop = NULL;
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
const char *target = crm_element_value(dev, F_STONITH_TARGET);
crm_notice("Received manual confirmation that %s is fenced", target);
rop = initiate_remote_stonith_op(client, request, TRUE);
rc = stonith_manual_ack(request, rop);
} else {
const char *alternate_host = NULL;
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
const char *target = crm_element_value(dev, F_STONITH_TARGET);
const char *action = crm_element_value(dev, F_STONITH_ACTION);
const char *device = crm_element_value(dev, F_STONITH_DEVICE);
if (client) {
int tolerance = 0;
crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'",
client->name, client->id, action, target, device ? device : "(any)");
crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance);
if (stonith_check_fence_tolerance(tolerance, target, action)) {
rc = 0;
goto done;
}
} else {
crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
remote_peer, action, target, device ? device : "(any)");
}
alternate_host = check_alternate_host(target);
if (alternate_host && client) {
const char *client_id = NULL;
                crm_notice("Forwarding complex self-fencing request to peer %s", alternate_host);
if (client->id) {
client_id = client->id;
} else {
client_id = crm_element_value(request, F_STONITH_CLIENTID);
}
/* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */
create_remote_stonith_op(client_id, request, FALSE);
crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY);
crm_xml_add(request, F_STONITH_CLIENTID, client->id);
send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request,
FALSE);
rc = -EINPROGRESS;
} else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) {
rc = -EINPROGRESS;
}
}
} else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) {
rc = stonith_fence_history(request, &data, remote_peer, call_options);
if (call_options & st_opt_discard_reply) {
/* we don't expect answers to the broadcast
* we might have sent out
*/
free_xml(data);
return pcmk_ok;
}
} else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) {
const char *device_id = NULL;
rc = stonith_device_register(request, &device_id, FALSE);
do_stonith_notify_device(call_options, op, rc, device_id);
} else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) {
xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR);
const char *device_id = crm_element_value(dev, XML_ATTR_ID);
rc = stonith_device_remove(device_id, FALSE);
do_stonith_notify_device(call_options, op, rc, device_id);
} else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) {
char *device_id = NULL;
rc = stonith_level_register(request, &device_id);
do_stonith_notify_level(call_options, op, rc, device_id);
free(device_id);
} else if (crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) {
char *device_id = NULL;
rc = stonith_level_remove(request, &device_id);
        do_stonith_notify_level(call_options, op, rc, device_id);
        free(device_id);
} else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
int node_id = 0;
const char *name = NULL;
crm_element_value_int(request, XML_ATTR_ID, &node_id);
name = crm_element_value(request, XML_ATTR_UNAME);
reap_crm_member(node_id, name);
return pcmk_ok;
} else {
crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
crm_log_xml_warn(request, "UnknownOp");
}
done:
    /* Always reply unless the request is still in progress; in that case, the
     * reply will be sent asynchronously once processing finishes. */
if (rc != -EINPROGRESS) {
crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0,
id, is_set(call_options, st_opt_sync_call), call_options,
crm_element_value(request, F_STONITH_CALLOPTS));
if (is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(client == NULL || client->request_id == id);
}
reply = stonith_construct_reply(request, output, data, rc);
stonith_send_reply(reply, call_options, remote_peer, client_id);
}
free(output);
free_xml(data);
free_xml(reply);
return rc;
}
static void
handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer)
{
const char *op = crm_element_value(request, F_STONITH_OPERATION);
if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
process_remote_stonith_query(request);
} else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
process_remote_stonith_exec(request);
} else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
/* Reply to a complex fencing op */
process_remote_stonith_exec(request);
} else {
crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer);
crm_log_xml_warn(request, "UnknownOp");
}
}
void
stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
const char *remote_peer)
{
int call_options = 0;
int rc = 0;
gboolean is_reply = FALSE;
/* Copy op for reporting. The original might get freed by handle_reply()
* before we use it in crm_debug():
* handle_reply()
* |- process_remote_stonith_exec()
* |-- remote_op_done()
* |--- handle_local_reply_and_notify()
* |---- crm_xml_add(...F_STONITH_OPERATION...)
* |--- free_xml(op->request)
*/
char *op = crm_element_value_copy(request, F_STONITH_OPERATION);
if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_TRACE)) {
is_reply = TRUE;
}
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "",
id, client ? client->name : remote_peer, call_options);
if (is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(client == NULL || client->request_id == id);
}
if (is_reply) {
handle_reply(client, request, remote_peer);
} else {
rc = handle_request(client, id, flags, request, remote_peer);
}
crm_debug("Processed %s%s from %s: %s (%d)", op,
is_reply ? " reply" : "", client ? client->name : remote_peer,
rc > 0 ? "" : pcmk_strerror(rc), rc);
free(op);
}
diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c
index 18830d3464..f5b4845acd 100644
--- a/daemons/pacemakerd/pacemakerd.c
+++ b/daemons/pacemakerd/pacemakerd.c
@@ -1,1421 +1,1416 @@
/*
* Copyright 2010-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include "pacemakerd.h"
#include <pwd.h>
#include <grp.h>
#include <poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/reboot.h>
#include <crm/crm.h>  /* indirectly: CRM_EX_* */
#include <crm/cib/internal.h>  /* cib_channel_ro */
#include <crm/msg_xml.h>
#include <crm/common/ipcs.h>
#include <crm/common/mainloop.h>
#include <crm/cluster/internal.h>
#include <crm/cluster.h>
#include <crm/common/ipc_internal.h>  /* PCMK__SPECIAL_PID*, ... */
#ifdef SUPPORT_COROSYNC
#include <corosync/cfg.h>
#endif
#include <dirent.h>
#include <ctype.h>
static gboolean pcmk_quorate = FALSE;
static gboolean fatal_error = FALSE;
static GMainLoop *mainloop = NULL;
static bool global_keep_tracking = false;
#define PCMK_PROCESS_CHECK_INTERVAL 5
static const char *local_name = NULL;
static uint32_t local_nodeid = 0;
static crm_trigger_t *shutdown_trigger = NULL;
static const char *pid_file = "/var/run/pacemaker.pid";
typedef struct pcmk_child_s {
int pid;
long flag;
int start_seq;
int respawn_count;
gboolean respawn;
const char *name;
const char *uid;
const char *command;
const char *endpoint; /* IPC server name */
gboolean active_before_startup;
} pcmk_child_t;
/* Index into the array below */
#define PCMK_CHILD_CONTROLD 3
static pcmk_child_t pcmk_children[] = {
{
0, crm_proc_none, 0, 0, FALSE, "none",
NULL, NULL
},
{
0, crm_proc_execd, 3, 0, TRUE, "pacemaker-execd",
NULL, CRM_DAEMON_DIR "/pacemaker-execd",
CRM_SYSTEM_LRMD
},
{
0, crm_proc_based, 1, 0, TRUE, "pacemaker-based",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-based",
CIB_CHANNEL_RO
},
{
0, crm_proc_controld, 6, 0, TRUE, "pacemaker-controld",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-controld",
CRM_SYSTEM_CRMD
},
{
0, crm_proc_attrd, 4, 0, TRUE, "pacemaker-attrd",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-attrd",
T_ATTRD
},
{
0, crm_proc_schedulerd, 5, 0, TRUE, "pacemaker-schedulerd",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-schedulerd",
CRM_SYSTEM_PENGINE
},
{
0, crm_proc_fenced, 2, 0, TRUE, "pacemaker-fenced",
NULL, CRM_DAEMON_DIR "/pacemaker-fenced",
"stonith-ng"
},
};
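/* Editor's note: children are started in ascending start_seq order, so the
 * effective startup order above is pacemaker-based (1), pacemaker-fenced (2),
 * pacemaker-execd (3), pacemaker-attrd (4), pacemaker-schedulerd (5),
 * pacemaker-controld (6); shutdown proceeds in the reverse order.
 */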
static gboolean check_active_before_startup_processes(gpointer user_data);
static int pcmk_child_active(pcmk_child_t *child);
static gboolean start_child(pcmk_child_t * child);
static gboolean update_node_processes(uint32_t id, const char *uname,
uint32_t procs);
void update_process_clients(crm_client_t *client);
static uint32_t
get_process_list(void)
{
int lpc = 0;
uint32_t procs = crm_get_cluster_proc();
for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) {
if (pcmk_children[lpc].pid != 0) {
procs |= pcmk_children[lpc].flag;
}
}
return procs;
}
static void
pcmk_process_exit(pcmk_child_t * child)
{
child->pid = 0;
child->active_before_startup = FALSE;
    /* Broadcast the fact that one of our processes died ASAP
     *
     * Try to get some logging of the cause out first, though, because we are
     * probably about to get fenced.
     *
     * Potentially do this only if respawn_count > N, to allow for local
     * recovery.
     */
update_node_processes(local_nodeid, NULL, get_process_list());
child->respawn_count += 1;
if (child->respawn_count > MAX_RESPAWN) {
crm_err("Child respawn count exceeded by %s", child->name);
child->respawn = FALSE;
}
if (shutdown_trigger) {
        /* Resume step-wise shutdown (a TRUE return from the trigger prevents parallelizing) */
mainloop_set_trigger(shutdown_trigger);
        /* Intended to speed up propagating the expected lay-off of the daemons? */
update_node_processes(local_nodeid, NULL, get_process_list());
} else if (!child->respawn) {
/* nothing to do */
} else if (crm_is_true(getenv("PCMK_fail_fast"))) {
crm_err("Rebooting system because of %s", child->name);
pcmk_panic(__FUNCTION__);
} else if (pcmk_child_active(child) == 1) {
        crm_warn("Not respawning child process %s for now:"
                 " it still appears functional via its %s IPC endpoint",
                 child->name, child->endpoint);
        /* Keep monitoring how it evolves, and start a new process if it goes bad */
child->active_before_startup = TRUE;
if (!global_keep_tracking) {
global_keep_tracking = true;
g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL,
check_active_before_startup_processes, NULL);
}
} else {
crm_notice("Respawning failed child process: %s", child->name);
start_child(child);
}
}
static void pcmk_exit_with_cluster(int exitcode)
{
#ifdef SUPPORT_COROSYNC
corosync_cfg_handle_t cfg_handle;
cs_error_t err;
if (exitcode == CRM_EX_FATAL) {
crm_info("Asking Corosync to shut down");
err = corosync_cfg_initialize(&cfg_handle, NULL);
if (err != CS_OK) {
crm_warn("Unable to open handle to corosync to close it down. err=%d", err);
}
err = corosync_cfg_try_shutdown(cfg_handle, COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE);
if (err != CS_OK) {
crm_warn("Corosync shutdown failed. err=%d", err);
}
corosync_cfg_finalize(cfg_handle);
}
#endif
crm_exit(exitcode);
}
static void
pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
{
pcmk_child_t *child = mainloop_child_userdata(p);
const char *name = mainloop_child_name(p);
if (signo) {
do_crm_log(((signo == SIGKILL)? LOG_WARNING : LOG_ERR),
"%s[%d] terminated with signal %d (core=%d)",
name, pid, signo, core);
} else {
switch(exitcode) {
case CRM_EX_OK:
crm_info("%s[%d] exited with status %d (%s)",
name, pid, exitcode, crm_exit_str(exitcode));
break;
case CRM_EX_FATAL:
crm_warn("Shutting cluster down because %s[%d] had fatal failure",
name, pid);
child->respawn = FALSE;
fatal_error = TRUE;
pcmk_shutdown(SIGTERM);
break;
case CRM_EX_PANIC:
do_crm_log_always(LOG_EMERG,
"%s[%d] instructed the machine to reset",
name, pid);
child->respawn = FALSE;
fatal_error = TRUE;
pcmk_panic(__FUNCTION__);
pcmk_shutdown(SIGTERM);
break;
default:
crm_err("%s[%d] exited with status %d (%s)",
name, pid, exitcode, crm_exit_str(exitcode));
break;
}
}
pcmk_process_exit(child);
}
static gboolean
stop_child(pcmk_child_t * child, int signal)
{
if (signal == 0) {
signal = SIGTERM;
}
    /* Why skip a PID of 1?
       - on FreeBSD, that is how an untrackable process behind an IPC endpoint
         is masqueraded
       - elsewhere, it designates the "init" task; in particular, with
         systemd's arrangement of socket-based activation, this is quite real */
if (child->command == NULL || child->pid == PCMK__SPECIAL_PID) {
crm_debug("Nothing to do for child \"%s\" (process %lld)",
child->name, (long long) PCMK__SPECIAL_PID_AS_0(child->pid));
return TRUE;
}
if (child->pid <= 0) {
crm_trace("Client %s not running", child->name);
return TRUE;
}
errno = 0;
if (kill(child->pid, signal) == 0) {
crm_notice("Stopping %s "CRM_XS" sent signal %d to process %d",
child->name, signal, child->pid);
} else {
crm_perror(LOG_ERR, "Could not stop %s (process %d) with signal %d",
child->name, child->pid, signal);
}
return TRUE;
}
static char *opts_default[] = { NULL, NULL };
static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL };
/* TODO: once libqb can juggle IPC endpoints carried over as bare file
   descriptors (https://github.com/ClusterLabs/libqb/issues/325), it should
   hand those descriptors over here if/once they are successfully pre-opened
   in (presumably) pcmk_child_active, to close any remaining window for
   races */
static gboolean
start_child(pcmk_child_t * child)
{
int lpc = 0;
uid_t uid = 0;
gid_t gid = 0;
struct rlimit oflimits;
gboolean use_valgrind = FALSE;
gboolean use_callgrind = FALSE;
const char *devnull = "/dev/null";
const char *env_valgrind = getenv("PCMK_valgrind_enabled");
const char *env_callgrind = getenv("PCMK_callgrind_enabled");
child->active_before_startup = FALSE;
if (child->command == NULL) {
crm_info("Nothing to do for child \"%s\"", child->name);
return TRUE;
}
if (env_callgrind != NULL && crm_is_true(env_callgrind)) {
use_callgrind = TRUE;
use_valgrind = TRUE;
} else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) {
use_callgrind = TRUE;
use_valgrind = TRUE;
} else if (env_valgrind != NULL && crm_is_true(env_valgrind)) {
use_valgrind = TRUE;
} else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) {
use_valgrind = TRUE;
}
if (use_valgrind && strlen(VALGRIND_BIN) == 0) {
crm_warn("Cannot enable valgrind for %s:"
" The location of the valgrind binary is unknown", child->name);
use_valgrind = FALSE;
}
if (child->uid) {
if (crm_user_lookup(child->uid, &uid, &gid) < 0) {
crm_err("Invalid user (%s) for %s: not found", child->uid, child->name);
return FALSE;
}
crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name);
}
child->pid = fork();
CRM_ASSERT(child->pid != -1);
if (child->pid > 0) {
/* parent */
mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit);
crm_info("Forked child %d for process %s%s", child->pid, child->name,
use_valgrind ? " (valgrind enabled: " VALGRIND_BIN ")" : "");
update_node_processes(local_nodeid, NULL, get_process_list());
return TRUE;
} else {
/* Start a new session */
(void)setsid();
/* Setup the two alternate arg arrays */
opts_vgrind[0] = strdup(VALGRIND_BIN);
if (use_callgrind) {
opts_vgrind[1] = strdup("--tool=callgrind");
opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p");
opts_vgrind[3] = strdup(child->command);
opts_vgrind[4] = NULL;
} else {
opts_vgrind[1] = strdup(child->command);
opts_vgrind[2] = NULL;
opts_vgrind[3] = NULL;
opts_vgrind[4] = NULL;
}
opts_default[0] = strdup(child->command);
if(gid) {
// Whether we need root group access to talk to cluster layer
bool need_root_group = TRUE;
if (is_corosync_cluster()) {
/* Corosync clusters can drop root group access, because we set
* uidgid.gid.${gid}=1 via CMAP, which allows these processes to
* connect to corosync.
*/
need_root_group = FALSE;
}
// Drop root group access if not needed
if (!need_root_group && (setgid(gid) < 0)) {
crm_perror(LOG_ERR, "Could not set group to %d", gid);
}
/* Initialize supplementary groups to only those always granted to
* the user, plus haclient (so we can access IPC).
*/
if (initgroups(child->uid, gid) < 0) {
crm_err("Cannot initialize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno);
}
}
if (uid && setuid(uid) < 0) {
crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid);
}
/* Close all open file descriptors */
getrlimit(RLIMIT_NOFILE, &oflimits);
for (lpc = 0; lpc < oflimits.rlim_cur; lpc++) {
close(lpc);
}
(void)open(devnull, O_RDONLY); /* Stdin: fd 0 */
(void)open(devnull, O_WRONLY); /* Stdout: fd 1 */
(void)open(devnull, O_WRONLY); /* Stderr: fd 2 */
if (use_valgrind) {
(void)execvp(VALGRIND_BIN, opts_vgrind);
} else {
(void)execvp(child->command, opts_default);
}
crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command);
crm_exit(CRM_EX_FATAL);
}
return TRUE; /* never reached */
}
static gboolean
escalate_shutdown(gpointer data)
{
pcmk_child_t *child = data;
if (child->pid == PCMK__SPECIAL_PID) {
pcmk_process_exit(child);
} else if (child->pid) {
/* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */
crm_err("Child %s not terminating in a timely manner, forcing", child->name);
stop_child(child, SIGSEGV);
}
return FALSE;
}
#define SHUTDOWN_ESCALATION_PERIOD 180000 /* 3m */
static gboolean
pcmk_shutdown_worker(gpointer user_data)
{
static int phase = 0;
static time_t next_log = 0;
static int max = SIZEOF(pcmk_children);
int lpc = 0;
if (phase == 0) {
crm_notice("Shutting down Pacemaker");
phase = max;
}
for (; phase > 0; phase--) {
/* Don't stop anything with start_seq < 1 */
for (lpc = max - 1; lpc >= 0; lpc--) {
pcmk_child_t *child = &(pcmk_children[lpc]);
if (phase != child->start_seq) {
continue;
}
if (child->pid) {
time_t now = time(NULL);
if (child->respawn) {
if (child->pid == PCMK__SPECIAL_PID) {
crm_warn("The process behind %s IPC cannot be"
" terminated, so either wait the graceful"
" period of %ld s for its native termination"
" if it vitally depends on some other daemons"
" going down in a controlled way already,"
" or locate and kill the correct %s process"
" on your own; set PCMK_fail_fast=1 to avoid"
" this altogether next time around",
child->name, (long) SHUTDOWN_ESCALATION_PERIOD,
child->command);
}
next_log = now + 30;
child->respawn = FALSE;
stop_child(child, SIGTERM);
if (phase < pcmk_children[PCMK_CHILD_CONTROLD].start_seq) {
g_timeout_add(SHUTDOWN_ESCALATION_PERIOD,
escalate_shutdown, child);
}
} else if (now >= next_log) {
next_log = now + 30;
crm_notice("Still waiting for %s to terminate "
CRM_XS " pid=%d seq=%d",
child->name, child->pid, child->start_seq);
}
return TRUE;
}
/* cleanup */
crm_debug("%s confirmed stopped", child->name);
child->pid = 0;
}
}
/* send_cluster_id(); */
crm_notice("Shutdown complete");
{
const char *delay = daemon_option("shutdown_delay");
if(delay) {
sync();
sleep(crm_get_msec(delay) / 1000);
}
}
g_main_loop_quit(mainloop);
if (fatal_error) {
crm_notice("Shutting down and staying down after fatal error");
pcmk_exit_with_cluster(CRM_EX_FATAL);
}
return TRUE;
}
static void
pcmk_ignore(int nsig)
{
crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig);
}
static void
pcmk_sigquit(int nsig)
{
pcmk_panic(__FUNCTION__);
}
void
pcmk_shutdown(int nsig)
{
if (shutdown_trigger == NULL) {
shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL);
}
mainloop_set_trigger(shutdown_trigger);
}
static int32_t
pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
pcmk_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
/* \todo Document whether this IPC dispatch callback's return value is meaningful */
static int32_t
pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
const char *task = NULL;
crm_client_t *c = crm_client_get(qbc);
xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags);
crm_ipcs_send_ack(c, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg == NULL) {
return 0;
}
task = crm_element_value(msg, F_CRM_TASK);
if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) {
/* Time to quit */
crm_notice("Shutting down in response to ticket %s (%s)",
crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN));
pcmk_shutdown(SIGTERM);
} else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
/* Send to everyone */
struct iovec *iov;
int id = 0;
const char *name = NULL;
crm_element_value_int(msg, XML_ATTR_ID, &id);
name = crm_element_value(msg, XML_ATTR_UNAME);
crm_notice("Instructing peers to remove references to node %s/%u", name, id);
iov = calloc(1, sizeof(struct iovec));
iov->iov_base = dump_xml_unformatted(msg);
iov->iov_len = 1 + strlen(iov->iov_base);
send_cpg_iov(iov);
} else {
update_process_clients(c);
}
free_xml(msg);
return 0;
}
/* \todo Document whether this callback's return value is meaningful */
static int32_t
pcmk_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
crm_client_destroy(client);
return 0;
}
static void
pcmk_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
pcmk_ipc_closed(c);
}
struct qb_ipcs_service_handlers mcp_ipc_callbacks = {
.connection_accept = pcmk_ipc_accept,
.connection_created = pcmk_ipc_created,
.msg_process = pcmk_ipc_dispatch,
.connection_closed = pcmk_ipc_closed,
.connection_destroyed = pcmk_ipc_destroy
};
/*!
* \internal
* \brief Send an XML message with process list of all known peers to client(s)
*
* \param[in] client Send message to this client, or all clients if NULL
*/
void
update_process_clients(crm_client_t *client)
{
GHashTableIter iter;
crm_node_t *node = NULL;
xmlNode *update = create_xml_node(NULL, "nodes");
if (is_corosync_cluster()) {
crm_xml_add_int(update, "quorate", pcmk_quorate);
}
g_hash_table_iter_init(&iter, crm_peer_cache);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
xmlNode *xml = create_xml_node(update, "node");
crm_xml_add_int(xml, "id", node->id);
crm_xml_add(xml, "uname", node->uname);
crm_xml_add(xml, "state", node->state);
crm_xml_add_int(xml, "processes", node->processes);
}
if(client) {
crm_trace("Sending process list to client %s", client->id);
crm_ipcs_send(client, 0, update, crm_ipc_server_event);
} else {
crm_trace("Sending process list to %d clients", crm_hash_table_size(client_connections));
g_hash_table_iter_init(&iter, client_connections);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & client)) {
crm_ipcs_send(client, 0, update, crm_ipc_server_event);
}
}
free_xml(update);
}
/*!
* \internal
* \brief Send a CPG message with local node's process list to all peers
*/
static void
update_process_peers(void)
{
/* Do nothing for corosync-2 based clusters */
struct iovec *iov = calloc(1, sizeof(struct iovec));
CRM_ASSERT(iov);
    if (local_name) {
        iov->iov_base = crm_strdup_printf("<node uname=\"%s\" proclist=\"%u\"/>",
                                          local_name, get_process_list());
    } else {
        iov->iov_base = crm_strdup_printf("<node proclist=\"%u\"/>",
                                          get_process_list());
    }
iov->iov_len = strlen(iov->iov_base) + 1;
crm_trace("Sending %s", (char*) iov->iov_base);
send_cpg_iov(iov);
}
/*!
* \internal
* \brief Update a node's process list, notifying clients and peers if needed
*
* \param[in] id Node ID of affected node
* \param[in] uname Uname of affected node
* \param[in] procs Affected node's process list mask
*
* \return TRUE if the process list changed, FALSE otherwise
*/
static gboolean
update_node_processes(uint32_t id, const char *uname, uint32_t procs)
{
gboolean changed = FALSE;
crm_node_t *node = crm_get_peer(id, uname);
if (procs != 0) {
if (procs != node->processes) {
crm_debug("Node %s now has process list: %.32x (was %.32x)",
node->uname, procs, node->processes);
node->processes = procs;
changed = TRUE;
/* If local node's processes have changed, notify clients/peers */
if (id == local_nodeid) {
update_process_clients(NULL);
update_process_peers();
}
} else {
crm_trace("Node %s still has process list: %.32x", node->uname, procs);
}
}
return changed;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"shutdown", 0, 0, 'S', "\tInstruct Pacemaker to shutdown on this machine"},
{"features", 0, 0, 'F', "\tDisplay the full version and list of features Pacemaker was built with"},
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"foreground", 0, 0, 'f', "\t(Ignored) Pacemaker always runs in the foreground"},
{"pid-file", 1, 0, 'p', "\t(Ignored) Daemon pid file location"},
{"standby", 0, 0, 's', "\tStart node in standby state"},
{NULL, 0, 0, 0}
};
/* *INDENT-ON* */
static void
mcp_chown(const char *path, uid_t uid, gid_t gid)
{
int rc = chown(path, uid, gid);
if (rc < 0) {
crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s",
path, CRM_DAEMON_USER, gid, pcmk_strerror(errno));
}
}
/*!
* \internal
* \brief Check the liveness of the child based on IPC name and PID if tracked
*
* \param[inout] child Child tracked data
*
 * \return 0 if no trace of the child's liveness was detected (provided it is
 *         detectable at all, per at least one of the two properties),
 *         1 if everything is fine, 2 if it's up per PID but not per IPC
 *         end-point (still starting?), -1 on error, and -2 when the child's
 *         IPC is blocked by an unauthorized process (a log message is
 *         emitted in the latter two cases)
 *
 * \note This function doesn't modify any of \p child members but \c pid,
 *       and does not manipulate processes as such, beyond invoking
 *       \c stop_child in one particular case (when, for whatever reason,
 *       a different authentic process holds the IPC end-point).
*/
static int
pcmk_child_active(pcmk_child_t *child) {
static uid_t cl_uid = 0;
static gid_t cl_gid = 0;
const uid_t root_uid = 0;
const gid_t root_gid = 0;
const uid_t *ref_uid;
const gid_t *ref_gid;
int ret = 0;
pid_t ipc_pid = 0;
if (child->endpoint == NULL
&& (child->pid <= 0 || child->pid == PCMK__SPECIAL_PID)) {
crm_err("Cannot track child %s for missing both API end-point and PID",
child->name);
ret = -1; /* misuse of the function when child is not trackable */
} else if (child->endpoint != NULL) {
ref_uid = (child->uid != NULL) ? &cl_uid : &root_uid;
ref_gid = (child->uid != NULL) ? &cl_gid : &root_gid;
if (child->uid != NULL && !cl_uid && !cl_gid
&& crm_user_lookup(CRM_DAEMON_USER, &cl_uid, &cl_gid) < 0) {
crm_err("Could not find user and group IDs for user %s",
CRM_DAEMON_USER);
ret = -1;
} else if ((ret = pcmk__ipc_is_authentic_process_active(child->endpoint,
*ref_uid, *ref_gid,
&ipc_pid)) < 0) {
/* game over */
} else if (child->pid <= 0) {
        /* either a new child to be initialized, or the PID gets reset to
           zero for further investigation when ret == 0 */
child->pid = ipc_pid;
} else if (ipc_pid && child->pid != ipc_pid) {
/* ultimately strange for ret == 1; either way, investigate */
ret = 0;
}
}
if (!ret) {
        /* When no IPC-based liveness was detected (including when a child
           without IPC is ever tracked), or when it was detected for a
           different _authentic_ process, fall back to checking the PID;
           this is safe on FreeBSD, since the only possible change is from a
           proper child's PID to the "special" PID of 1 behind a more loosely
           related process */
ret = crm_pid_active(child->pid, child->name);
if (ipc_pid && (ret != 1
|| ipc_pid == PCMK__SPECIAL_PID
|| crm_pid_active(ipc_pid, child->name) == 1)) {
            if (ret == 1) {
                /* Assume the children's lifecycle involves no
                   forking-while-retaining-the-IPC-socket, hence the tracking
                   got out of sync purely because of some external (esoteric?)
                   force (a user-initiated forced process "refresh"? an
                   intentional race on start-up, even?), and that switching
                   over to this other detected, authentic instance already
                   holding the IPC is a better trade-off than first
                   "neutralizing" it so as to give the original (or possibly
                   a newly spawned) daemon process room to operate */
                /* The tracked process does not hold the IPC after all, so
                   kill it (what about corosync CPG?) */
                stop_child(child, SIGKILL);
} else {
ret = 1;
}
child->pid = ipc_pid;
} else if (ret == 1) {
ret = 2; /* up per PID, but not per IPC (still starting?) */
} else if (!child->pid && ret == -1) {
ret = 0; /* correct -1 on FreeBSD from above back to 0 */
}
}
return ret;
}
static gboolean
check_active_before_startup_processes(gpointer user_data)
{
int start_seq = 1, lpc = 0;
static int max = SIZEOF(pcmk_children);
gboolean keep_tracking = FALSE;
for (start_seq = 1; start_seq < max; start_seq++) {
for (lpc = 0; lpc < max; lpc++) {
if (pcmk_children[lpc].active_before_startup == FALSE) {
/* we are already tracking it as a child process. */
continue;
} else if (start_seq != pcmk_children[lpc].start_seq) {
continue;
} else {
const char *name = pcmk_children[lpc].name;
int ret;
switch ((ret = pcmk_child_active(&pcmk_children[lpc]))) {
case 1:
break;
case 0:
case 2: /* this very case: it was OK once already */
if (pcmk_children[lpc].respawn == TRUE) {
- /* presumably after crash, hence critical */
- crm_crit("Process %s terminated (pid=%lld)%s", \
- name, (long long)
- PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
- ret ? ", at least per IPC end-point that went AWOL"
- : "");
+ crm_err("%s[%d] terminated%s", name,
+ PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
+ ret ? " as IPC server" : "");
} else {
/* orderly shutdown */
- crm_notice("Process %s terminated (pid=%lld)%s", \
- name, (long long)
+ crm_notice("%s[%d] terminated%s", name,
PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
- ret ? ", at least per IPC end-point that went AWOL"
- : "");
+ ret ? " as IPC server" : "");
}
pcmk_process_exit(&(pcmk_children[lpc]));
continue;
default:
crm_crit("Unexpected value from pcmk_child_active:"
" %d (pid=%lld)", ret,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[lpc].pid));
/* fall through */
case -1:
case -2:
/* message(s) already emitted */
crm_exit(CRM_EX_FATAL);
break; /* static analysis/noreturn */
}
}
/* at least one of the processes found at startup
* is still going, so keep this recurring timer around */
keep_tracking = TRUE;
}
}
global_keep_tracking = keep_tracking;
return keep_tracking;
}
/*!
 * \internal
 * \brief Initial one-off check of the pre-existing "child" processes
 *
 * By "child" process, we mean a subdaemon that defines an API end-point
 * (as of this writing, all of them do) -- the possible complement is skipped,
 * as it is deemed to have no such shared resource to conflict over, hence it
 * can presumably be started anew without hesitation. If that ever stops
 * holding true, the concept of a shared resource will have to be generalized
 * beyond the API end-point.
 *
 * For the boundary cases where the "child" is still starting (its IPC
 * end-point has yet to be witnessed), or, more rarely (practically FreeBSD
 * only), where a pre-existing "untrackable" authentic process exists, we give
 * the situation some time to unfold in the right direction, meaning that said
 * socket will appear, or that the unattainable process will disappear per the
 * observable IPC, respectively.
 *
 * \return 0 if no such "child" process was found, positive number X when X
 *         "children" were detected, -1 on an internal error, -2 when any
 *         would-be-used IPC is blocked by an unauthorized process
 *
 * \note Since this runs at the very start, the \c respawn_count fields of
 *       particular children get temporarily overloaded with "rounds of
 *       waiting" tracking, restored once we are about to finish successfully
 *       (i.e. return a value >= 0), and left unrestored otherwise. One way
 *       to suppress the liveness detection logic for a particular child is
 *       to set this value to a negative number.
 */
#define WAIT_TRIES 4 /* together with interleaved sleeps, worst case ~ 1s */
static int
find_and_track_existing_processes(void)
{
unsigned tracking = 0U;
bool wait_in_progress;
int cur;
size_t i, rounds;
for (rounds = 1; rounds <= WAIT_TRIES; rounds++) {
wait_in_progress = false;
for (i = 0; i < SIZEOF(pcmk_children); i++) {
if (!pcmk_children[i].endpoint
|| pcmk_children[i].respawn_count < 0
|| !(cur = pcmk_child_active(&pcmk_children[i]))) {
                /* Speculatively, don't give up in the context of the
                   pcmk_child_active check if there are more rounds to come
                   for other reasons, but don't artificially wait just
                   because of this, since we'd preferably start ASAP */
continue;
}
pcmk_children[i].respawn_count = rounds;
switch (cur) {
case 1:
if (pcmk_children[i].pid == PCMK__SPECIAL_PID) {
if (crm_is_true(getenv("PCMK_fail_fast"))) {
crm_crit("Cannot reliably track pre-existing"
" authentic process behind %s IPC on this"
" platform and PCMK_fail_fast requested",
pcmk_children[i].endpoint);
return -1;
                    } else if (pcmk_children[i].respawn_count == WAIT_TRIES) {
                        crm_notice("Assuming pre-existing authentic, though"
                                   " on this platform untrackable, process"
                                   " behind %s IPC is stable (was in %d"
                                   " previous samples), so rather than"
                                   " bailing out (PCMK_fail_fast not"
                                   " requested), we just switch to less"
                                   " optimal IPC-based liveness monitoring"
                                   " (not very suitable for heavy load)",
                                   pcmk_children[i].name, WAIT_TRIES - 1);
                        crm_warn("The process behind %s IPC cannot be"
                                 " terminated, so the overall shutdown"
                                 " will get delayed implicitly (%ld s),"
                                 " which serves as a graceful period for"
                                 " its native termination if it vitally"
                                 " depends on some other daemons going"
                                 " down in a controlled way already",
                                 pcmk_children[i].name,
                                 (long) (SHUTDOWN_ESCALATION_PERIOD / 1000));
} else {
wait_in_progress = true;
crm_warn("Cannot reliably track pre-existing"
" authentic process behind %s IPC on this"
" platform, can still disappear in %d"
" attempt(s)", pcmk_children[i].endpoint,
WAIT_TRIES - pcmk_children[i].respawn_count);
continue;
}
}
crm_notice("Tracking existing %s process (pid=%lld)",
pcmk_children[i].name,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid));
pcmk_children[i].respawn_count = -1; /* 0~keep watching */
pcmk_children[i].active_before_startup = TRUE;
tracking++;
break;
case 2:
if (pcmk_children[i].respawn_count == WAIT_TRIES) {
crm_crit("%s IPC end-point for existing authentic"
" process %lld did not (re)appear",
pcmk_children[i].endpoint,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid));
return -1;
}
wait_in_progress = true;
crm_warn("Cannot find %s IPC end-point for existing"
" authentic process %lld, can still (re)appear"
" in %d attempts (?)",
pcmk_children[i].endpoint,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid),
WAIT_TRIES - pcmk_children[i].respawn_count);
continue;
case -1:
case -2:
return cur; /* messages already emitted */
default:
crm_crit("Unexpected condition"CRM_XS"cur=%d", cur);
return -1; /* unexpected condition */
}
}
if (!wait_in_progress) {
break;
}
(void) poll(NULL, 0, 250); /* a bit for changes to possibly happen */
}
for (i = 0; i < SIZEOF(pcmk_children); i++) {
pcmk_children[i].respawn_count = 0; /* restore pristine state */
}
if (tracking) {
g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL,
check_active_before_startup_processes, NULL);
}
return (tracking > INT_MAX) ? INT_MAX : tracking;
}
static void
init_children_processes(void)
{
int start_seq = 1, lpc = 0;
static int max = SIZEOF(pcmk_children);
/* start any children that have not been detected */
for (start_seq = 1; start_seq < max; start_seq++) {
/* don't start anything with start_seq < 1 */
for (lpc = 0; lpc < max; lpc++) {
if (pcmk_children[lpc].pid) {
/* we are already tracking it */
continue;
}
if (start_seq == pcmk_children[lpc].start_seq) {
start_child(&(pcmk_children[lpc]));
}
}
}
/* From this point on, any daemons being started will be due to
* respawning rather than node start.
*
* This may be useful for the daemons to know
*/
setenv("PCMK_respawned", "true", 1);
}
static void
mcp_cpg_destroy(gpointer user_data)
{
crm_crit("Lost connection to cluster layer, shutting down");
crm_exit(CRM_EX_DISCONNECT);
}
/*!
* \internal
* \brief Process a CPG message (process list or manual peer cache removal)
*
* \param[in] handle CPG connection (ignored)
* \param[in] groupName CPG group name (ignored)
* \param[in] nodeid ID of affected node
* \param[in] pid Process ID (ignored)
* \param[in] msg CPG XML message
* \param[in] msg_len Length of msg in bytes (ignored)
*/
static void
mcp_cpg_deliver(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
xmlNode *xml = string2xml(msg);
const char *task = crm_element_value(xml, F_CRM_TASK);
crm_trace("Received CPG message (%s): %.200s",
(task? task : "process list"), (char*)msg);
if (task == NULL) {
if (nodeid == local_nodeid) {
crm_debug("Ignoring message with local node's process list");
} else {
uint32_t procs = 0;
const char *uname = crm_element_value(xml, "uname");
crm_element_value_int(xml, "proclist", (int *)&procs);
if (update_node_processes(nodeid, uname, procs)) {
update_process_clients(NULL);
}
}
} else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
int id = 0;
const char *name = NULL;
crm_element_value_int(xml, XML_ATTR_ID, &id);
name = crm_element_value(xml, XML_ATTR_UNAME);
reap_crm_member(id, name);
}
if (xml != NULL) {
free_xml(xml);
}
}
static void
mcp_cpg_membership(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries)
{
/* Update peer cache if needed */
pcmk_cpg_membership(handle, groupName, member_list, member_list_entries,
left_list, left_list_entries,
joined_list, joined_list_entries);
/* Always broadcast our own presence after any membership change */
update_process_peers();
}
static gboolean
mcp_quorum_callback(unsigned long long seq, gboolean quorate)
{
pcmk_quorate = quorate;
return TRUE;
}
static void
mcp_quorum_destroy(gpointer user_data)
{
crm_info("connection lost");
}
int
main(int argc, char **argv)
{
int rc;
int flag;
int argerr = 0;
int option_index = 0;
gboolean shutdown = FALSE;
uid_t pcmk_uid = 0;
gid_t pcmk_gid = 0;
struct rlimit cores;
crm_ipc_t *old_instance = NULL;
qb_ipcs_service_t *ipcs = NULL;
static crm_cluster_t cluster;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "mode [options]", long_options, "Start/Stop Pacemaker\n");
mainloop_add_signal(SIGHUP, pcmk_ignore);
mainloop_add_signal(SIGQUIT, pcmk_sigquit);
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'f':
/* Legacy */
break;
case 'p':
pid_file = optarg;
break;
case 's':
set_daemon_option("node_start_state", "standby");
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
case 'S':
shutdown = TRUE;
break;
case 'F':
printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION,
CRM_FEATURE_SET, CRM_FEATURES);
crm_exit(CRM_EX_OK);
default:
printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
++argerr;
break;
}
}
if (optind < argc) {
printf("non-option ARGV-elements: ");
while (optind < argc)
printf("%s ", argv[optind++]);
printf("\n");
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
setenv("LC_ALL", "C", 1);
set_daemon_option("mcp", "true");
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
crm_debug("Checking for old instances of %s", CRM_SYSTEM_MCP);
old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0);
crm_ipc_connect(old_instance);
if (shutdown) {
crm_debug("Terminating previous instance");
while (crm_ipc_connected(old_instance)) {
xmlNode *cmd =
create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL);
crm_debug(".");
crm_ipc_send(old_instance, cmd, 0, 0, NULL);
free_xml(cmd);
sleep(2);
}
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_exit(CRM_EX_OK);
} else if (crm_ipc_connected(old_instance)) {
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("Pacemaker is already active, aborting startup");
crm_exit(CRM_EX_FATAL);
}
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
if (mcp_read_config() == FALSE) {
crm_notice("Could not obtain corosync config data, exiting");
crm_exit(CRM_EX_UNAVAILABLE);
}
// OCF shell functions and cluster-glue need facility under different name
{
const char *facility = daemon_option("logfacility");
if (facility && safe_str_neq(facility, "none")) {
setenv("HA_LOGFACILITY", facility, 1);
}
}
crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s",
PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
mainloop = g_main_loop_new(NULL, FALSE);
rc = getrlimit(RLIMIT_CORE, &cores);
if (rc < 0) {
crm_perror(LOG_ERR, "Cannot determine current maximum core size.");
} else {
if (cores.rlim_max == 0 && geteuid() == 0) {
cores.rlim_max = RLIM_INFINITY;
} else {
crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max);
}
cores.rlim_cur = cores.rlim_max;
rc = setrlimit(RLIMIT_CORE, &cores);
if (rc < 0) {
crm_perror(LOG_ERR,
"Core file generation will remain disabled."
" Core files are an important diagnostic tool, so"
" please consider enabling them by default.");
}
}
if (crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) {
crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER);
crm_exit(CRM_EX_NOUSER);
}
mkdir(CRM_STATE_DIR, 0750);
mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
/* Used to store core/blackbox/scheduler/cib files in */
crm_build_path(CRM_PACEMAKER_DIR, 0750);
mcp_chown(CRM_PACEMAKER_DIR, pcmk_uid, pcmk_gid);
/* Used to store core files in */
crm_build_path(CRM_CORE_DIR, 0750);
mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid);
/* Used to store blackbox dumps in */
crm_build_path(CRM_BLACKBOX_DIR, 0750);
mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid);
// Used to store scheduler inputs in
crm_build_path(PE_STATE_DIR, 0750);
mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid);
/* Used to store the cluster configuration */
crm_build_path(CRM_CONFIG_DIR, 0750);
mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid);
// Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks);
if (ipcs == NULL) {
crm_err("Couldn't start IPC server");
crm_exit(CRM_EX_OSERR);
}
/* Allows us to block shutdown */
if (cluster_connect_cfg(&local_nodeid) == FALSE) {
crm_err("Couldn't connect to Corosync's CFG service");
crm_exit(CRM_EX_PROTOCOL);
}
if(pcmk_locate_sbd() > 0) {
setenv("PCMK_watchdog", "true", 1);
} else {
setenv("PCMK_watchdog", "false", 1);
}
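    /* Adopt any subdaemons still running from a previous pacemakerd instance
     * (for example, after pacemakerd itself was respawned), rather than
     * starting duplicates of them
     */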
switch (find_and_track_existing_processes()) {
case -1:
crm_crit("Internal fatality, see the log");
crm_exit(CRM_EX_FATAL);
case -2:
crm_crit("Blocked by foreign process, kill the offender");
crm_exit(CRM_EX_CANTCREAT);
default:
break;
};
cluster.destroy = mcp_cpg_destroy;
cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver;
cluster.cpg.cpg_confchg_fn = mcp_cpg_membership;
crm_set_autoreap(FALSE);
rc = pcmk_ok;
if (cluster_connect_cpg(&cluster) == FALSE) {
crm_err("Couldn't connect to Corosync's CPG service");
rc = -ENOPROTOOPT;
} else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy)
== FALSE) {
rc = -ENOTCONN;
} else {
local_name = get_local_node_name();
update_node_processes(local_nodeid, local_name, get_process_list());
mainloop_add_signal(SIGTERM, pcmk_shutdown);
mainloop_add_signal(SIGINT, pcmk_shutdown);
init_children_processes();
crm_notice("Pacemaker daemon successfully started and accepting connections");
g_main_loop_run(mainloop);
}
if (ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
g_main_loop_unref(mainloop);
cluster_disconnect_cpg(&cluster);
cluster_disconnect_cfg();
crm_exit(crm_errno2exit(rc));
}
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt
index 15001b1265..e058d91675 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt
@@ -1,428 +1,439 @@
:compat-mode: legacy
= Cluster-Wide Configuration =
== Configuration Layout ==
The cluster is defined by the Cluster Information Base (CIB),
which uses XML notation. The simplest CIB, an empty one, looks like this:
.An empty configuration
======
[source,XML]
-------
<cib crm_feature_set="3.0.7" validate-with="pacemaker-1.2" admin_epoch="1" epoch="0" num_updates="0">
  <configuration>
    <crm_config/>
    <nodes/>
    <resources/>
    <constraints/>
  </configuration>
  <status/>
</cib>
-------
======
The empty configuration above contains the major sections that make up a CIB:
* +cib+: The entire CIB is enclosed with a +cib+ tag. Certain fundamental settings
are defined as attributes of this tag.
** +configuration+: This section -- the primary focus of this document --
contains traditional configuration information such as what resources the
cluster serves and the relationships among them.
*** +crm_config+: cluster-wide configuration options
*** +nodes+: the machines that host the cluster
*** +resources+: the services run by the cluster
*** +constraints+: indications of how resources should be placed
** +status+: This section contains the history of each resource on each node.
Based on this data, the cluster can construct the complete current
state of the cluster. The authoritative source for this section
is the local executor (pacemaker-execd process) on each cluster node, and
the cluster will occasionally repopulate the entire section. For this
reason, it is never written to disk, and administrators are advised
against modifying it in any way.
In this document, configuration settings will be described as 'properties' or 'options'
based on how they are defined in the CIB:
* Properties are XML attributes of an XML element.
* Options are name-value pairs expressed as +nvpair+ child elements of an XML element.
Normally, you will use command-line tools that abstract the XML, so the
distinction will be unimportant; both properties and options are
cluster settings you can tweak.
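For instance, in the illustrative sketch below (the element IDs are
hypothetical), +validate-with+ is a property of the +cib+ element, while
+no-quorum-policy+ is an option expressed as an +nvpair+ within +crm_config+:
.A property and an option
======
[source,XML]
-------
<cib validate-with="pacemaker-3.0" admin_epoch="0" epoch="4" num_updates="0">
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="option-no-quorum-policy" name="no-quorum-policy" value="stop"/>
      </cluster_property_set>
    </crm_config>
    <nodes/>
    <resources/>
    <constraints/>
  </configuration>
  <status/>
</cib>
-------
======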
== CIB Properties ==
Certain settings are defined by CIB properties (that is, attributes of the
+cib+ tag) rather than with the rest of the cluster configuration in the
+configuration+ section.
The reason is simply a matter of parsing. These options are used by the
configuration database which is, by design, mostly ignorant of the content it
holds. So the decision was made to place them in an easy-to-find location.
.CIB Properties
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field |Description
| admin_epoch |
indexterm:[Configuration Version,Cluster]
indexterm:[Cluster,Option,Configuration Version]
indexterm:[admin_epoch,Cluster Option]
indexterm:[Cluster,Option,admin_epoch]
When a node joins the cluster, the cluster performs a check to see
which node has the best configuration. It asks the node with the highest
(+admin_epoch+, +epoch+, +num_updates+) tuple to replace the configuration on
all the nodes -- which makes setting them, and setting them correctly, very
important. +admin_epoch+ is never modified by the cluster; you can use this
to make the configurations on any inactive nodes obsolete. _Never set this
value to zero_. In such cases, the cluster cannot tell the difference between
your configuration and the "empty" one used when nothing is found on disk.
| epoch |
indexterm:[epoch,Cluster Option]
indexterm:[Cluster,Option,epoch]
The cluster increments this every time the configuration is updated (usually by
the administrator).
| num_updates |
indexterm:[num_updates,Cluster Option]
indexterm:[Cluster,Option,num_updates]
The cluster increments this every time the configuration or status is updated
(usually by the cluster) and resets it to 0 when epoch changes.
| validate-with |
indexterm:[validate-with,Cluster Option]
indexterm:[Cluster,Option,validate-with]
Determines the type of XML validation that will be done on the configuration.
If set to +none+, the cluster will not verify that updates conform to the
DTD (nor reject ones that don't). This option can be useful when
operating a mixed-version cluster during an upgrade.
|cib-last-written |
indexterm:[cib-last-written,Cluster Property]
indexterm:[Cluster,Property,cib-last-written]
Indicates when the configuration was last written to disk. Maintained by the
cluster; for informational purposes only.
|have-quorum |
indexterm:[have-quorum,Cluster Property]
indexterm:[Cluster,Property,have-quorum]
Indicates if the cluster has quorum. If false, this may mean that the
cluster cannot start resources or fence other nodes (see
+no-quorum-policy+ below). Maintained by the cluster.
|dc-uuid |
indexterm:[dc-uuid,Cluster Property]
indexterm:[Cluster,Property,dc-uuid]
Indicates which cluster node is the current leader. Used by the
cluster when placing resources and determining the order of some
events. Maintained by the cluster.
|=========================================================
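For example (the value here is hypothetical, and the XML fragment names only
the attribute being changed), an administrator who wants the configuration on
any inactive nodes to become obsolete could bump +admin_epoch+ directly:
-------
# cibadmin --modify --xml-text '<cib admin_epoch="42"/>'
-------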
[[s-cluster-options]]
== Cluster Options ==
Cluster options, as you might expect, control how the cluster behaves
when confronted with certain situations.
They are grouped into sets within the +crm_config+ section, and, in advanced
configurations, there may be more than one set. (This will be described later
in the section on <<ch-rules,rules>> where we will show how to have the cluster use
different sets of options during working hours than during weekends.) For now,
we will describe the simple case where each option is present at most once.
You can obtain an up-to-date list of cluster options, including
their default values, by running the `man pacemaker-schedulerd` and
`man pacemaker-controld` commands.
.Cluster Options
[width="95%",cols="5m,2,<11",options="header",align="center"]
|=========================================================
|Option |Default |Description
| cluster-name | |
indexterm:[cluster-name,Cluster Property]
indexterm:[Cluster,Property,cluster-name]
An (optional) name for the cluster as a whole. This is mostly for users'
convenience for use as desired in administration, but this can be used
in the Pacemaker configuration in <<ch-rules,rules>> (as the
+#cluster-name+ <<node-attribute-expressions,node attribute>>). It may
also be used by higher-level tools when displaying cluster information, and by
certain resource agents (for example, the +ocf:heartbeat:GFS2+ agent stores the
cluster name in filesystem meta-data).
| dc-version | |
indexterm:[dc-version,Cluster Property]
indexterm:[Cluster,Property,dc-version]
Version of Pacemaker on the cluster's DC.
Determined automatically by the cluster.
Often includes the hash which identifies the exact Git changeset it was built
from. Used for diagnostic purposes.
| cluster-infrastructure | |
indexterm:[cluster-infrastructure,Cluster Property]
indexterm:[Cluster,Property,cluster-infrastructure]
The messaging stack on which Pacemaker is currently running.
Determined automatically by the cluster.
Used for informational and diagnostic purposes.
| no-quorum-policy | stop
a|
indexterm:[no-quorum-policy,Cluster Option]
indexterm:[Cluster,Option,no-quorum-policy]
What to do when the cluster does not have quorum. Allowed values:
* +ignore:+ continue all resource management
* +freeze:+ continue resource management, but don't recover resources from nodes not in the affected partition
* +stop:+ stop all resources in the affected cluster partition
* +suicide:+ fence all nodes in the affected cluster partition
| batch-limit | 0 |
indexterm:[batch-limit,Cluster Option]
indexterm:[Cluster,Option,batch-limit]
The maximum number of actions that the cluster may execute in parallel across
all nodes. The "correct" value will depend on the speed and load of your
network and cluster nodes. If zero, the cluster will impose a dynamically
calculated limit only when any node has high load.
| migration-limit | -1 |
indexterm:[migration-limit,Cluster Option]
indexterm:[Cluster,Option,migration-limit]
The number of <<s-migrating-resources,live migration>> actions that the cluster
is allowed to execute in parallel on a node. A value of -1 means unlimited.
| symmetric-cluster | TRUE |
indexterm:[symmetric-cluster,Cluster Option]
indexterm:[Cluster,Option,symmetric-cluster]
Can all resources run on any node by default?
| stop-all-resources | FALSE |
indexterm:[stop-all-resources,Cluster Option]
indexterm:[Cluster,Option,stop-all-resources]
Should the cluster stop all resources?
| stop-orphan-resources | TRUE |
indexterm:[stop-orphan-resources,Cluster Option]
indexterm:[Cluster,Option,stop-orphan-resources]
Should deleted resources be stopped? This value takes precedence over
+is-managed+ (i.e. even unmanaged resources will be stopped if deleted from
the configuration when this value is TRUE).
| stop-orphan-actions | TRUE |
indexterm:[stop-orphan-actions,Cluster Option]
indexterm:[Cluster,Option,stop-orphan-actions]
Should deleted actions be cancelled?
| start-failure-is-fatal | TRUE |
indexterm:[start-failure-is-fatal,Cluster Option]
indexterm:[Cluster,Option,start-failure-is-fatal]
Should a failure to start a resource on a particular node prevent further start
attempts on that node? If FALSE, the cluster will decide whether the same
node is still eligible based on the resource's current failure count
and +migration-threshold+ (see <<s-failure-migration>>).
| enable-startup-probes | TRUE |
indexterm:[enable-startup-probes,Cluster Option]
indexterm:[Cluster,Option,enable-startup-probes]
Should the cluster check for active resources during startup?
| maintenance-mode | FALSE |
indexterm:[maintenance-mode,Cluster Option]
indexterm:[Cluster,Option,maintenance-mode]
Should the cluster refrain from monitoring, starting and stopping resources?
| stonith-enabled | TRUE |
indexterm:[stonith-enabled,Cluster Option]
indexterm:[Cluster,Option,stonith-enabled]
Should failed nodes and nodes with resources that can't be stopped be
shot? If you value your data, set up a STONITH device and enable this.
If true, or unset, the cluster will refuse to start resources unless
one or more STONITH resources have been configured.
If false, unresponsive nodes are immediately assumed to be running no
resources, and resource takeover to online nodes starts without any
further protection (which means _data loss_ if the unresponsive node
still accesses shared storage, for example). See also the +requires+
meta-attribute in <<s-resource-options>>.
| stonith-action | reboot |
indexterm:[stonith-action,Cluster Option]
indexterm:[Cluster,Option,stonith-action]
Action to send to STONITH device. Allowed values are +reboot+ and +off+.
The value +poweroff+ is also allowed, but is only used for
legacy devices.
| stonith-timeout | 60s |
indexterm:[stonith-timeout,Cluster Option]
indexterm:[Cluster,Option,stonith-timeout]
How long to wait for STONITH actions (reboot, on, off) to complete
| stonith-max-attempts | 10 |
indexterm:[stonith-max-attempts,Cluster Option]
indexterm:[Cluster,Option,stonith-max-attempts]
How many times fencing can fail for a target before the cluster will no longer
immediately re-attempt it.
| stonith-watchdog-timeout | 0 |
indexterm:[stonith-watchdog-timeout,Cluster Option]
indexterm:[Cluster,Option,stonith-watchdog-timeout]
If nonzero, rely on hardware watchdog self-fencing. If positive, assume unseen
nodes self-fence within this much time. If negative, and the
SBD_WATCHDOG_TIMEOUT environment variable is set, use twice that value.
| concurrent-fencing | FALSE |
indexterm:[concurrent-fencing,Cluster Option]
indexterm:[Cluster,Option,concurrent-fencing]
Is the cluster allowed to initiate multiple fence actions concurrently?
+| fence-reaction | stop |
+indexterm:[fence-reaction,Cluster Option]
+indexterm:[Cluster,Option,fence-reaction]
+How should a cluster node react if notified of its own fencing? A cluster node
+may receive notification of its own fencing if fencing is misconfigured, or if
+fabric fencing is in use that doesn't cut cluster communication. Allowed values
+are +stop+ to attempt to immediately stop pacemaker and stay stopped, or
++panic+ to attempt to immediately reboot the local node, falling back to stop
+on failure. The default is likely to be changed to +panic+ in a future release.
+'(since 2.0.3)'
+
| cluster-delay | 60s |
indexterm:[cluster-delay,Cluster Option]
indexterm:[Cluster,Option,cluster-delay]
Estimated maximum round-trip delay over the network (excluding action
execution). If the DC requires an action to be executed on another
node, it will consider the action failed if it does not get a response
from the other node in this time (after considering the action's
own timeout). The "correct" value will depend on the speed and load of your
network and cluster nodes.
| dc-deadtime | 20s |
indexterm:[dc-deadtime,Cluster Option]
indexterm:[Cluster,Option,dc-deadtime]
How long to wait for a response from other nodes during startup.
The "correct" value will depend on the speed/load of your network and the type of switches used.
| cluster-recheck-interval | 15min |
indexterm:[cluster-recheck-interval,Cluster Option]
indexterm:[Cluster,Option,cluster-recheck-interval]
Polling interval for time-based changes to options, resource parameters and constraints.
The Cluster is primarily event-driven, but your configuration can have
elements that take effect based on the time of day. To ensure these changes
take effect, we can optionally poll the cluster's status for changes. A value
of 0 disables polling. Positive values are an interval (in seconds unless other
SI units are specified, e.g. 5min).
| cluster-ipc-limit | 500 |
indexterm:[cluster-ipc-limit,Cluster Option]
indexterm:[Cluster,Option,cluster-ipc-limit]
The maximum IPC message backlog before one cluster daemon will disconnect
another. This is of use in large clusters, for which a good value is the number
of resources in the cluster multiplied by the number of nodes. The default of
500 is also the minimum. Raise this if you see "Evicting client" messages for
cluster daemon PIDs in the logs.
| pe-error-series-max | -1 |
indexterm:[pe-error-series-max,Cluster Option]
indexterm:[Cluster,Option,pe-error-series-max]
The number of PE inputs resulting in ERRORs to save. Used when reporting problems.
A value of -1 means unlimited (report all).
| pe-warn-series-max | -1 |
indexterm:[pe-warn-series-max,Cluster Option]
indexterm:[Cluster,Option,pe-warn-series-max]
The number of PE inputs resulting in WARNINGs to save. Used when reporting problems.
A value of -1 means unlimited (report all).
| pe-input-series-max | -1 |
indexterm:[pe-input-series-max,Cluster Option]
indexterm:[Cluster,Option,pe-input-series-max]
The number of "normal" PE inputs to save. Used when reporting problems.
A value of -1 means unlimited (report all).
| placement-strategy | default |
indexterm:[placement-strategy,Cluster Option]
indexterm:[Cluster,Option,placement-strategy]
How the cluster should allocate resources to nodes (see <<s-utilization>>).
Allowed values are +default+, +utilization+, +balanced+, and +minimal+.
| node-health-strategy | none |
indexterm:[node-health-strategy,Cluster Option]
indexterm:[Cluster,Option,node-health-strategy]
How the cluster should react to node health attributes (see <<s-node-health>>).
Allowed values are +none+, +migrate-on-red+, +only-green+, +progressive+, and
+custom+.
| enable-acl | FALSE |
indexterm:[enable-acl,Cluster Option]
indexterm:[Cluster,Option,enable-acl]
Whether access control lists (ACLs) (see <<ch-acls>>) can be used to authorize
modifications to the CIB.
| node-health-base | 0 |
indexterm:[node-health-base,Cluster Option]
indexterm:[Cluster,Option,node-health-base]
The base health score assigned to a node. Only used when
+node-health-strategy+ is +progressive+.
| node-health-green | 0 |
indexterm:[node-health-green,Cluster Option]
indexterm:[Cluster,Option,node-health-green]
The score to use for a node health attribute whose value is +green+.
Only used when +node-health-strategy+ is +progressive+ or +custom+.
| node-health-yellow | 0 |
indexterm:[node-health-yellow,Cluster Option]
indexterm:[Cluster,Option,node-health-yellow]
The score to use for a node health attribute whose value is +yellow+.
Only used when +node-health-strategy+ is +progressive+ or +custom+.
| node-health-red | 0 |
indexterm:[node-health-red,Cluster Option]
indexterm:[Cluster,Option,node-health-red]
The score to use for a node health attribute whose value is +red+.
Only used when +node-health-strategy+ is +progressive+ or +custom+.
| remove-after-stop | FALSE |
indexterm:[remove-after-stop,Cluster Option]
indexterm:[Cluster,Option,remove-after-stop]
_Advanced Use Only:_ Should the cluster remove resources from the LRM after
they are stopped? Values other than the default are, at best, poorly tested and
potentially dangerous.
| startup-fencing | TRUE |
indexterm:[startup-fencing,Cluster Option]
indexterm:[Cluster,Option,startup-fencing]
_Advanced Use Only:_ Should the cluster shoot unseen nodes?
Not using the default is very unsafe!
| election-timeout | 2min |
indexterm:[election-timeout,Cluster Option]
indexterm:[Cluster,Option,election-timeout]
_Advanced Use Only:_ If you need to adjust this value, it probably indicates
the presence of a bug.
| shutdown-escalation | 20min |
indexterm:[shutdown-escalation,Cluster Option]
indexterm:[Cluster,Option,shutdown-escalation]
_Advanced Use Only:_ If you need to adjust this value, it probably indicates
the presence of a bug.
| join-integration-timeout | 3min |
indexterm:[join-integration-timeout,Cluster Option]
indexterm:[Cluster,Option,join-integration-timeout]
_Advanced Use Only:_ If you need to adjust this value, it probably indicates
the presence of a bug.
| join-finalization-timeout | 30min |
indexterm:[join-finalization-timeout,Cluster Option]
indexterm:[Cluster,Option,join-finalization-timeout]
_Advanced Use Only:_ If you need to adjust this value, it probably indicates
the presence of a bug.
| transition-delay | 0s |
indexterm:[transition-delay,Cluster Option]
indexterm:[Cluster,Option,transition-delay]
_Advanced Use Only:_ Delay cluster recovery for the configured interval to
allow for additional/related events to occur. Useful if your configuration is
sensitive to the order in which ping updates arrive.
Enabling this option will slow down cluster recovery under
all conditions.
|=========================================================
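In practice, cluster options are usually managed with command-line tools
rather than by editing the XML. As a sketch (option names as in the table
above; the +crm_config+ section is given explicitly for clarity):
-------
# Query the current value of no-quorum-policy
crm_attribute --type crm_config --name no-quorum-policy --query

# Put the cluster into maintenance mode
crm_attribute --type crm_config --name maintenance-mode --update true
-------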
diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h
index 953605e139..cfa2925167 100644
--- a/include/crm/lrmd.h
+++ b/include/crm/lrmd.h
@@ -1,564 +1,564 @@
/*
- * Copyright 2012-2018 the Pacemaker project contributors
+ * Copyright 2012-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef LRMD__H
# define LRMD__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Resource agent executor
* \ingroup lrmd
*/
#include <stdbool.h>              // bool
#include <glib.h>                 // guint, GList
#include <crm/services.h>         // enum ocf_exitcode
#include <crm/common/mainloop.h>
typedef struct lrmd_s lrmd_t;
typedef struct lrmd_key_value_s {
char *key;
char *value;
struct lrmd_key_value_s *next;
} lrmd_key_value_t;
/* This should be bumped every time there is an incompatible change that
* prevents older clients from connecting to this version of the server.
*/
#define LRMD_PROTOCOL_VERSION "1.1"
/* This is the version that the client version will actually be compared
* against. This should be identical to LRMD_PROTOCOL_VERSION. However, we
* accidentally bumped LRMD_PROTOCOL_VERSION in 6424a647 (1.1.15) when we didn't
* need to, so for now it's different. If we ever have a truly incompatible
* bump, we can drop this and compare against LRMD_PROTOCOL_VERSION.
*/
#define LRMD_MIN_PROTOCOL_VERSION "1.0"
/* *INDENT-OFF* */
#define DEFAULT_REMOTE_KEY_LOCATION PACEMAKER_CONFIG_DIR "/authkey"
#define ALT_REMOTE_KEY_LOCATION "/etc/corosync/authkey"
#define DEFAULT_REMOTE_PORT 3121
#define DEFAULT_REMOTE_USERNAME "lrmd"
#define F_LRMD_OPERATION "lrmd_op"
#define F_LRMD_CLIENTNAME "lrmd_clientname"
#define F_LRMD_IS_IPC_PROVIDER "lrmd_is_ipc_provider"
#define F_LRMD_CLIENTID "lrmd_clientid"
#define F_LRMD_PROTOCOL_VERSION "lrmd_protocol_version"
#define F_LRMD_REMOTE_MSG_TYPE "lrmd_remote_msg_type"
#define F_LRMD_REMOTE_MSG_ID "lrmd_remote_msg_id"
#define F_LRMD_CALLBACK_TOKEN "lrmd_async_id"
#define F_LRMD_CALLID "lrmd_callid"
#define F_LRMD_CALLOPTS "lrmd_callopt"
#define F_LRMD_CALLDATA "lrmd_calldata"
#define F_LRMD_RC "lrmd_rc"
#define F_LRMD_EXEC_RC "lrmd_exec_rc"
#define F_LRMD_OP_STATUS "lrmd_exec_op_status"
#define F_LRMD_TIMEOUT "lrmd_timeout"
#define F_LRMD_WATCHDOG "lrmd_watchdog"
#define F_LRMD_CLASS "lrmd_class"
#define F_LRMD_PROVIDER "lrmd_provider"
#define F_LRMD_TYPE "lrmd_type"
#define F_LRMD_ORIGIN "lrmd_origin"
#define F_LRMD_RSC_RUN_TIME "lrmd_run_time"
#define F_LRMD_RSC_RCCHANGE_TIME "lrmd_rcchange_time"
#define F_LRMD_RSC_EXEC_TIME "lrmd_exec_time"
#define F_LRMD_RSC_QUEUE_TIME "lrmd_queue_time"
#define F_LRMD_RSC_ID "lrmd_rsc_id"
#define F_LRMD_RSC_ACTION "lrmd_rsc_action"
#define F_LRMD_RSC_USERDATA_STR "lrmd_rsc_userdata_str"
#define F_LRMD_RSC_OUTPUT "lrmd_rsc_output"
#define F_LRMD_RSC_EXIT_REASON "lrmd_rsc_exit_reason"
#define F_LRMD_RSC_START_DELAY "lrmd_rsc_start_delay"
#define F_LRMD_RSC_INTERVAL "lrmd_rsc_interval"
#define F_LRMD_RSC_DELETED "lrmd_rsc_deleted"
#define F_LRMD_RSC "lrmd_rsc"
#define F_LRMD_ALERT_ID "lrmd_alert_id"
#define F_LRMD_ALERT_PATH "lrmd_alert_path"
#define F_LRMD_ALERT "lrmd_alert"
#define LRMD_OP_RSC_REG "lrmd_rsc_register"
#define LRMD_OP_RSC_EXEC "lrmd_rsc_exec"
#define LRMD_OP_RSC_CANCEL "lrmd_rsc_cancel"
#define LRMD_OP_RSC_UNREG "lrmd_rsc_unregister"
#define LRMD_OP_RSC_INFO "lrmd_rsc_info"
#define LRMD_OP_RSC_METADATA "lrmd_rsc_metadata"
#define LRMD_OP_POKE "lrmd_rsc_poke"
#define LRMD_OP_NEW_CLIENT "lrmd_rsc_new_client"
#define LRMD_OP_CHECK "lrmd_check"
#define LRMD_OP_ALERT_EXEC "lrmd_alert_exec"
#define LRMD_OP_GET_RECURRING "lrmd_get_recurring"
#define LRMD_IPC_OP_NEW "new"
#define LRMD_IPC_OP_DESTROY "destroy"
#define LRMD_IPC_OP_EVENT "event"
#define LRMD_IPC_OP_REQUEST "request"
#define LRMD_IPC_OP_RESPONSE "response"
#define LRMD_IPC_OP_SHUTDOWN_REQ "shutdown_req"
#define LRMD_IPC_OP_SHUTDOWN_ACK "shutdown_ack"
#define LRMD_IPC_OP_SHUTDOWN_NACK "shutdown_nack"
#define F_LRMD_IPC_OP "lrmd_ipc_op"
#define F_LRMD_IPC_IPC_SERVER "lrmd_ipc_server"
#define F_LRMD_IPC_SESSION "lrmd_ipc_session"
#define F_LRMD_IPC_CLIENT "lrmd_ipc_client"
#define F_LRMD_IPC_USER "lrmd_ipc_user"
#define F_LRMD_IPC_MSG "lrmd_ipc_msg"
#define F_LRMD_IPC_MSG_ID "lrmd_ipc_msg_id"
#define F_LRMD_IPC_MSG_FLAGS "lrmd_ipc_msg_flags"
#define T_LRMD "lrmd"
#define T_LRMD_REPLY "lrmd_reply"
#define T_LRMD_NOTIFY "lrmd_notify"
#define T_LRMD_IPC_PROXY "lrmd_ipc_proxy"
#define T_LRMD_RSC_OP "lrmd_rsc_op"
/* *INDENT-ON* */
/*!
* \brief Create a new connection to the local executor
*/
lrmd_t *lrmd_api_new(void);
/*!
* \brief Create a new TLS connection to a remote executor
*
* \param nodename name of remote node identified with this connection
* \param server name of server to connect to
* \param port port number to connect to
*
* \note nodename and server may be the same value.
*/
lrmd_t *lrmd_remote_api_new(const char *nodename, const char *server, int port);
/*!
* \brief Use after lrmd_poll returns 1 to read and dispatch a message
*
* \param[in,out] lrmd Executor connection object
*
* \return TRUE if connection is still up, FALSE if disconnected
*/
bool lrmd_dispatch(lrmd_t * lrmd);
/*!
* \brief Poll for a specified timeout period to determine if a message
* is ready for dispatch.
* \retval 1 msg is ready
* \retval 0 timeout occurred
* \retval negative error code
*/
int lrmd_poll(lrmd_t * lrmd, int timeout);
/*!
* \brief Destroy executor connection object
*/
void lrmd_api_delete(lrmd_t * lrmd);
lrmd_key_value_t *lrmd_key_value_add(lrmd_key_value_t * kvp, const char *key, const char *value);
/* *INDENT-OFF* */
/* Reserved for future use */
enum lrmd_call_options {
lrmd_opt_none = 0x00000000,
/* lrmd_opt_sync_call = 0x00000001, //Not implemented, patches welcome. */
/*! Only notify the client originating a exec() the results */
lrmd_opt_notify_orig_only = 0x00000002,
/*! Drop recurring operations initiated by a client when client disconnects.
* This call_option is only valid when registering a resource. When used
* remotely with the pacemaker_remote daemon, this option means that recurring
* operations will be dropped once all the remote connections disconnect. */
lrmd_opt_drop_recurring = 0x00000003,
/*! Send notifications for recurring operations only when the result changes */
lrmd_opt_notify_changes_only = 0x00000004,
};
enum lrmd_callback_event {
lrmd_event_register,
lrmd_event_unregister,
lrmd_event_exec_complete,
lrmd_event_disconnect,
lrmd_event_connect,
lrmd_event_poke,
lrmd_event_new_client,
};
/* *INDENT-ON* */
typedef struct lrmd_event_data_s {
/*! Type of event, register, unregister, call_completed... */
enum lrmd_callback_event type;
/*! The resource this event occurred on. */
const char *rsc_id;
/*! The action performed, start, stop, monitor... */
const char *op_type;
- /*! The userdata string given do exec() api function */
+ /*! The user data passed by caller of exec() API function */
const char *user_data;
/*! The client api call id associated with this event */
int call_id;
/*! The operation's timeout period in ms. */
int timeout;
/*! The operation's recurring interval in ms. */
guint interval_ms;
/*! The operation's start delay value in ms. */
int start_delay;
/*! This operation that just completed is on a deleted rsc. */
int rsc_deleted;
/*! The executed ra return code mapped to OCF */
enum ocf_exitcode rc;
/*! The executor status returned for exec_complete events */
int op_status;
/*! stdout from resource agent operation */
const char *output;
/*! Timestamp of when op ran */
unsigned int t_run;
/*! Timestamp of last rc change */
unsigned int t_rcchange;
/*! Time in length op took to execute */
unsigned int exec_time;
/*! Time in length spent in queue */
unsigned int queue_time;
/*! int connection result. Used for connection and poke events */
int connection_rc;
/* This is a GHashTable containing the
* parameters given to the operation */
void *params;
/*! client node name associated with this connection
* (used to match actions to the proper client when there are multiple)
*/
const char *remote_nodename;
/*! exit failure reason string from resource agent operation */
const char *exit_reason;
} lrmd_event_data_t;
lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event);
void lrmd_free_event(lrmd_event_data_t * event);
typedef struct lrmd_rsc_info_s {
char *id;
char *type;
char *standard;
char *provider;
} lrmd_rsc_info_t;
typedef struct lrmd_op_info_s {
char *rsc_id;
char *action;
char *interval_ms_s;
char *timeout_ms_s;
} lrmd_op_info_t;
lrmd_rsc_info_t *lrmd_new_rsc_info(const char *rsc_id, const char *standard,
const char *provider, const char *type);
lrmd_rsc_info_t *lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info);
void lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info);
void lrmd_free_op_info(lrmd_op_info_t *op_info);
typedef void (*lrmd_event_callback) (lrmd_event_data_t * event);
typedef struct lrmd_list_s {
const char *val;
struct lrmd_list_s *next;
} lrmd_list_t;
void lrmd_list_freeall(lrmd_list_t * head);
void lrmd_key_value_freeall(lrmd_key_value_t * head);
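/* Editor's sketch: building a parameter list with lrmd_key_value_add(),
 * which returns the list head and so must be assigned back, then releasing
 * it with lrmd_key_value_freeall(). The key/value pairs are made up.
 */
static void
example_build_params(void)
{
    lrmd_key_value_t *params = NULL;

    params = lrmd_key_value_add(params, "ip", "192.168.122.100");
    params = lrmd_key_value_add(params, "cidr_netmask", "24");
    /* ... pass to exec(), which takes ownership, or free explicitly: */
    lrmd_key_value_freeall(params);
}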
typedef struct lrmd_api_operations_s {
/*!
* \brief Connect to an executor
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*connect) (lrmd_t * lrmd, const char *client_name, int *fd);
/*!
* \brief Initiate an executor connection without blocking
* \note This function requires the use of a mainloop.
*
* \note The result is returned via the event callback.
* \note When this function returns 0, the callback will be invoked
* to report the final result of the connect.
* \retval 0, connect in progress, wait for event callback
* \retval -1, failure.
*/
int (*connect_async) (lrmd_t * lrmd, const char *client_name, int timeout /*ms */ );
/*!
* \brief Check whether the connection to the executor daemon is active
*
* \retval 0, false
* \retval 1, true
*/
int (*is_connected) (lrmd_t * lrmd);
/*!
* \brief Poke executor connection to verify it is still capable of serving requests
* \note The response comes in the form of a poke event to the callback.
*
* \retval 0, wait for response in callback
* \retval -1, connection failure, callback may not be invoked
*/
int (*poke_connection) (lrmd_t * lrmd);
/*!
* \brief Disconnect from the executor.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*disconnect) (lrmd_t * lrmd);
/*!
* \brief Register a resource with the executor.
*
* \note Synchronous, guaranteed to occur in daemon before function returns.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*register_rsc) (lrmd_t * lrmd,
const char *rsc_id,
const char *standard,
const char *provider, const char *agent, enum lrmd_call_options options);
/*!
* \brief Retrieve registration info for a resource
*
* \retval info on success
* \retval NULL on failure
*/
lrmd_rsc_info_t *(*get_rsc_info) (lrmd_t * lrmd,
const char *rsc_id, enum lrmd_call_options options);
/*!
* \brief Retrieve registered recurring operations
*
* \return pcmk_ok on success, -errno otherwise
*/
int (*get_recurring_ops) (lrmd_t *lrmd, const char *rsc_id, int timeout_ms,
enum lrmd_call_options options, GList **output);
/*!
* \brief Unregister a resource from the executor.
*
* \note All pending and recurring operations will be cancelled
* automatically.
*
* \note Synchronous, guaranteed to occur in daemon before function returns.
*
* \retval 0, success
* \retval -1, success, but operations are currently executing on the resource
* and will return once they are completed.
* \retval negative error code on failure
*
*/
int (*unregister_rsc) (lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options);
/*!
* \brief Set a callback for executor events
*/
void (*set_callback) (lrmd_t * lrmd, lrmd_event_callback callback);
/*!
* \brief Issue a command on a resource
*
* \note Asynchronous, command is queued in daemon on function return, but
* execution of command is not synced.
*
* \note Operations on individual resources are guaranteed to occur
* in the order in which the client API calls them.
*
* \note Operations between different resources are not guaranteed
* to occur in any specific order in relation to one another,
* regardless of the order in which the client API is called.
* \retval call_id to track async event result on success
* \retval negative error code on failure
*/
int (*exec) (lrmd_t * lrmd, const char *rsc_id, const char *action, const char *userdata, /* userdata string given back in event notification */
guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
enum lrmd_call_options options, lrmd_key_value_t * params); /* ownership of params is given up to api here */
/*!
* \brief Cancel a recurring command.
*
* \note Synchronous, guaranteed to occur in daemon before function returns.
*
* \note The cancel completes asynchronously from this call.
* It is guaranteed to have completed once the callback
* receives an exec_complete event whose lrmd_op_status
* signifies that the operation was cancelled.
* \note For each resource, cancel operations and exec operations
* are processed in the order they are received.
* It is safe to assume that for a single resource, a cancel
* will occur in the executor before an exec if the client's cancel
* API call occurs before the exec API call.
*
* It is not, however, safe to assume that any operation on one
* resource will occur before an operation on another resource,
* regardless of the order in which the client API is called.
*
* \retval 0, cancel command sent.
* \retval negative error code on failure
*/
int (*cancel) (lrmd_t *lrmd, const char *rsc_id, const char *action,
guint interval_ms);
/*!
* \brief Get resource metadata for a specified resource agent
*
* \param[in] lrmd Executor connection (unused)
* \param[in] standard Resource agent class
* \param[in] provider Resource agent provider
* \param[in] agent Resource agent type
* \param[out] output Metadata will be stored here (must not be NULL)
* \param[in] options Options to use with any executor API calls (unused)
*
* \note Caller is responsible for freeing output. This call is currently
* always synchronous (blocking), and always done directly by the
* library (not via the executor connection). This means that it is based
* on the local host environment, even if the executor connection is to a
* remote node, so (for most resource agent classes) this will fail if
* the agent is not installed locally. This also means that, if an
* external agent must be executed, it will be executed by the
* caller's user, not the executor's.
* \todo Add a metadata call to the executor API and let the server handle this.
*
* \retval lrmd_ok success
* \retval negative error code on failure
*/
int (*get_metadata) (lrmd_t * lrmd,
const char *standard,
const char *provider,
const char *agent, char **output, enum lrmd_call_options options);
/*!
* \brief Retrieve a list of installed resource agents.
*
* \note If standard is not provided, all known agents will be returned
* \note List must be freed using lrmd_list_freeall()
*
* \retval num items in list on success
* \retval negative error code on failure
*/
int (*list_agents) (lrmd_t * lrmd, lrmd_list_t ** agents,
const char *standard, const char *provider);
/*!
* \brief Retrieve a list of resource agent providers
*
* \note When the agent is provided, only the agent's provider will be returned
* \note When no agent is supplied, all providers will be returned.
* \note List must be freed using lrmd_list_freeall()
*
* \retval num items in list on success
* \retval negative error code on failure
*/
int (*list_ocf_providers) (lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers);
/*!
* \brief Retrieve a list of standards supported by this machine/installation
*
* \note List must be freed using lrmd_list_freeall()
*
* \retval num items in list on success
* \retval negative error code on failure
*/
int (*list_standards) (lrmd_t * lrmd, lrmd_list_t ** standards);
/*!
* \brief Execute an alert agent
*
* \note Asynchronous, command is queued in daemon on function return, but
* execution of command is not synced.
*
* \note Operations on individual alerts are guaranteed to occur
* in the order in which the client API calls them.
*
* \note Operations between different alerts are not guaranteed
* to occur in any specific order in relation to one another,
* regardless of the order in which the client API is called.
* \retval call_id to track async event result on success
* \retval negative error code on failure
*/
int (*exec_alert) (lrmd_t *lrmd, const char *alert_id,
const char *alert_path, int timeout, /* ms */
lrmd_key_value_t *params); /* ownership of params is given up to api here */
/*!
* \brief Get resource metadata for a resource agent, passing parameters
*
* \param[in] lrmd Executor connection (unused)
* \param[in] standard Resource agent class
* \param[in] provider Resource agent provider
* \param[in] agent Resource agent type
* \param[out] output Metadata will be stored here (must not be NULL)
* \param[in] options Options to use with any executor API calls (unused)
* \param[in] params Parameters to pass to agent via environment
*
* \note This is identical to the get_metadata() API call, except parameters
* will be passed to the resource agent via environment variables.
* \note The API will handle freeing params.
*
* \return lrmd_ok on success, negative error code on failure
*/
int (*get_metadata_params) (lrmd_t *lrmd, const char *standard,
const char *provider, const char *agent,
char **output, enum lrmd_call_options options,
lrmd_key_value_t *params);
} lrmd_api_operations_t;
struct lrmd_s {
lrmd_api_operations_t *cmds;
void *lrmd_private;
};
static inline const char *
lrmd_event_type2str(enum lrmd_callback_event type)
{
switch (type) {
case lrmd_event_register:
return "register";
case lrmd_event_unregister:
return "unregister";
case lrmd_event_exec_complete:
return "exec_complete";
case lrmd_event_disconnect:
return "disconnect";
case lrmd_event_connect:
return "connect";
case lrmd_event_poke:
return "poke";
case lrmd_event_new_client:
return "new_client";
}
return "unknown";
}
#ifdef __cplusplus
}
#endif
#endif
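/* Editor's sketch tying the lrmd_api_operations_t table together: connect,
 * register a resource, and queue an asynchronous start. lrmd_api_new() is
 * the constructor declared earlier in this header; the resource, agent and
 * client names are illustrative, and error handling is reduced to the
 * essentials.
 */
static void
example_start_resource(void)
{
    lrmd_t *lrmd = lrmd_api_new();
    lrmd_key_value_t *params = NULL;

    if (lrmd->cmds->connect(lrmd, "example-client", NULL) != 0) {
        lrmd_api_delete(lrmd);
        return;
    }
    lrmd->cmds->set_callback(lrmd, example_event_cb); /* sketch shown earlier */

    lrmd->cmds->register_rsc(lrmd, "my-ip", "ocf", "heartbeat", "IPaddr2",
                             lrmd_opt_none);
    params = lrmd_key_value_add(params, "ip", "192.168.122.100");

    /* Queued in the daemon on return; the result arrives later as an
     * lrmd_event_exec_complete event, so a real client would run a mainloop
     * here before disconnecting. exec() takes ownership of params.
     */
    if (lrmd->cmds->exec(lrmd, "my-ip", "start", NULL, 0 /* not recurring */,
                         30000 /* ms */, 0, lrmd_opt_none, params) < 0) {
        crm_err("Could not queue start operation");
    }
    lrmd->cmds->disconnect(lrmd);
    lrmd_api_delete(lrmd);
}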
diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h
index 2a355ec5f7..a7d7106abd 100644
--- a/include/crm/msg_xml.h
+++ b/include/crm/msg_xml.h
@@ -1,421 +1,422 @@
/*
- * Copyright 2004-2018 the Pacemaker project contributors
+ * Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef XML_TAGS__H
# define XML_TAGS__H
#ifdef __cplusplus
extern "C" {
#endif
# ifndef F_ORIG
# define F_ORIG "src"
# endif
# ifndef F_SEQ
# define F_SEQ "seq"
# endif
# ifndef F_SUBTYPE
# define F_SUBTYPE "subt"
# endif
# ifndef F_TYPE
# define F_TYPE "t"
# endif
# ifndef F_CLIENTNAME
# define F_CLIENTNAME "cn"
# endif
# ifndef F_XML_TAGNAME
# define F_XML_TAGNAME "__name__"
# endif
# ifndef T_CRM
# define T_CRM "crmd"
# endif
# ifndef T_ATTRD
# define T_ATTRD "attrd"
# endif
# define CIB_OPTIONS_FIRST "cib-bootstrap-options"
# define F_CRM_DATA "crm_xml"
# define F_CRM_TASK "crm_task"
# define F_CRM_HOST_TO "crm_host_to"
# define F_CRM_MSG_TYPE F_SUBTYPE
# define F_CRM_SYS_TO "crm_sys_to"
# define F_CRM_SYS_FROM "crm_sys_from"
# define F_CRM_HOST_FROM F_ORIG
# define F_CRM_REFERENCE XML_ATTR_REFERENCE
# define F_CRM_VERSION XML_ATTR_VERSION
# define F_CRM_ORIGIN "origin"
# define F_CRM_USER "crm_user"
# define F_CRM_JOIN_ID "join_id"
# define F_CRM_DC_LEAVING "dc-leaving"
# define F_CRM_ELECTION_ID "election-id"
# define F_CRM_ELECTION_AGE_S "election-age-sec"
# define F_CRM_ELECTION_AGE_US "election-age-nano-sec"
# define F_CRM_ELECTION_OWNER "election-owner"
# define F_CRM_TGRAPH "crm-tgraph-file"
# define F_CRM_TGRAPH_INPUT "crm-tgraph-in"
# define F_CRM_THROTTLE_MODE "crm-limit-mode"
# define F_CRM_THROTTLE_MAX "crm-limit-max"
/*---- Common tags/attrs */
# define XML_DIFF_MARKER "__crm_diff_marker__"
# define XML_TAG_CIB "cib"
# define XML_TAG_FAILED "failed"
# define XML_ATTR_CRM_VERSION "crm_feature_set"
# define XML_ATTR_DIGEST "digest"
# define XML_ATTR_VALIDATION "validate-with"
# define XML_ATTR_RA_VERSION "ra-version"
# define XML_ATTR_QUORUM_PANIC "no-quorum-panic"
# define XML_ATTR_HAVE_QUORUM "have-quorum"
# define XML_ATTR_HAVE_WATCHDOG "have-watchdog"
# define XML_ATTR_GENERATION "epoch"
# define XML_ATTR_GENERATION_ADMIN "admin_epoch"
# define XML_ATTR_NUMUPDATES "num_updates"
# define XML_ATTR_TIMEOUT "timeout"
# define XML_ATTR_ORIGIN "crm-debug-origin"
# define XML_ATTR_TSTAMP "crm-timestamp"
# define XML_CIB_ATTR_WRITTEN "cib-last-written"
# define XML_ATTR_VERSION "version"
# define XML_ATTR_DESC "description"
# define XML_ATTR_ID "id"
# define XML_ATTR_IDREF "id-ref"
# define XML_ATTR_ID_LONG "long-id"
# define XML_ATTR_TYPE "type"
# define XML_ATTR_VERBOSE "verbose"
# define XML_ATTR_OP "op"
# define XML_ATTR_DC_UUID "dc-uuid"
# define XML_ATTR_UPDATE_ORIG "update-origin"
# define XML_ATTR_UPDATE_CLIENT "update-client"
# define XML_ATTR_UPDATE_USER "update-user"
# define XML_BOOLEAN_TRUE "true"
# define XML_BOOLEAN_FALSE "false"
# define XML_BOOLEAN_YES XML_BOOLEAN_TRUE
# define XML_BOOLEAN_NO XML_BOOLEAN_FALSE
# define XML_TAG_OPTIONS "options"
/*---- top level tags/attrs */
# define XML_ATTR_REQUEST "request"
# define XML_ATTR_RESPONSE "response"
# define XML_ATTR_UNAME "uname"
# define XML_ATTR_UUID "id"
# define XML_ATTR_REFERENCE "reference"
# define XML_CRM_TAG_PING "ping_response"
# define XML_PING_ATTR_STATUS "result"
# define XML_PING_ATTR_SYSFROM "crm_subsystem"
# define XML_PING_ATTR_CRMDSTATE "crmd_state"
# define XML_TAG_FRAGMENT "cib_fragment"
# define XML_FAIL_TAG_CIB "failed_update"
# define XML_FAILCIB_ATTR_ID "id"
# define XML_FAILCIB_ATTR_OBJTYPE "object_type"
# define XML_FAILCIB_ATTR_OP "operation"
# define XML_FAILCIB_ATTR_REASON "reason"
/*---- CIB specific tags/attrs */
# define XML_CIB_TAG_SECTION_ALL "all"
# define XML_CIB_TAG_CONFIGURATION "configuration"
# define XML_CIB_TAG_STATUS "status"
# define XML_CIB_TAG_RESOURCES "resources"
# define XML_CIB_TAG_NODES "nodes"
# define XML_CIB_TAG_DOMAINS "domains"
# define XML_CIB_TAG_CONSTRAINTS "constraints"
# define XML_CIB_TAG_CRMCONFIG "crm_config"
# define XML_CIB_TAG_OPCONFIG "op_defaults"
# define XML_CIB_TAG_RSCCONFIG "rsc_defaults"
# define XML_CIB_TAG_ACLS "acls"
# define XML_CIB_TAG_ALERTS "alerts"
# define XML_CIB_TAG_ALERT "alert"
# define XML_CIB_TAG_ALERT_RECIPIENT "recipient"
# define XML_CIB_TAG_ALERT_SELECT "select"
# define XML_CIB_TAG_ALERT_ATTRIBUTES "select_attributes"
# define XML_CIB_TAG_ALERT_FENCING "select_fencing"
# define XML_CIB_TAG_ALERT_NODES "select_nodes"
# define XML_CIB_TAG_ALERT_RESOURCES "select_resources"
# define XML_CIB_TAG_ALERT_ATTR "attribute"
# define XML_CIB_TAG_STATE "node_state"
# define XML_CIB_TAG_NODE "node"
# define XML_CIB_TAG_NVPAIR "nvpair"
# define XML_CIB_TAG_PROPSET "cluster_property_set"
# define XML_TAG_ATTR_SETS "instance_attributes"
# define XML_TAG_META_SETS "meta_attributes"
# define XML_TAG_ATTRS "attributes"
# define XML_TAG_RSC_VER_ATTRS "rsc_versioned_attrs"
# define XML_TAG_OP_VER_ATTRS "op_versioned_attrs"
# define XML_TAG_OP_VER_META "op_versioned_meta"
# define XML_TAG_PARAMS "parameters"
# define XML_TAG_PARAM "param"
# define XML_TAG_UTILIZATION "utilization"
# define XML_TAG_RESOURCE_REF "resource_ref"
# define XML_CIB_TAG_RESOURCE "primitive"
# define XML_CIB_TAG_GROUP "group"
# define XML_CIB_TAG_INCARNATION "clone"
# define XML_CIB_TAG_MASTER "master" // deprecated since 2.0.0
# define XML_CIB_TAG_CONTAINER "bundle"
# define XML_CIB_TAG_RSC_TEMPLATE "template"
# define XML_RSC_ATTR_TARGET "container-attribute-target"
# define XML_RSC_ATTR_RESTART "restart-type"
# define XML_RSC_ATTR_ORDERED "ordered"
# define XML_RSC_ATTR_INTERLEAVE "interleave"
# define XML_RSC_ATTR_INCARNATION "clone"
# define XML_RSC_ATTR_INCARNATION_MAX "clone-max"
# define XML_RSC_ATTR_INCARNATION_MIN "clone-min"
# define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max"
# define XML_RSC_ATTR_PROMOTABLE "promotable"
# define XML_RSC_ATTR_PROMOTED_MAX "promoted-max"
# define XML_RSC_ATTR_PROMOTED_NODEMAX "promoted-node-max"
# define XML_RSC_ATTR_MASTER_MAX "master-max" // deprecated since 2.0.0
# define XML_RSC_ATTR_MASTER_NODEMAX "master-node-max" // deprecated since 2.0.0
# define XML_RSC_ATTR_MANAGED "is-managed"
# define XML_RSC_ATTR_TARGET_ROLE "target-role"
# define XML_RSC_ATTR_UNIQUE "globally-unique"
# define XML_RSC_ATTR_NOTIFY "notify"
# define XML_RSC_ATTR_STICKINESS "resource-stickiness"
# define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold"
# define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout"
# define XML_RSC_ATTR_MULTIPLE "multiple-active"
# define XML_RSC_ATTR_REQUIRES "requires"
# define XML_RSC_ATTR_PROVIDES "provides"
# define XML_RSC_ATTR_CONTAINER "container"
# define XML_RSC_ATTR_INTERNAL_RSC "internal_rsc"
# define XML_RSC_ATTR_MAINTENANCE "maintenance"
# define XML_RSC_ATTR_REMOTE_NODE "remote-node"
# define XML_RSC_ATTR_CLEAR_OP "clear_failure_op"
# define XML_RSC_ATTR_CLEAR_INTERVAL "clear_failure_interval"
# define XML_RSC_ATTR_REMOTE_RA_ADDR "addr"
# define XML_RSC_ATTR_REMOTE_RA_SERVER "server"
# define XML_RSC_ATTR_REMOTE_RA_PORT "port"
# define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval"
# define XML_OP_ATTR_ON_FAIL "on-fail"
# define XML_OP_ATTR_START_DELAY "start-delay"
# define XML_OP_ATTR_ALLOW_MIGRATE "allow-migrate"
# define XML_OP_ATTR_ORIGIN "interval-origin"
# define XML_OP_ATTR_PENDING "record-pending"
# define XML_OP_ATTR_DIGESTS_ALL "digests-all"
# define XML_OP_ATTR_DIGESTS_SECURE "digests-secure"
# define XML_CIB_TAG_LRM "lrm"
# define XML_LRM_TAG_RESOURCES "lrm_resources"
# define XML_LRM_TAG_RESOURCE "lrm_resource"
# define XML_LRM_TAG_RSC_OP "lrm_rsc_op"
# define XML_AGENT_ATTR_CLASS "class"
# define XML_AGENT_ATTR_PROVIDER "provider"
# define XML_CIB_ATTR_REPLACE "replace"
# define XML_CIB_ATTR_SOURCE "source"
# define XML_CIB_ATTR_PRIORITY "priority"
# define XML_CIB_ATTR_SOURCE "source"
# define XML_NODE_JOIN_STATE "join"
# define XML_NODE_EXPECTED "expected"
# define XML_NODE_IN_CLUSTER "in_ccm"
# define XML_NODE_IS_PEER "crmd"
# define XML_NODE_IS_REMOTE "remote_node"
# define XML_NODE_IS_FENCED "node_fenced"
# define XML_NODE_IS_MAINTENANCE "node_in_maintenance"
# define XML_CIB_ATTR_SHUTDOWN "shutdown"
/* Aside from being an old name for the executor, LRM is a misnomer here because
* the controller and scheduler use these to track actions, which are not always
* executor operations.
*/
// XML attribute that takes interval specification (user-facing configuration)
# define XML_LRM_ATTR_INTERVAL "interval"
// XML attribute that takes interval in milliseconds (daemon APIs)
// (identical value as above, but different constant allows clearer code intent)
# define XML_LRM_ATTR_INTERVAL_MS XML_LRM_ATTR_INTERVAL
# define XML_LRM_ATTR_TASK "operation"
# define XML_LRM_ATTR_TASK_KEY "operation_key"
# define XML_LRM_ATTR_TARGET "on_node"
# define XML_LRM_ATTR_TARGET_UUID "on_node_uuid"
/*! Actions to be executed on Pacemaker Remote nodes are routed through the
* controller on the cluster node hosting the remote connection. That cluster
* node is considered the router node for the action.
*/
# define XML_LRM_ATTR_ROUTER_NODE "router_node"
# define XML_LRM_ATTR_RSCID "rsc-id"
# define XML_LRM_ATTR_OPSTATUS "op-status"
# define XML_LRM_ATTR_RC "rc-code"
# define XML_LRM_ATTR_CALLID "call-id"
# define XML_LRM_ATTR_OP_DIGEST "op-digest"
# define XML_LRM_ATTR_OP_RESTART "op-force-restart"
# define XML_LRM_ATTR_OP_SECURE "op-secure-params"
# define XML_LRM_ATTR_RESTART_DIGEST "op-restart-digest"
# define XML_LRM_ATTR_SECURE_DIGEST "op-secure-digest"
# define XML_LRM_ATTR_EXIT_REASON "exit-reason"
# define XML_RSC_OP_LAST_CHANGE "last-rc-change"
# define XML_RSC_OP_LAST_RUN "last-run"
# define XML_RSC_OP_T_EXEC "exec-time"
# define XML_RSC_OP_T_QUEUE "queue-time"
# define XML_LRM_ATTR_MIGRATE_SOURCE "migrate_source"
# define XML_LRM_ATTR_MIGRATE_TARGET "migrate_target"
# define XML_TAG_GRAPH "transition_graph"
# define XML_GRAPH_TAG_RSC_OP "rsc_op"
# define XML_GRAPH_TAG_PSEUDO_EVENT "pseudo_event"
# define XML_GRAPH_TAG_CRM_EVENT "crm_event"
# define XML_GRAPH_TAG_DOWNED "downed"
# define XML_GRAPH_TAG_MAINTENANCE "maintenance"
# define XML_TAG_RULE "rule"
# define XML_RULE_ATTR_SCORE "score"
# define XML_RULE_ATTR_SCORE_ATTRIBUTE "score-attribute"
# define XML_RULE_ATTR_ROLE "role"
# define XML_RULE_ATTR_BOOLEAN_OP "boolean-op"
# define XML_TAG_EXPRESSION "expression"
# define XML_EXPR_ATTR_ATTRIBUTE "attribute"
# define XML_EXPR_ATTR_OPERATION "operation"
# define XML_EXPR_ATTR_VALUE "value"
# define XML_EXPR_ATTR_TYPE "type"
# define XML_EXPR_ATTR_VALUE_SOURCE "value-source"
# define XML_CONS_TAG_RSC_DEPEND "rsc_colocation"
# define XML_CONS_TAG_RSC_ORDER "rsc_order"
# define XML_CONS_TAG_RSC_LOCATION "rsc_location"
# define XML_CONS_TAG_RSC_TICKET "rsc_ticket"
# define XML_CONS_TAG_RSC_SET "resource_set"
# define XML_CONS_ATTR_SYMMETRICAL "symmetrical"
# define XML_LOCATION_ATTR_DISCOVERY "resource-discovery"
# define XML_COLOC_ATTR_SOURCE "rsc"
# define XML_COLOC_ATTR_SOURCE_ROLE "rsc-role"
# define XML_COLOC_ATTR_TARGET "with-rsc"
# define XML_COLOC_ATTR_TARGET_ROLE "with-rsc-role"
# define XML_COLOC_ATTR_NODE_ATTR "node-attribute"
# define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance"
# define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance"
# define XML_LOC_ATTR_SOURCE "rsc"
# define XML_LOC_ATTR_SOURCE_PATTERN "rsc-pattern"
# define XML_ORDER_ATTR_FIRST "first"
# define XML_ORDER_ATTR_THEN "then"
# define XML_ORDER_ATTR_FIRST_ACTION "first-action"
# define XML_ORDER_ATTR_THEN_ACTION "then-action"
# define XML_ORDER_ATTR_FIRST_INSTANCE "first-instance"
# define XML_ORDER_ATTR_THEN_INSTANCE "then-instance"
# define XML_ORDER_ATTR_KIND "kind"
# define XML_TICKET_ATTR_TICKET "ticket"
# define XML_TICKET_ATTR_LOSS_POLICY "loss-policy"
# define XML_NVPAIR_ATTR_NAME "name"
# define XML_NVPAIR_ATTR_VALUE "value"
# define XML_NODE_ATTR_RSC_DISCOVERY "resource-discovery-enabled"
# define XML_CONFIG_ATTR_DC_DEADTIME "dc-deadtime"
# define XML_CONFIG_ATTR_ELECTION_FAIL "election-timeout"
# define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation"
# define XML_CONFIG_ATTR_RECHECK "cluster-recheck-interval"
+# define XML_CONFIG_ATTR_FENCE_REACTION "fence-reaction"
# define XML_ALERT_ATTR_PATH "path"
# define XML_ALERT_ATTR_TIMEOUT "timeout"
# define XML_ALERT_ATTR_TSTAMP_FORMAT "timestamp-format"
# define XML_ALERT_ATTR_REC_VALUE "value"
# define XML_CIB_TAG_GENERATION_TUPPLE "generation_tuple"
# define XML_ATTR_TRANSITION_MAGIC "transition-magic"
# define XML_ATTR_TRANSITION_KEY "transition-key"
# define XML_ATTR_TE_NOWAIT "op_no_wait"
# define XML_ATTR_TE_TARGET_RC "op_target_rc"
# define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes"
# define XML_TAG_DIFF_ADDED "diff-added"
# define XML_TAG_DIFF_REMOVED "diff-removed"
# define XML_ACL_TAG_USER "acl_target"
# define XML_ACL_TAG_USERv1 "acl_user"
# define XML_ACL_TAG_GROUP "acl_group"
# define XML_ACL_TAG_ROLE "acl_role"
# define XML_ACL_TAG_PERMISSION "acl_permission"
# define XML_ACL_TAG_ROLE_REF "role"
# define XML_ACL_TAG_ROLE_REFv1 "role_ref"
# define XML_ACL_ATTR_KIND "kind"
# define XML_ACL_TAG_READ "read"
# define XML_ACL_TAG_WRITE "write"
# define XML_ACL_TAG_DENY "deny"
# define XML_ACL_ATTR_REF "reference"
# define XML_ACL_ATTR_REFv1 "ref"
# define XML_ACL_ATTR_TAG "object-type"
# define XML_ACL_ATTR_TAGv1 "tag"
# define XML_ACL_ATTR_XPATH "xpath"
# define XML_ACL_ATTR_ATTRIBUTE "attribute"
# define XML_CIB_TAG_TICKETS "tickets"
# define XML_CIB_TAG_TICKET_STATE "ticket_state"
# define XML_CIB_TAG_TAGS "tags"
# define XML_CIB_TAG_TAG "tag"
# define XML_CIB_TAG_OBJ_REF "obj_ref"
# define XML_TAG_FENCING_TOPOLOGY "fencing-topology"
# define XML_TAG_FENCING_LEVEL "fencing-level"
# define XML_ATTR_STONITH_INDEX "index"
# define XML_ATTR_STONITH_TARGET "target"
# define XML_ATTR_STONITH_TARGET_VALUE "target-value"
# define XML_ATTR_STONITH_TARGET_PATTERN "target-pattern"
# define XML_ATTR_STONITH_TARGET_ATTRIBUTE "target-attribute"
# define XML_ATTR_STONITH_DEVICES "devices"
# define XML_TAG_DIFF "diff"
# define XML_DIFF_VERSION "version"
# define XML_DIFF_VSOURCE "source"
# define XML_DIFF_VTARGET "target"
# define XML_DIFF_CHANGE "change"
# define XML_DIFF_LIST "change-list"
# define XML_DIFF_ATTR "change-attr"
# define XML_DIFF_RESULT "change-result"
# define XML_DIFF_OP "operation"
# define XML_DIFF_PATH "path"
# define XML_DIFF_POSITION "position"
# include
# define ID(x) crm_element_value(x, XML_ATTR_ID)
# define TYPE(x) crm_element_name(x)
#ifdef __cplusplus
}
#endif
#endif
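/* Editor's sketch: the constants above are meant for use with libcrmcommon's
 * XML helpers. create_xml_node(), crm_xml_add(), crm_element_value() and
 * free_xml() are existing Pacemaker helpers; the attribute values below are
 * made up.
 */
static void
example_constants(void)
{
    xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(rsc, XML_ATTR_ID, "my-ip");
    crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, "ocf");

    /* ID(x) expands to crm_element_value(x, XML_ATTR_ID) */
    crm_info("Resource %s has class %s",
             ID(rsc), crm_element_value(rsc, XML_AGENT_ATTR_CLASS));
    free_xml(rsc);
}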
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index 9585737c0c..6d1d0a3a6f 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,2499 +1,2515 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if SUPPORT_CIBSECRETS
# include
#endif
CRM_TRACE_INIT_DATA(stonith);
struct stonith_action_s {
/*! user defined data */
char *agent;
char *action;
char *victim;
GHashTable *args;
int timeout;
int async;
void *userdata;
void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data);
void (*fork_cb) (GPid pid, gpointer user_data);
svc_action_t *svc_action;
/*! internal timing information */
time_t initial_start_time;
int tries;
int remaining_timeout;
int max_retries;
/* device output data */
GPid pid;
int rc;
char *output;
char *error;
};
typedef struct stonith_private_s {
char *token;
crm_ipc_t *ipc;
mainloop_io_t *source;
GHashTable *stonith_op_callback_table;
GList *notify_list;
int notify_refcnt;
bool notify_deletes;
void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
} stonith_private_t;
typedef struct stonith_notify_client_s {
const char *event;
const char *obj_id; /* implement one day */
const char *obj_type; /* implement one day */
void (*notify) (stonith_t * st, stonith_event_t * e);
bool delete;
} stonith_notify_client_t;
typedef struct stonith_callback_client_s {
void (*callback) (stonith_t * st, stonith_callback_data_t * data);
const char *id;
void *user_data;
gboolean only_success;
gboolean allow_timeout_updates;
struct timer_rec_s *timer;
} stonith_callback_client_t;
struct notify_blob_s {
stonith_t *stonith;
xmlNode *xml;
};
struct timer_rec_s {
int call_id;
int timeout;
guint ref;
stonith_t *stonith;
};
typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *,
xmlNode *, xmlNode *, xmlNode **, xmlNode **);
bool stonith_dispatch(stonith_t * st);
xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
int call_options);
static int stonith_send_command(stonith_t *stonith, const char *op,
xmlNode *data, xmlNode **output_data,
int call_options, int timeout);
static void stonith_connection_destroy(gpointer user_data);
static void stonith_send_notification(gpointer data, gpointer user_data);
static int internal_stonith_action_execute(stonith_action_t * action);
static void log_action(stonith_action_t *action, pid_t pid);
/*!
* \brief Get agent namespace by name
*
* \param[in] namespace_s Name of namespace as string
*
* \return Namespace as enum value
*/
enum stonith_namespace
stonith_text2namespace(const char *namespace_s)
{
if ((namespace_s == NULL) || !strcmp(namespace_s, "any")) {
return st_namespace_any;
} else if (!strcmp(namespace_s, "redhat")
|| !strcmp(namespace_s, "stonith-ng")) {
return st_namespace_rhcs;
} else if (!strcmp(namespace_s, "internal")) {
return st_namespace_internal;
} else if (!strcmp(namespace_s, "heartbeat")) {
return st_namespace_lha;
}
return st_namespace_invalid;
}
/*!
* \brief Get agent namespace name
*
* \param[in] namespace Namespace as enum value
*
* \return Namespace name as string
*/
const char *
stonith_namespace2text(enum stonith_namespace st_namespace)
{
switch (st_namespace) {
case st_namespace_any: return "any";
case st_namespace_rhcs: return "stonith-ng";
case st_namespace_internal: return "internal";
case st_namespace_lha: return "heartbeat";
default: break;
}
return "unsupported";
}
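/* Editor's note: the two helpers above round-trip as follows (aliases like
 * "redhat" normalize to the canonical "stonith-ng" name):
 *
 *   enum stonith_namespace ns = stonith_text2namespace("redhat");
 *   stonith_namespace2text(ns);   // returns "stonith-ng"
 */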
/*!
* \brief Determine namespace of a fence agent
*
* \param[in] agent Fence agent type
* \param[in] namespace_s Name of agent namespace as string, if known
*
* \return Namespace of specified agent, as enum value
*/
enum stonith_namespace
stonith_get_namespace(const char *agent, const char *namespace_s)
{
if (safe_str_eq(namespace_s, "internal")) {
return st_namespace_internal;
}
if (stonith__agent_is_rhcs(agent)) {
return st_namespace_rhcs;
}
#if HAVE_STONITH_STONITH_H
if (stonith__agent_is_lha(agent)) {
return st_namespace_lha;
}
#endif
crm_err("Unknown fence agent: %s", agent);
return st_namespace_invalid;
}
static void
log_action(stonith_action_t *action, pid_t pid)
{
if (action->output) {
/* Logging the whole string confuses syslog when the string is xml */
char *prefix = crm_strdup_printf("%s[%d] stdout:", action->agent, pid);
crm_log_output(LOG_TRACE, prefix, action->output);
free(prefix);
}
if (action->error) {
/* Logging the whole string confuses syslog when the string is xml */
char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid);
crm_log_output(LOG_WARNING, prefix, action->error);
free(prefix);
}
}
/* When cycling through the list, we don't want to delete items,
 * so we just mark them; once we know nobody is using the list,
 * we loop over it to remove the marked items.
 */
static void
foreach_notify_entry (stonith_private_t *private,
GFunc func,
gpointer user_data)
{
private->notify_refcnt++;
g_list_foreach(private->notify_list, func, user_data);
private->notify_refcnt--;
if ((private->notify_refcnt == 0) &&
private->notify_deletes) {
GList *list_item = private->notify_list;
private->notify_deletes = FALSE;
while (list_item != NULL)
{
stonith_notify_client_t *list_client = list_item->data;
GList *next = g_list_next(list_item);
if (list_client->delete) {
free(list_client);
private->notify_list =
g_list_delete_link(private->notify_list, list_item);
}
list_item = next;
}
}
}
static void
stonith_connection_destroy(gpointer user_data)
{
stonith_t *stonith = user_data;
stonith_private_t *native = NULL;
struct notify_blob_s blob;
crm_trace("Sending destroyed notification");
blob.stonith = stonith;
blob.xml = create_xml_node(NULL, "notify");
native = stonith->st_private;
native->ipc = NULL;
native->source = NULL;
free(native->token); native->token = NULL;
stonith->state = stonith_disconnected;
crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
foreach_notify_entry(native, stonith_send_notification, &blob);
free_xml(blob.xml);
}
xmlNode *
create_device_registration_xml(const char *id, enum stonith_namespace namespace,
const char *agent, stonith_key_value_t *params,
const char *rsc_provides)
{
xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
#if HAVE_STONITH_STONITH_H
if (namespace == st_namespace_any) {
namespace = stonith_get_namespace(agent, NULL);
}
if (namespace == st_namespace_lha) {
hash2field((gpointer) "plugin", (gpointer) agent, args);
agent = "fence_legacy";
}
#endif
crm_xml_add(data, XML_ATTR_ID, id);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, "agent", agent);
if ((namespace != st_namespace_any) && (namespace != st_namespace_invalid)) {
crm_xml_add(data, "namespace", stonith_namespace2text(namespace));
}
if (rsc_provides) {
crm_xml_add(data, "rsc_provides", rsc_provides);
}
for (; params; params = params->next) {
hash2field((gpointer) params->key, (gpointer) params->value, args);
}
return data;
}
static int
stonith_api_register_device(stonith_t * st, int call_options,
const char *id, const char *namespace, const char *agent,
stonith_key_value_t * params)
{
int rc = 0;
xmlNode *data = NULL;
data = create_device_registration_xml(id, stonith_text2namespace(namespace),
agent, params, NULL);
rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0);
free_xml(data);
return rc;
}
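/* Editor's sketch: callers normally reach the registration above through the
 * public method table, e.g. st->cmds->register_device(). The device name,
 * agent and parameters are illustrative; the caller keeps ownership of the
 * parameter list.
 */
static int
example_register_device(stonith_t *st)
{
    int rc;
    stonith_key_value_t *params = NULL;

    params = stonith_key_value_add(params, "ipaddr", "192.168.122.1");
    params = stonith_key_value_add(params, "pcmk_host_list", "node1 node2");
    rc = st->cmds->register_device(st, st_opt_sync_call, "my-fence-device",
                                   "stonith-ng", "fence_ipmilan", params);
    stonith_key_value_freeall(params, 1, 1);
    return rc;
}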
static int
stonith_api_remove_device(stonith_t * st, int call_options, const char *name)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, XML_ATTR_ID, name);
rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_remove_level_full(stonith_t *st, int options,
const char *node, const char *pattern,
const char *attr, const char *value, int level)
{
int rc = 0;
xmlNode *data = NULL;
CRM_CHECK(node || pattern || (attr && value), return -EINVAL);
data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
if (node) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
} else if (pattern) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
} else {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
}
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_remove_level(stonith_t * st, int options, const char *node, int level)
{
return stonith_api_remove_level_full(st, options, node,
NULL, NULL, NULL, level);
}
/*!
* \internal
* \brief Create XML for fence topology level registration request
*
* \param[in] node If not NULL, target level by this node name
* \param[in] pattern If not NULL, target by node name using this regex
* \param[in] attr If not NULL, target by this node attribute
* \param[in] value If not NULL, target by this node attribute value
* \param[in] level Index number of level to register
* \param[in] device_list List of devices in level
*
* \return Newly allocated XML tree on success, NULL otherwise
*
* \note The caller should set only one of node, pattern or attr/value.
*/
xmlNode *
create_level_registration_xml(const char *node, const char *pattern,
const char *attr, const char *value,
int level, stonith_key_value_t *device_list)
{
int len = 0;
char *list = NULL;
xmlNode *data;
CRM_CHECK(node || pattern || (attr && value), return NULL);
data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
CRM_CHECK(data, return NULL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add_int(data, XML_ATTR_ID, level);
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
if (node) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
} else if (pattern) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
} else {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
}
for (; device_list; device_list = device_list->next) {
int adding = strlen(device_list->value);
if(list) {
adding++; /* +1 space */
}
crm_trace("Adding %s (%dc) at offset %d", device_list->value, adding, len);
list = realloc_safe(list, len + adding + 1); /* +1 EOS */
if (list == NULL) {
crm_perror(LOG_CRIT, "Could not create device list");
free_xml(data);
return NULL;
}
sprintf(list + len, "%s%s", len?",":"", device_list->value);
len += adding;
}
crm_xml_add(data, XML_ATTR_STONITH_DEVICES, list);
free(list);
return data;
}
static int
stonith_api_register_level_full(stonith_t * st, int options, const char *node,
const char *pattern,
const char *attr, const char *value,
int level, stonith_key_value_t *device_list)
{
int rc = 0;
xmlNode *data = create_level_registration_xml(node, pattern, attr, value,
level, device_list);
CRM_CHECK(data != NULL, return -EINVAL);
rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_register_level(stonith_t * st, int options, const char *node, int level,
stonith_key_value_t * device_list)
{
return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL,
level, device_list);
}
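/* Editor's sketch: registering a topology level through the public method
 * table; level 1 for node1 uses the single (illustrative) device name from
 * the previous sketch.
 */
static int
example_register_level(stonith_t *st)
{
    int rc;
    stonith_key_value_t *devices = NULL;

    devices = stonith_key_value_add(devices, NULL, "my-fence-device");
    rc = st->cmds->register_level(st, st_opt_sync_call, "node1", 1, devices);
    stonith_key_value_freeall(devices, 1, 1);
    return rc;
}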
static void
append_arg(const char *key, const char *value, GHashTable **args)
{
CRM_CHECK(key != NULL, return);
CRM_CHECK(value != NULL, return);
CRM_CHECK(args != NULL, return);
if (strstr(key, "pcmk_")) {
return;
} else if (strstr(key, CRM_META)) {
return;
} else if (safe_str_eq(key, "crm_feature_set")) {
return;
}
if (!*args) {
*args = crm_str_table_new();
}
CRM_CHECK(*args != NULL, return);
crm_trace("Appending: %s=%s", key, value);
g_hash_table_replace(*args, strdup(key), strdup(value));
}
static void
append_config_arg(gpointer key, gpointer value, gpointer user_data)
{
/* The fencer will filter action out when it registers the device,
* but ignore it here just in case any other library callers
* fail to do so.
*/
if (safe_str_neq(key, STONITH_ATTR_ACTION_OP)) {
append_arg(key, value, user_data);
return;
}
}
static GHashTable *
make_args(const char *agent, const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args,
GHashTable * port_map)
{
char buffer[512];
GHashTable *arg_list = NULL;
const char *value = NULL;
CRM_CHECK(action != NULL, return NULL);
snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action);
if (device_args) {
value = g_hash_table_lookup(device_args, buffer);
}
if (value) {
crm_info("Substituting action '%s' for requested operation '%s'", value, action);
action = value;
}
append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list);
if (victim && device_args) {
const char *alias = victim;
const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG);
if (port_map && g_hash_table_lookup(port_map, victim)) {
alias = g_hash_table_lookup(port_map, victim);
}
/* Always supply the node's name, too:
* https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md
*/
append_arg("nodename", victim, &arg_list);
if (victim_nodeid) {
char nodeid_str[33] = { 0, };
if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) {
crm_info("For stonith action (%s) for victim %s, adding nodeid (%s) to parameters",
action, victim, nodeid_str);
append_arg("nodeid", nodeid_str, &arg_list);
}
}
/* Check if we need to supply the victim in any other form */
if(safe_str_eq(agent, "fence_legacy")) {
value = agent;
} else if (param == NULL) {
param = "port";
value = g_hash_table_lookup(device_args, param);
} else if (safe_str_eq(param, "none")) {
value = param; /* Nothing more to do */
} else {
value = g_hash_table_lookup(device_args, param);
}
/* Don't overwrite explicitly set values for $param */
if (value == NULL || safe_str_eq(value, "dynamic")) {
crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param,
alias);
append_arg(param, alias, &arg_list);
}
}
if (device_args) {
g_hash_table_foreach(device_args, append_config_arg, &arg_list);
}
return arg_list;
}
/*!
* \internal
* \brief Free all memory used by a stonith action
*
* \param[in,out] action Action to free
*/
void
stonith__destroy_action(stonith_action_t *action)
{
if (action) {
free(action->agent);
if (action->args) {
g_hash_table_destroy(action->args);
}
free(action->action);
free(action->victim);
if (action->svc_action) {
services_action_free(action->svc_action);
}
free(action->output);
free(action->error);
free(action);
}
}
/*!
* \internal
* \brief Get the result of an executed stonith action
*
* \param[in,out] action Executed action
* \param[out] rc Where to store result code (or NULL)
* \param[out] output Where to store standard output (or NULL)
* \param[out] error_output Where to store standard error output (or NULL)
*
* \note If output or error_output is not NULL, the caller is responsible for
* freeing the memory.
*/
void
stonith__action_result(stonith_action_t *action, int *rc, char **output,
char **error_output)
{
if (rc) {
*rc = pcmk_ok;
}
if (output) {
*output = NULL;
}
if (error_output) {
*error_output = NULL;
}
if (action != NULL) {
if (rc) {
*rc = action->rc;
}
if (output && action->output) {
*output = action->output;
action->output = NULL; // hand off memory management to caller
}
if (error_output && action->error) {
*error_output = action->error;
action->error = NULL; // hand off memory management to caller
}
}
}
#define FAILURE_MAX_RETRIES 2
stonith_action_t *
stonith_action_create(const char *agent,
const char *_action,
const char *victim,
uint32_t victim_nodeid,
int timeout, GHashTable * device_args, GHashTable * port_map)
{
stonith_action_t *action;
action = calloc(1, sizeof(stonith_action_t));
action->args = make_args(agent, _action, victim, victim_nodeid, device_args, port_map);
crm_debug("Preparing '%s' action for %s using agent %s",
_action, (victim? victim : "no target"), agent);
action->agent = strdup(agent);
action->action = strdup(_action);
if (victim) {
action->victim = strdup(victim);
}
action->timeout = action->remaining_timeout = timeout;
action->max_retries = FAILURE_MAX_RETRIES;
if (device_args) {
char buffer[512];
const char *value = NULL;
snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", _action);
value = g_hash_table_lookup(device_args, buffer);
if (value) {
action->max_retries = atoi(value);
}
}
return action;
}
static gboolean
update_remaining_timeout(stonith_action_t * action)
{
int diff = time(NULL) - action->initial_start_time;
if (action->tries >= action->max_retries) {
crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed",
action->agent, action->action, action->max_retries);
action->remaining_timeout = 0;
} else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) {
/* only set remaining timeout period if there is 30%
* or greater of the original timeout period left */
action->remaining_timeout = action->timeout - diff;
} else {
action->remaining_timeout = 0;
}
return action->remaining_timeout ? TRUE : FALSE;
}
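/* Editor's note: worked example of the logic above. With timeout=60s and
 * tries below max_retries: if 30s have elapsed (50% < 70%), remaining_timeout
 * becomes 30 and a retry proceeds; if 45s have elapsed (75% >= 70%), it is
 * zeroed and retries stop.
 */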
static int
svc_action_to_errno(svc_action_t *svc_action) {
int rv = pcmk_ok;
if (svc_action->rc > 0) {
/* Try to provide a useful error code based on the fence agent's
* error output.
*/
if (svc_action->rc == PCMK_OCF_TIMEOUT) {
rv = -ETIME;
} else if (svc_action->stderr_data == NULL) {
rv = -ENODATA;
} else if (strstr(svc_action->stderr_data, "imed out")) {
/* Some agents have their own internal timeouts */
rv = -ETIME;
} else if (strstr(svc_action->stderr_data, "Unrecognised action")) {
rv = -EOPNOTSUPP;
} else {
rv = -pcmk_err_generic;
}
}
return rv;
}
static void
stonith_action_async_done(svc_action_t *svc_action)
{
stonith_action_t *action = (stonith_action_t *) svc_action->cb_data;
action->rc = svc_action_to_errno(svc_action);
action->output = svc_action->stdout_data;
svc_action->stdout_data = NULL;
action->error = svc_action->stderr_data;
svc_action->stderr_data = NULL;
svc_action->params = NULL;
crm_debug("Child process %d performing action '%s' exited with rc %d",
action->pid, action->action, svc_action->rc);
log_action(action, action->pid);
if (action->rc != pcmk_ok && update_remaining_timeout(action)) {
int rc = internal_stonith_action_execute(action);
if (rc == pcmk_ok) {
return;
}
}
if (action->done_cb) {
action->done_cb(action->pid, action->rc, action->output, action->userdata);
}
action->svc_action = NULL; // don't remove our caller
stonith__destroy_action(action);
}
static void
stonith_action_async_forked(svc_action_t *svc_action)
{
stonith_action_t *action = (stonith_action_t *) svc_action->cb_data;
action->pid = svc_action->pid;
action->svc_action = svc_action;
if (action->fork_cb) {
(action->fork_cb) (svc_action->pid, action->userdata);
}
crm_trace("Child process %d performing action '%s' successfully forked",
action->pid, action->action);
}
static int
internal_stonith_action_execute(stonith_action_t * action)
{
int rc = -EPROTO;
int is_retry = 0;
svc_action_t *svc_action = NULL;
static int stonith_sequence = 0;
char *buffer = NULL;
if (!action->tries) {
action->initial_start_time = time(NULL);
}
action->tries++;
if (action->tries > 1) {
crm_info("Attempt %d to execute %s (%s). remaining timeout is %d",
action->tries, action->agent, action->action, action->remaining_timeout);
is_retry = 1;
}
if (action->args == NULL || action->agent == NULL)
goto fail;
buffer = crm_strdup_printf(RH_STONITH_DIR "/%s", basename(action->agent));
svc_action = services_action_create_generic(buffer, NULL);
free(buffer);
svc_action->timeout = 1000 * action->remaining_timeout;
svc_action->standard = strdup(PCMK_RESOURCE_CLASS_STONITH);
svc_action->id = crm_strdup_printf("%s_%s_%d", basename(action->agent),
action->action, action->tries);
svc_action->agent = strdup(action->agent);
svc_action->sequence = stonith_sequence++;
svc_action->params = action->args;
svc_action->cb_data = (void *) action;
/* keep retries from executing out of control and free previous results */
if (is_retry) {
free(action->output);
action->output = NULL;
free(action->error);
action->error = NULL;
sleep(1);
}
if (action->async) {
/* async */
if(services_action_async_fork_notify(svc_action,
&stonith_action_async_done,
&stonith_action_async_forked) == FALSE) {
services_action_free(svc_action);
svc_action = NULL;
} else {
rc = 0;
}
} else {
/* sync */
if (services_action_sync(svc_action)) {
rc = 0;
action->rc = svc_action_to_errno(svc_action);
action->output = svc_action->stdout_data;
svc_action->stdout_data = NULL;
action->error = svc_action->stderr_data;
svc_action->stderr_data = NULL;
} else {
action->rc = -ECONNABORTED;
rc = action->rc;
}
svc_action->params = NULL;
services_action_free(svc_action);
}
fail:
return rc;
}
/*!
* \internal
* \brief Kick off execution of an async stonith action
*
* \param[in,out] action Action to be executed
* \param[in,out] userdata Data pointer to be passed to callbacks
* \param[in] done Callback to notify action has failed/succeeded
* \param[in] fork_cb Callback to notify successful fork of child
*
* \return pcmk_ok if ownership of action has been taken, -errno otherwise
*/
int
stonith_action_execute_async(stonith_action_t * action,
void *userdata,
void (*done) (GPid pid, int rc, const char *output,
gpointer user_data),
void (*fork_cb) (GPid pid, gpointer user_data))
{
if (!action) {
return -EINVAL;
}
action->userdata = userdata;
action->done_cb = done;
action->fork_cb = fork_cb;
action->async = 1;
return internal_stonith_action_execute(action);
}
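/* Editor's sketch of the async path above: the done callback matches the
 * signature documented in the doxygen block, and on success the library
 * takes ownership of the action and frees it after the callback runs. A
 * real caller must be running a GLib mainloop for the callback to fire.
 * Agent, target and timeout values are illustrative.
 */
static void
example_fence_done(GPid pid, int rc, const char *output, gpointer user_data)
{
    crm_info("Fence action (pid %d) finished: rc=%d", pid, rc);
}

static int
example_fence_async(GHashTable *device_args)
{
    stonith_action_t *action = stonith_action_create("fence_ipmilan", "off",
                                                     "node1", 0, 30 /* s */,
                                                     device_args, NULL);

    return stonith_action_execute_async(action, NULL, example_fence_done,
                                        NULL);
}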
/*!
* \internal
* \brief Execute a stonith action
*
* \param[in,out] action Action to execute
*
* \return pcmk_ok on success, -errno otherwise
*/
int
stonith__execute(stonith_action_t *action)
{
int rc = pcmk_ok;
CRM_CHECK(action != NULL, return -EINVAL);
// Keep trying until success, max retries, or timeout
do {
rc = internal_stonith_action_execute(action);
} while ((rc != pcmk_ok) && update_remaining_timeout(action));
return rc;
}
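/* Editor's sketch of the synchronous counterpart: execute with retries, then
 * pull the results with stonith__action_result(), which hands ownership of
 * the output strings to the caller. Agent and timeout are illustrative.
 */
static void
example_fence_monitor(GHashTable *device_args)
{
    int rc = 0;
    char *output = NULL;
    char *error_output = NULL;
    stonith_action_t *action = stonith_action_create("fence_ipmilan",
                                                     "monitor", NULL, 0,
                                                     20 /* s */, device_args,
                                                     NULL);

    if (stonith__execute(action) == pcmk_ok) {
        stonith__action_result(action, &rc, &output, &error_output);
        crm_info("Agent exited with %d", rc);
    }
    free(output);
    free(error_output);
    stonith__destroy_action(action);
}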
static int
stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace,
stonith_key_value_t ** devices, int timeout)
{
int count = 0;
enum stonith_namespace ns = stonith_text2namespace(namespace);
if (devices == NULL) {
crm_err("Parameter error: stonith_api_device_list");
return -EFAULT;
}
#if HAVE_STONITH_STONITH_H
// Include Linux-HA agents if requested
if ((ns == st_namespace_any) || (ns == st_namespace_lha)) {
count += stonith__list_lha_agents(devices);
}
#endif
// Include Red Hat agents if requested
if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) {
count += stonith__list_rhcs_agents(devices);
}
return count;
}
static int
stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent,
const char *namespace, char **output, int timeout)
{
/* By executing meta-data directly, we can get it from stonith_admin when
* the cluster is not running, which is important for higher-level tools.
*/
enum stonith_namespace ns = stonith_get_namespace(agent, namespace);
crm_trace("Looking up metadata for %s agent %s",
stonith_namespace2text(ns), agent);
switch (ns) {
case st_namespace_rhcs:
return stonith__rhcs_metadata(agent, timeout, output);
#if HAVE_STONITH_STONITH_H
case st_namespace_lha:
return stonith__lha_metadata(agent, timeout, output);
#endif
default:
errno = EINVAL;
crm_perror(LOG_ERR,
"Agent %s not found or does not support meta-data",
agent);
break;
}
return -EINVAL;
}
static int
stonith_api_query(stonith_t * stonith, int call_options, const char *target,
stonith_key_value_t ** devices, int timeout)
{
int rc = 0, lpc = 0, max = 0;
xmlNode *data = NULL;
xmlNode *output = NULL;
xmlXPathObjectPtr xpathObj = NULL;
CRM_CHECK(devices != NULL, return -EINVAL);
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, target);
crm_xml_add(data, F_STONITH_ACTION, "off");
rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
if (rc < 0) {
return rc;
}
xpathObj = xpath_search(output, "//@agent");
if (xpathObj) {
max = numXpathResults(xpathObj);
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match != NULL) {
xmlChar *match_path = xmlGetNodePath(match);
crm_info("%s[%d] = %s", "//@agent", lpc, match_path);
free(match_path);
*devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID));
}
}
freeXpathObject(xpathObj);
}
free_xml(output);
free_xml(data);
return max;
}
static int
stonith_api_call(stonith_t * stonith,
int call_options,
const char *id,
const char *action, const char *victim, int timeout, xmlNode ** output)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_STONITH_DEVICE, id);
crm_xml_add(data, F_STONITH_ACTION, action);
crm_xml_add(data, F_STONITH_TARGET, victim);
rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout);
free_xml(data);
return rc;
}
static int
stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info,
int timeout)
{
int rc;
xmlNode *output = NULL;
rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
if (output && list_info) {
const char *list_str;
list_str = crm_element_value(output, "st_output");
if (list_str) {
*list_info = strdup(list_str);
}
}
if (output) {
free_xml(output);
}
return rc;
}
static int
stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
{
return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
}
static int
stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
int timeout)
{
return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
}
static int
stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action,
int timeout, int tolerance)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, node);
crm_xml_add(data, F_STONITH_ACTION, action);
crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout);
crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance);
rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout);
free_xml(data);
return rc;
}
static int
stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
{
return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0, 0);
}
static int
stonith_api_history(stonith_t * stonith, int call_options, const char *node,
stonith_history_t ** history, int timeout)
{
int rc = 0;
xmlNode *data = NULL;
xmlNode *output = NULL;
stonith_history_t *last = NULL;
*history = NULL;
if (node) {
data = create_xml_node(NULL, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, node);
}
rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output,
call_options | st_opt_sync_call, timeout);
free_xml(data);
if (rc == 0) {
xmlNode *op = NULL;
xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_TRACE);
for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) {
stonith_history_t *kvp;
int completed;
kvp = calloc(1, sizeof(stonith_history_t));
kvp->target = crm_element_value_copy(op, F_STONITH_TARGET);
kvp->action = crm_element_value_copy(op, F_STONITH_ACTION);
kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN);
kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE);
kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME);
crm_element_value_int(op, F_STONITH_DATE, &completed);
kvp->completed = (time_t) completed;
crm_element_value_int(op, F_STONITH_STATE, &kvp->state);
if (last) {
last->next = kvp;
} else {
*history = kvp;
}
last = kvp;
}
}
free_xml(output);
return rc;
}
void stonith_history_free(stonith_history_t *history)
{
stonith_history_t *hp, *hp_old;
for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) {
free(hp->target);
free(hp->action);
free(hp->origin);
free(hp->delegate);
free(hp->client);
}
}
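/* Editor's sketch: fetching fencing history through the public method table
 * and walking the singly linked result before freeing it with the helper
 * above. The 120s timeout is illustrative.
 */
static void
example_show_history(stonith_t *st)
{
    stonith_history_t *hp = NULL;
    stonith_history_t *history = NULL;

    if (st->cmds->history(st, st_opt_sync_call, NULL, &history, 120) == 0) {
        for (hp = history; hp != NULL; hp = hp->next) {
            crm_info("%s of %s requested by %s: state=%d",
                     hp->action, hp->target, hp->client, hp->state);
        }
    }
    stonith_history_free(history);
}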
/*!
* \brief Deprecated (use stonith_get_namespace() instead)
*/
const char *
get_stonith_provider(const char *agent, const char *provider)
{
return stonith_namespace2text(stonith_get_namespace(agent, provider));
}
static gint
stonithlib_GCompareFunc(gconstpointer a, gconstpointer b)
{
int rc = 0;
const stonith_notify_client_t *a_client = a;
const stonith_notify_client_t *b_client = b;
if (a_client->delete || b_client->delete) {
/* make entries marked for deletion not findable */
return -1;
}
CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0);
rc = strcmp(a_client->event, b_client->event);
if (rc == 0) {
if (a_client->notify == NULL || b_client->notify == NULL) {
return 0;
} else if (a_client->notify == b_client->notify) {
return 0;
} else if (((long)a_client->notify) < ((long)b_client->notify)) {
crm_err("callbacks for %s are not equal: %p vs. %p",
a_client->event, a_client->notify, b_client->notify);
return -1;
}
crm_err("callbacks for %s are not equal: %p vs. %p",
a_client->event, a_client->notify, b_client->notify);
return 1;
}
return rc;
}
xmlNode *
stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options)
{
xmlNode *op_msg = create_xml_node(NULL, "stonith_command");
CRM_CHECK(op_msg != NULL, return NULL);
CRM_CHECK(token != NULL, return NULL);
crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command");
crm_xml_add(op_msg, F_TYPE, T_STONITH_NG);
crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token);
crm_xml_add(op_msg, F_STONITH_OPERATION, op);
crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id);
crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options);
if (data != NULL) {
add_message_xml(op_msg, F_STONITH_CALLDATA, data);
}
return op_msg;
}
static void
stonith_destroy_op_callback(gpointer data)
{
stonith_callback_client_t *blob = data;
if (blob->timer && blob->timer->ref > 0) {
g_source_remove(blob->timer->ref);
}
free(blob->timer);
free(blob);
}
static int
stonith_api_signoff(stonith_t * stonith)
{
stonith_private_t *native = stonith->st_private;
crm_debug("Disconnecting from the fencer");
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
native->ipc = NULL;
} else if (native->ipc) {
/* Not attached to mainloop */
crm_ipc_t *ipc = native->ipc;
native->ipc = NULL;
crm_ipc_close(ipc);
crm_ipc_destroy(ipc);
}
free(native->token); native->token = NULL;
stonith->state = stonith_disconnected;
return pcmk_ok;
}
static int
stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks)
{
stonith_private_t *private = stonith->st_private;
if (all_callbacks) {
private->op_callback = NULL;
g_hash_table_destroy(private->stonith_op_callback_table);
private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL,
stonith_destroy_op_callback);
} else if (call_id == 0) {
private->op_callback = NULL;
} else {
g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
}
return pcmk_ok;
}
static void
invoke_callback(stonith_t * st, int call_id, int rc, void *userdata,
void (*callback) (stonith_t * st, stonith_callback_data_t * data))
{
stonith_callback_data_t data = { 0, };
data.call_id = call_id;
data.rc = rc;
data.userdata = userdata;
callback(st, &data);
}
static void
stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc)
{
stonith_private_t *private = NULL;
stonith_callback_client_t *blob = NULL;
stonith_callback_client_t local_blob;
CRM_CHECK(stonith != NULL, return);
CRM_CHECK(stonith->st_private != NULL, return);
private = stonith->st_private;
local_blob.id = NULL;
local_blob.callback = NULL;
local_blob.user_data = NULL;
local_blob.only_success = FALSE;
if (msg != NULL) {
crm_element_value_int(msg, F_STONITH_RC, &rc);
crm_element_value_int(msg, F_STONITH_CALLID, &call_id);
}
CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result"));
blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
if (blob != NULL) {
local_blob = *blob;
blob = NULL;
stonith_api_del_callback(stonith, call_id, FALSE);
} else {
crm_trace("No callback found for call %d", call_id);
local_blob.callback = NULL;
}
if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) {
crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id);
invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback);
} else if (private->op_callback == NULL && rc != pcmk_ok) {
crm_warn("Fencing command failed: %s", pcmk_strerror(rc));
crm_log_xml_debug(msg, "Failed fence update");
}
if (private->op_callback != NULL) {
crm_trace("Invoking global callback for call %d", call_id);
invoke_callback(stonith, call_id, rc, NULL, private->op_callback);
}
crm_trace("OP callback activated.");
}
static gboolean
stonith_async_timeout_handler(gpointer data)
{
struct timer_rec_s *timer = data;
crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout);
stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME);
/* Always return TRUE, never remove the handler
* We do that in stonith_del_callback()
*/
return TRUE;
}
static void
set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id,
int timeout)
{
struct timer_rec_s *async_timer = callback->timer;
if (timeout <= 0) {
return;
}
if (!async_timer) {
async_timer = calloc(1, sizeof(struct timer_rec_s));
callback->timer = async_timer;
}
async_timer->stonith = stonith;
async_timer->call_id = call_id;
/* Allow a fair bit of grace so that the server has a chance to tell us
 * about a timeout itself; this client-side timer is only a fallback.
 */
async_timer->timeout = (timeout + 60) * 1000;
if (async_timer->ref) {
g_source_remove(async_timer->ref);
}
async_timer->ref =
g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer);
}
static void
update_callback_timeout(int call_id, int timeout, stonith_t * st)
{
stonith_callback_client_t *callback = NULL;
stonith_private_t *private = st->st_private;
callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
if (!callback || !callback->allow_timeout_updates) {
return;
}
set_callback_timeout(callback, st, call_id, timeout);
}
static int
stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
{
const char *type = NULL;
struct notify_blob_s blob;
stonith_t *st = userdata;
stonith_private_t *private = NULL;
CRM_ASSERT(st != NULL);
private = st->st_private;
blob.stonith = st;
blob.xml = string2xml(buffer);
if (blob.xml == NULL) {
crm_warn("Received malformed message from fencer: %s", buffer);
return 0;
}
/* do callbacks */
type = crm_element_value(blob.xml, F_TYPE);
crm_trace("Activating %s callbacks...", type);
if (safe_str_eq(type, T_STONITH_NG)) {
stonith_perform_callback(st, blob.xml, 0, 0);
} else if (safe_str_eq(type, T_STONITH_NOTIFY)) {
foreach_notify_entry(private, stonith_send_notification, &blob);
} else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) {
int call_id = 0;
int timeout = 0;
crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout);
crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id);
update_callback_timeout(call_id, timeout, st);
} else {
crm_err("Unknown message type: %s", type);
crm_log_xml_warn(blob.xml, "BadReply");
}
free_xml(blob.xml);
return 1;
}
static int
stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
{
int rc = pcmk_ok;
stonith_private_t *native = NULL;
const char *display_name = name? name : "client";
static struct ipc_client_callbacks st_callbacks = {
.dispatch = stonith_dispatch_internal,
.destroy = stonith_connection_destroy
};
CRM_CHECK(stonith != NULL, return -EINVAL);
native = stonith->st_private;
CRM_ASSERT(native != NULL);
crm_debug("Attempting fencer connection by %s with%s mainloop",
display_name, (stonith_fd? "out" : ""));
stonith->state = stonith_connected_command;
if (stonith_fd) {
/* No mainloop */
native->ipc = crm_ipc_new("stonith-ng", 0);
if (native->ipc && crm_ipc_connect(native->ipc)) {
*stonith_fd = crm_ipc_get_fd(native->ipc);
} else if (native->ipc) {
crm_ipc_close(native->ipc);
crm_ipc_destroy(native->ipc);
native->ipc = NULL;
}
} else {
/* With mainloop */
native->source =
mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks);
native->ipc = mainloop_get_ipc_client(native->source);
}
if (native->ipc == NULL) {
rc = -ENOTCONN;
} else {
xmlNode *reply = NULL;
xmlNode *hello = create_xml_node(NULL, "stonith_command");
crm_xml_add(hello, F_TYPE, T_STONITH_NG);
crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER);
crm_xml_add(hello, F_STONITH_CLIENTNAME, name);
rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply);
if (rc < 0) {
crm_debug("Couldn't register with the fencer: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
rc = -ECOMM;
} else if (reply == NULL) {
crm_debug("Couldn't register with the fencer: no reply");
rc = -EPROTO;
} else {
const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION);
- const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID);
+ native->token = crm_element_value_copy(reply, F_STONITH_CLIENTID);
if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
crm_debug("Couldn't register with the fencer: invalid reply type '%s'",
(msg_type? msg_type : "(missing)"));
crm_log_xml_debug(reply, "Invalid fencer reply");
rc = -EPROTO;
- } else if (tmp_ticket == NULL) {
+ } else if (native->token == NULL) {
crm_debug("Couldn't register with the fencer: no token in reply");
crm_log_xml_debug(reply, "Invalid fencer reply");
rc = -EPROTO;
} else {
- native->token = strdup(tmp_ticket);
#if HAVE_MSGFROMIPC_TIMEOUT
stonith->call_timeout = MAX_IPC_DELAY;
#endif
crm_debug("Connection to fencer by %s succeeded (registration token: %s)",
display_name, native->token);
rc = pcmk_ok;
}
}
free_xml(reply);
free_xml(hello);
}
if (rc != pcmk_ok) {
crm_debug("Connection attempt to fencer by %s failed: %s "
CRM_XS " rc=%d", display_name, pcmk_strerror(rc), rc);
stonith->cmds->disconnect(stonith);
}
return rc;
}
static int
stonith_set_notification(stonith_t * stonith, const char *callback, int enabled)
{
int rc = pcmk_ok;
xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__);
stonith_private_t *native = stonith->st_private;
if (stonith->state != stonith_disconnected) {
crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY);
if (enabled) {
crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback);
} else {
crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback);
}
rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL);
if (rc < 0) {
crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc);
rc = -ECOMM;
} else {
rc = pcmk_ok;
}
}
free_xml(notify_msg);
return rc;
}
static int
stonith_api_add_notification(stonith_t * stonith, const char *event,
void (*callback) (stonith_t * stonith, stonith_event_t * e))
{
GList *list_item = NULL;
stonith_notify_client_t *new_client = NULL;
stonith_private_t *private = NULL;
private = stonith->st_private;
crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list));
new_client = calloc(1, sizeof(stonith_notify_client_t));
new_client->event = event;
new_client->notify = callback;
list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
if (list_item != NULL) {
crm_warn("Callback already present");
free(new_client);
return -ENOTUNIQ;
} else {
private->notify_list = g_list_append(private->notify_list, new_client);
stonith_set_notification(stonith, event, 1);
crm_trace("Callback added (%d)", g_list_length(private->notify_list));
}
return pcmk_ok;
}
static int
stonith_api_del_notification(stonith_t * stonith, const char *event)
{
GList *list_item = NULL;
stonith_notify_client_t *new_client = NULL;
stonith_private_t *private = NULL;
crm_debug("Removing callback for %s events", event);
private = stonith->st_private;
new_client = calloc(1, sizeof(stonith_notify_client_t));
new_client->event = event;
new_client->notify = NULL;
list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
stonith_set_notification(stonith, event, 0);
if (list_item != NULL) {
stonith_notify_client_t *list_client = list_item->data;
if (private->notify_refcnt) {
list_client->delete = TRUE;
private->notify_deletes = TRUE;
} else {
private->notify_list = g_list_remove(private->notify_list, list_client);
free(list_client);
}
crm_trace("Removed callback");
} else {
crm_trace("Callback not present");
}
free(new_client);
return pcmk_ok;
}
static int
stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options,
void *user_data, const char *callback_name,
void (*callback) (stonith_t * st, stonith_callback_data_t * data))
{
stonith_callback_client_t *blob = NULL;
stonith_private_t *private = NULL;
CRM_CHECK(stonith != NULL, return -EINVAL);
CRM_CHECK(stonith->st_private != NULL, return -EINVAL);
private = stonith->st_private;
if (call_id == 0) {
private->op_callback = callback;
} else if (call_id < 0) {
if (!(options & st_opt_report_only_success)) {
crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id));
invoke_callback(stonith, call_id, call_id, user_data, callback);
} else {
crm_warn("Fencer call failed: %s", pcmk_strerror(call_id));
}
return FALSE;
}
blob = calloc(1, sizeof(stonith_callback_client_t));
blob->id = callback_name;
blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE;
blob->user_data = user_data;
blob->callback = callback;
blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE;
if (timeout > 0) {
set_callback_timeout(blob, stonith, call_id, timeout);
}
g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob);
crm_trace("Added callback to %s for call %d", callback_name, call_id);
return TRUE;
}
static void
stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data)
{
int call = GPOINTER_TO_INT(key);
stonith_callback_client_t *blob = value;
crm_debug("Call %d (%s): pending", call, crm_str(blob->id));
}
void
stonith_dump_pending_callbacks(stonith_t * stonith)
{
stonith_private_t *private = stonith->st_private;
if (private->stonith_op_callback_table == NULL) {
return;
}
g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL);
}
static stonith_event_t *
xml_to_event(xmlNode * msg)
{
stonith_event_t *event = calloc(1, sizeof(stonith_event_t));
const char *ntype = crm_element_value(msg, F_SUBTYPE);
char *data_addr = crm_strdup_printf("//%s", ntype);
xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG);
crm_log_xml_trace(msg, "stonith_notify");
crm_element_value_int(msg, F_STONITH_RC, &(event->result));
if (safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) {
event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION);
if (data) {
event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN);
event->action = crm_element_value_copy(data, F_STONITH_ACTION);
event->target = crm_element_value_copy(data, F_STONITH_TARGET);
event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE);
event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID);
event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME);
event->device = crm_element_value_copy(data, F_STONITH_DEVICE);
} else {
crm_err("No data for %s event", ntype);
crm_log_xml_notice(msg, "BadEvent");
}
}
free(data_addr);
return event;
}
static void
event_free(stonith_event_t * event)
{
free(event->id);
free(event->type);
free(event->message);
free(event->operation);
free(event->origin);
free(event->action);
free(event->target);
free(event->executioner);
free(event->device);
free(event->client_origin);
free(event);
}
static void
stonith_send_notification(gpointer data, gpointer user_data)
{
struct notify_blob_s *blob = user_data;
stonith_notify_client_t *entry = data;
stonith_event_t *st_event = NULL;
const char *event = NULL;
if (blob->xml == NULL) {
crm_warn("Skipping callback - NULL message");
return;
}
event = crm_element_value(blob->xml, F_SUBTYPE);
if (entry == NULL) {
crm_warn("Skipping callback - NULL callback client");
return;
} else if (entry->delete) {
crm_trace("Skipping callback - marked for deletion");
return;
} else if (entry->notify == NULL) {
crm_warn("Skipping callback - NULL callback");
return;
} else if (safe_str_neq(entry->event, event)) {
crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event);
return;
}
st_event = xml_to_event(blob->xml);
crm_trace("Invoking callback for %p/%s event...", entry, event);
entry->notify(blob->stonith, st_event);
crm_trace("Callback invoked...");
event_free(st_event);
}
/*!
* \internal
* \brief Create and send an API request
*
* \param[in] stonith Stonith connection
* \param[in] op API operation to request
* \param[in] data Data to attach to request
* \param[out] output_data If not NULL, will be set to reply if synchronous
* \param[in] call_options Bitmask of stonith_call_options to use
* \param[in] timeout Error if not completed within this many seconds
*
* \return pcmk_ok (for synchronous requests) or positive call ID
* (for asynchronous requests) on success, -errno otherwise
*/
static int
stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data,
int call_options, int timeout)
{
int rc = 0;
int reply_id = -1;
xmlNode *op_msg = NULL;
xmlNode *op_reply = NULL;
stonith_private_t *native = NULL;
CRM_ASSERT(stonith && stonith->st_private && op);
native = stonith->st_private;
if (output_data != NULL) {
*output_data = NULL;
}
if ((stonith->state == stonith_disconnected) || (native->token == NULL)) {
return -ENOTCONN;
}
/* Increment the call ID, which must be positive to avoid conflicting with
* error codes. This shouldn't be a problem unless the client mucked with
* it or the counter wrapped around.
*/
stonith->call_id++;
if (stonith->call_id < 1) {
stonith->call_id = 1;
}
op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options);
if (op_msg == NULL) {
return -EINVAL;
}
crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout);
crm_trace("Sending %s message to fencer with timeout %ds", op, timeout);
{
enum crm_ipc_flags ipc_flags = crm_ipc_flags_none;
if (call_options & st_opt_sync_call) {
ipc_flags |= crm_ipc_client_response;
}
rc = crm_ipc_send(native->ipc, op_msg, ipc_flags,
1000 * (timeout + 60), &op_reply);
}
free_xml(op_msg);
if (rc < 0) {
crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc);
rc = -ECOMM;
goto done;
}
crm_log_xml_trace(op_reply, "Reply");
if (!(call_options & st_opt_sync_call)) {
crm_trace("Async call %d, returning", stonith->call_id);
free_xml(op_reply);
return stonith->call_id;
}
rc = pcmk_ok;
crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id);
if (reply_id == stonith->call_id) {
crm_trace("Synchronous reply %d received", reply_id);
if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) {
rc = -ENOMSG;
}
if ((call_options & st_opt_discard_reply) || output_data == NULL) {
crm_trace("Discarding reply");
} else {
*output_data = op_reply;
op_reply = NULL; /* Prevent subsequent free */
}
} else if (reply_id <= 0) {
crm_err("Received bad reply: No id set");
crm_log_xml_err(op_reply, "Bad reply");
free_xml(op_reply);
rc = -ENOMSG;
} else {
crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id);
crm_log_xml_err(op_reply, "Old reply");
free_xml(op_reply);
rc = -ENOMSG;
}
done:
if (crm_ipc_connected(native->ipc) == FALSE) {
crm_err("Fencer disconnected");
free(native->token); native->token = NULL;
stonith->state = stonith_disconnected;
}
free_xml(op_reply);
return rc;
}
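/* Sketch of the calling convention above (illustrative only): synchronous
 * requests resolve to pcmk_ok or a negative errno, while asynchronous
 * requests return the positive call ID that a callback can later be
 * registered against. The timeout below is hypothetical.
 */
#if 0
static void
example_send(stonith_t *st, const char *op, xmlNode *data)
{
    int rc = stonith_send_command(st, op, data, NULL, st_opt_sync_call, 120);

    if (rc != pcmk_ok) {
        crm_err("Synchronous fencer request failed: %s", pcmk_strerror(rc));
    }

    rc = stonith_send_command(st, op, data, NULL, st_opt_none, 120);
    if (rc > 0) {
        /* rc is the call ID; register a per-call callback against it, as
         * stonith_api_add_callback() does */
    }
}
#endif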
/* Not used with mainloop */
bool
stonith_dispatch(stonith_t * st)
{
gboolean stay_connected = TRUE;
stonith_private_t *private = NULL;
CRM_ASSERT(st != NULL);
private = st->st_private;
while (crm_ipc_ready(private->ipc)) {
if (crm_ipc_read(private->ipc) > 0) {
const char *msg = crm_ipc_buffer(private->ipc);
stonith_dispatch_internal(msg, strlen(msg), st);
}
if (crm_ipc_connected(private->ipc) == FALSE) {
crm_err("Connection closed");
stay_connected = FALSE;
}
}
return stay_connected;
}
static int
stonith_api_free(stonith_t * stonith)
{
int rc = pcmk_ok;
crm_trace("Destroying %p", stonith);
if (stonith->state != stonith_disconnected) {
crm_trace("Disconnecting %p first", stonith);
rc = stonith->cmds->disconnect(stonith);
}
if (stonith->state == stonith_disconnected) {
stonith_private_t *private = stonith->st_private;
crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table));
g_hash_table_destroy(private->stonith_op_callback_table);
crm_trace("Destroying %d notification clients", g_list_length(private->notify_list));
g_list_free_full(private->notify_list, free);
free(stonith->st_private);
free(stonith->cmds);
free(stonith);
} else {
crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc);
}
return rc;
}
void
stonith_api_delete(stonith_t * stonith)
{
crm_trace("Destroying %p", stonith);
if(stonith) {
stonith->cmds->free(stonith);
}
}
static int
stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
const char *namespace_s, const char *agent,
stonith_key_value_t *params, int timeout, char **output,
char **error_output)
{
/* Validation should be done directly via the agent, so we can get it from
* stonith_admin when the cluster is not running, which is important for
* higher-level tools.
*/
int rc = pcmk_ok;
/* Use a dummy node name in case the agent requires a target. We assume the
 * actual target doesn't matter for validation purposes (if that turns out
 * to be incorrect in practice, we will need to allow the caller to pass
 * the target).
 */
const char *target = "node1";
GHashTable *params_table = crm_str_table_new();
// Convert parameter list to a hash table
for (; params; params = params->next) {
// Strip out Pacemaker-implemented parameters
if (!crm_starts_with(params->key, "pcmk_")
&& strcmp(params->key, "provides")
&& strcmp(params->key, "stonith-timeout")) {
g_hash_table_insert(params_table, strdup(params->key),
strdup(params->value));
}
}
#if SUPPORT_CIBSECRETS
rc = replace_secret_params(rsc_id, params_table);
if (rc < 0) {
crm_warn("Could not replace secret parameters for validation of %s: %s",
agent, pcmk_strerror(rc));
}
#endif
if (output) {
*output = NULL;
}
if (error_output) {
*error_output = NULL;
}
switch (stonith_get_namespace(agent, namespace_s)) {
case st_namespace_rhcs:
rc = stonith__rhcs_validate(st, call_options, target, agent,
params_table, timeout, output,
error_output);
break;
#if HAVE_STONITH_STONITH_H
case st_namespace_lha:
rc = stonith__lha_validate(st, call_options, target, agent,
params_table, timeout, output,
error_output);
break;
#endif
default:
rc = -EINVAL;
errno = EINVAL;
crm_perror(LOG_ERR,
"Agent %s not found or does not support validation",
agent);
break;
}
g_hash_table_destroy(params_table);
return rc;
}
stonith_t *
stonith_api_new(void)
{
stonith_t *new_stonith = NULL;
stonith_private_t *private = NULL;
new_stonith = calloc(1, sizeof(stonith_t));
+ if (new_stonith == NULL) {
+ return NULL;
+ }
+
private = calloc(1, sizeof(stonith_private_t));
+ if (private == NULL) {
+ free(new_stonith);
+ return NULL;
+ }
new_stonith->st_private = private;
private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL, stonith_destroy_op_callback);
private->notify_list = NULL;
private->notify_refcnt = 0;
private->notify_deletes = FALSE;
new_stonith->call_id = 1;
new_stonith->state = stonith_disconnected;
new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t));
+ if (new_stonith->cmds == NULL) {
+ free(new_stonith->st_private);
+ free(new_stonith);
+ return NULL;
+ }
/* *INDENT-OFF* */
new_stonith->cmds->free = stonith_api_free;
new_stonith->cmds->connect = stonith_api_signon;
new_stonith->cmds->disconnect = stonith_api_signoff;
new_stonith->cmds->list = stonith_api_list;
new_stonith->cmds->monitor = stonith_api_monitor;
new_stonith->cmds->status = stonith_api_status;
new_stonith->cmds->fence = stonith_api_fence;
new_stonith->cmds->confirm = stonith_api_confirm;
new_stonith->cmds->history = stonith_api_history;
new_stonith->cmds->list_agents = stonith_api_device_list;
new_stonith->cmds->metadata = stonith_api_device_metadata;
new_stonith->cmds->query = stonith_api_query;
new_stonith->cmds->remove_device = stonith_api_remove_device;
new_stonith->cmds->register_device = stonith_api_register_device;
new_stonith->cmds->remove_level = stonith_api_remove_level;
new_stonith->cmds->remove_level_full = stonith_api_remove_level_full;
new_stonith->cmds->register_level = stonith_api_register_level;
new_stonith->cmds->register_level_full = stonith_api_register_level_full;
new_stonith->cmds->remove_callback = stonith_api_del_callback;
new_stonith->cmds->register_callback = stonith_api_add_callback;
new_stonith->cmds->remove_notification = stonith_api_del_notification;
new_stonith->cmds->register_notification = stonith_api_add_notification;
new_stonith->cmds->validate = stonith_api_validate;
/* *INDENT-ON* */
return new_stonith;
}
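/* A minimal connection life cycle for the API object built above
 * (illustrative only; "my-tool" is a hypothetical client name):
 */
#if 0
static void
example_lifecycle(void)
{
    stonith_t *st = stonith_api_new();

    if ((st != NULL) && (st->cmds->connect(st, "my-tool", NULL) == pcmk_ok)) {
        /* ... issue fencing requests via st->cmds ... */
        st->cmds->disconnect(st);
    }
    stonith_api_delete(st); /* safe even if st is NULL */
}
#endif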
/*!
* \brief Make a blocking connection attempt to the fencer
*
* \param[in,out] st Fencer API object
* \param[in] name Client name to use with fencer
* \param[in] max_attempts Return error if this many attempts fail
*
* \return pcmk_ok on success, result of last attempt otherwise
*/
int
stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts)
{
int rc = -EINVAL; // if max_attempts is not positive
for (int attempt = 1; attempt <= max_attempts; attempt++) {
rc = st->cmds->connect(st, name, NULL);
if (rc == pcmk_ok) {
return pcmk_ok;
} else if (attempt < max_attempts) {
crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s "
CRM_XS " rc=%d",
attempt, max_attempts, pcmk_strerror(rc), rc);
sleep(2);
}
}
crm_notice("Could not connect to fencer: %s " CRM_XS " rc=%d",
pcmk_strerror(rc), rc);
return rc;
}
stonith_key_value_t *
stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value)
{
stonith_key_value_t *p, *end;
p = calloc(1, sizeof(stonith_key_value_t));
if (key) {
p->key = strdup(key);
}
if (value) {
p->value = strdup(value);
}
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
void
stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values)
{
stonith_key_value_t *p;
while (head) {
p = head->next;
if (keys) {
free(head->key);
}
if (values) {
free(head->value);
}
free(head);
head = p;
}
}
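/* Building and releasing a parameter list with the helpers above
 * (illustrative only; the key/value content is hypothetical):
 */
#if 0
static void
example_params(void)
{
    stonith_key_value_t *params = NULL;

    params = stonith_key_value_add(params, "ipaddr", "192.168.122.10");
    params = stonith_key_value_add(params, "login", "admin");
    /* ... pass params to an API call such as register_device() ... */
    stonith_key_value_freeall(params, 1, 1); /* free keys and values too */
}
#endif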
#define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON)
#define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __FUNCTION__, args)
int
stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off)
{
int rc = pcmk_ok;
stonith_t *st = stonith_api_new();
const char *action = off? "off" : "reboot";
api_log_open();
if (st == NULL) {
api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s",
action, nodeid, uname);
return -EPROTO;
}
rc = st->cmds->connect(st, "stonith-api", NULL);
if (rc != pcmk_ok) {
api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)",
action, nodeid, uname, pcmk_strerror(rc), rc);
} else {
char *name = NULL;
enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide;
if (uname != NULL) {
name = strdup(uname);
} else if (nodeid > 0) {
opts |= st_opt_cs_nodeid;
name = crm_itoa(nodeid);
}
rc = st->cmds->fence(st, opts, name, action, timeout, 0);
free(name);
if (rc != pcmk_ok) {
api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)",
action, nodeid, uname, pcmk_strerror(rc), rc);
} else {
api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action);
}
}
stonith_api_delete(st);
return rc;
}
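/* Usage sketch for the convenience wrapper above (illustrative only; the
 * node name and timeout are hypothetical): reboot a node by name.
 */
#if 0
static void
example_kick(void)
{
    int rc = stonith_api_kick(0, "node1", 120, false);

    if (rc != pcmk_ok) {
        crm_err("Could not kick node1: %s", pcmk_strerror(rc));
    }
}
#endif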
time_t
stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress)
{
int rc = pcmk_ok;
time_t when = 0;
stonith_t *st = stonith_api_new();
stonith_history_t *history = NULL, *hp = NULL;
if (st == NULL) {
api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: "
"API initialization failed", nodeid, uname);
return when;
}
rc = st->cmds->connect(st, "stonith-api", NULL);
if (rc != pcmk_ok) {
api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc);
} else {
int entries = 0;
int progress = 0;
int completed = 0;
char *name = NULL;
enum stonith_call_options opts = st_opt_sync_call;
if (uname != NULL) {
name = strdup(uname);
} else if (nodeid > 0) {
opts |= st_opt_cs_nodeid;
name = crm_itoa(nodeid);
}
rc = st->cmds->history(st, opts, name, &history, 120);
free(name);
for (hp = history; hp; hp = hp->next) {
entries++;
if (in_progress) {
progress++;
if (hp->state != st_done && hp->state != st_failed) {
when = time(NULL);
}
} else if (hp->state == st_done) {
completed++;
if (hp->completed > when) {
when = hp->completed;
}
}
}
stonith_history_free(history);
if(rc == pcmk_ok) {
api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed);
} else {
api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc);
}
}
stonith_api_delete(st);
if(when) {
api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when);
}
return when;
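/* Usage sketch (illustrative only; the node name is hypothetical): query
 * when a node was last fenced, or whether fencing is still in progress.
 */
#if 0
static void
example_fence_times(void)
{
    time_t last = stonith_api_time(0, "node1", false);

    if (last != 0) {
        crm_info("node1 was last fenced at %ld", (long) last);
    }
    if (stonith_api_time(0, "node1", true) != 0) {
        crm_info("Fencing of node1 is currently in progress");
    }
}
#endif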
}
bool
stonith_agent_exists(const char *agent, int timeout)
{
stonith_t *st = NULL;
stonith_key_value_t *devices = NULL;
stonith_key_value_t *dIter = NULL;
bool rc = FALSE;
if (agent == NULL) {
return rc;
}
st = stonith_api_new();
+ if (st == NULL) {
+ crm_err("Could not list fence agents: API memory allocation failed");
+ return FALSE;
+ }
st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout == 0 ? 120 : timeout);
for (dIter = devices; dIter != NULL; dIter = dIter->next) {
if (crm_str_eq(dIter->value, agent, TRUE)) {
rc = TRUE;
break;
}
}
stonith_key_value_freeall(devices, 1, 1);
stonith_api_delete(st);
return rc;
}
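/* Usage sketch (illustrative only; the agent name is hypothetical):
 */
#if 0
static void
example_agent_check(void)
{
    if (stonith_agent_exists("fence_ipmilan", 10)) {
        /* the agent is installed and listed by the fencer */
    }
}
#endif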
const char *
stonith_action_str(const char *action)
{
if (action == NULL) {
return "fencing";
} else if (!strcmp(action, "on")) {
return "unfencing";
} else if (!strcmp(action, "off")) {
return "turning off";
} else {
return action;
}
}
/*!
* \internal
* \brief Parse a target name from one line of a target list string
*
* \param[in] line One line of a target list string
* \param[in] len String length of line
* \param[in,out] output List to add newly allocated target name to
*/
static void
parse_list_line(const char *line, int len, GList **output)
{
size_t i = 0;
size_t entry_start = 0;
/* Skip complaints about additional parameters the device doesn't understand
*
* @TODO Document or eliminate the implied restriction of target names
*/
if (strstr(line, "invalid") || strstr(line, "variable")) {
crm_debug("Skipping list output line: %s", line);
return;
}
// Process line content, character by character
for (i = 0; i <= len; i++) {
if (isspace(line[i]) || (line[i] == ',') || (line[i] == ';')
|| (line[i] == '\0')) {
// We've found a separator (i.e. the end of an entry)
int rc = 0;
char *entry = NULL;
if (i == entry_start) {
// Skip leading and sequential separators
entry_start = i + 1;
continue;
}
entry = calloc(i - entry_start + 1, sizeof(char));
CRM_ASSERT(entry != NULL);
/* Read entry, stopping at first separator
*
* @TODO Document or eliminate these character restrictions
*/
rc = sscanf(line + entry_start, "%[a-zA-Z0-9_-.]", entry);
if (rc != 1) {
crm_warn("Could not parse list output entry: %s "
CRM_XS " entry_start=%d position=%d",
line + entry_start, entry_start, i);
free(entry);
} else if (safe_str_eq(entry, "on") || safe_str_eq(entry, "off")) {
/* Some agents print the target status in the list output,
* though none are known now (the separate list-status command
* is used for this, but it can also print "UNKNOWN"). To handle
* this possibility, skip such entries.
*
* @TODO Document or eliminate the implied restriction of target
* names.
*/
free(entry);
} else {
// We have a valid entry
*output = g_list_append(*output, entry);
}
entry_start = i + 1;
}
}
}
/*!
* \internal
* \brief Parse a list of targets from a string
*
* \param[in] list_output Target list as a string
*
* \return List of target names
* \note The target list string format is flexible, to allow for user-specified
lists such as pcmk_host_list and the output of an agent's list action
* (whether direct or via the API, which escapes newlines). There may be
* multiple lines, separated by either a newline or an escaped newline
* (backslash n). Each line may have one or more target names, separated
* by any combination of whitespace, commas, and semi-colons. Lines
* containing "invalid" or "variable" will be ignored entirely. Target
* names "on" or "off" (case-insensitive) will be ignored. Target names
* may contain only alphanumeric characters, underbars (_), dashes (-),
* and dots (.) (if any other character occurs in the name, it and all
* subsequent characters in the name will be ignored).
* \note The caller is responsible for freeing the result with
* g_list_free_full(result, free).
*/
GList *
stonith__parse_targets(const char *target_spec)
{
GList *targets = NULL;
if (target_spec != NULL) {
size_t out_len = strlen(target_spec);
size_t line_start = 0; // Starting index of line being processed
for (size_t i = 0; i <= out_len; ++i) {
if ((target_spec[i] == '\n') || (target_spec[i] == '\0')
|| ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) {
// We've reached the end of one line of output
int len = i - line_start;
if (len > 0) {
char *line = strndup(target_spec + line_start, len);
line[len] = '\0'; // Because it might be a newline
parse_list_line(line, len, &targets);
free(line);
}
if (target_spec[i] == '\\') {
++i; // backslash-n takes up two positions
}
line_start = i + 1;
}
}
}
return targets;
}
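/* Examples of the accepted formats (illustrative only): each of the inputs
 * below yields the list { "node1", "node2", "node3" }. The double backslash
 * in the third call produces a literal backslash-n, i.e. an escaped newline.
 */
#if 0
static void
example_parse(void)
{
    GList *targets = NULL;

    targets = stonith__parse_targets("node1 node2 node3");
    g_list_free_full(targets, free);

    targets = stonith__parse_targets("node1,node2;node3");
    g_list_free_full(targets, free);

    targets = stonith__parse_targets("node1\\nnode2\\nnode3");
    g_list_free_full(targets, free);
}
#endif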
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index 7749edc1de..db13297943 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -1,2019 +1,2034 @@
/*
* Copyright 2012-2018 David Vossel
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef HAVE_GNUTLS_GNUTLS_H
# undef KEYFILE
# include <gnutls/gnutls.h>
#endif
#include
#include
#include
#include
#include
#define MAX_TLS_RECV_WAIT 10000
CRM_TRACE_INIT_DATA(lrmd);
static int lrmd_api_disconnect(lrmd_t * lrmd);
static int lrmd_api_is_connected(lrmd_t * lrmd);
/* IPC proxy functions */
int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg);
void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
#ifdef HAVE_GNUTLS_GNUTLS_H
# define LRMD_CLIENT_HANDSHAKE_TIMEOUT 5000 /* 5 seconds */
gnutls_psk_client_credentials_t psk_cred_s;
int lrmd_tls_set_key(gnutls_datum_t * key);
static void lrmd_tls_disconnect(lrmd_t * lrmd);
static int global_remote_msg_id = 0;
int lrmd_tls_send_msg(crm_remote_t * session, xmlNode * msg, uint32_t id, const char *msg_type);
static void lrmd_tls_connection_destroy(gpointer userdata);
#endif
typedef struct lrmd_private_s {
enum client_type type;
char *token;
mainloop_io_t *source;
/* IPC parameters */
crm_ipc_t *ipc;
crm_remote_t *remote;
/* Extra TLS parameters */
char *remote_nodename;
#ifdef HAVE_GNUTLS_GNUTLS_H
char *server;
int port;
gnutls_psk_client_credentials_t psk_cred_c;
/* while the async connection is occurring, this is the id
* of the connection timeout timer. */
int async_timer;
int sock;
/* Since TLS requires a network round trip for each request/reply,
 * there are times when we just want to send a request from the client
 * and not wait around for (or even care about) the reply. */
int expected_late_replies;
GList *pending_notify;
crm_trigger_t *process_notify;
#endif
lrmd_event_callback callback;
/* Internal IPC proxy msg passing for remote guests */
void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg);
void *proxy_callback_userdata;
char *peer_version;
} lrmd_private_t;
static lrmd_list_t *
lrmd_list_add(lrmd_list_t * head, const char *value)
{
lrmd_list_t *p, *end;
p = calloc(1, sizeof(lrmd_list_t));
p->val = strdup(value);
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
void
lrmd_list_freeall(lrmd_list_t * head)
{
lrmd_list_t *p;
while (head) {
char *val = (char *)head->val;
p = head->next;
free(val);
free(head);
head = p;
}
}
lrmd_key_value_t *
lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value)
{
lrmd_key_value_t *p, *end;
p = calloc(1, sizeof(lrmd_key_value_t));
p->key = strdup(key);
p->value = strdup(value);
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
void
lrmd_key_value_freeall(lrmd_key_value_t * head)
{
lrmd_key_value_t *p;
while (head) {
p = head->next;
free(head->key);
free(head->value);
free(head);
head = p;
}
}
lrmd_event_data_t *
lrmd_copy_event(lrmd_event_data_t * event)
{
lrmd_event_data_t *copy = NULL;
copy = calloc(1, sizeof(lrmd_event_data_t));
/* The memcpy below copies all the integer values; we then duplicate each
 * string so that no pointers into the original event are left dangling. */
memcpy(copy, event, sizeof(lrmd_event_data_t));
copy->rsc_id = event->rsc_id ? strdup(event->rsc_id) : NULL;
copy->op_type = event->op_type ? strdup(event->op_type) : NULL;
copy->user_data = event->user_data ? strdup(event->user_data) : NULL;
copy->output = event->output ? strdup(event->output) : NULL;
copy->exit_reason = event->exit_reason ? strdup(event->exit_reason) : NULL;
copy->remote_nodename = event->remote_nodename ? strdup(event->remote_nodename) : NULL;
copy->params = crm_str_table_dup(event->params);
return copy;
}
void
lrmd_free_event(lrmd_event_data_t * event)
{
if (!event) {
return;
}
/* Cast away the const so that free() doesn't complain */
free((char *)event->rsc_id);
free((char *)event->op_type);
free((char *)event->user_data);
free((char *)event->output);
free((char *)event->exit_reason);
free((char *)event->remote_nodename);
if (event->params) {
g_hash_table_destroy(event->params);
}
free(event);
}
static int
lrmd_dispatch_internal(lrmd_t * lrmd, xmlNode * msg)
{
const char *type;
const char *proxy_session = crm_element_value(msg, F_LRMD_IPC_SESSION);
lrmd_private_t *native = lrmd->lrmd_private;
lrmd_event_data_t event = { 0, };
if (proxy_session != NULL) {
/* this is proxy business */
lrmd_internal_proxy_dispatch(lrmd, msg);
return 1;
} else if (!native->callback) {
/* no callback set */
crm_trace("notify event received but client has not set callback");
return 1;
}
event.remote_nodename = native->remote_nodename;
type = crm_element_value(msg, F_LRMD_OPERATION);
crm_element_value_int(msg, F_LRMD_CALLID, &event.call_id);
event.rsc_id = crm_element_value(msg, F_LRMD_RSC_ID);
if (crm_str_eq(type, LRMD_OP_RSC_REG, TRUE)) {
event.type = lrmd_event_register;
} else if (crm_str_eq(type, LRMD_OP_RSC_UNREG, TRUE)) {
event.type = lrmd_event_unregister;
} else if (crm_str_eq(type, LRMD_OP_RSC_EXEC, TRUE)) {
crm_element_value_int(msg, F_LRMD_TIMEOUT, &event.timeout);
crm_element_value_ms(msg, F_LRMD_RSC_INTERVAL, &event.interval_ms);
crm_element_value_int(msg, F_LRMD_RSC_START_DELAY, &event.start_delay);
crm_element_value_int(msg, F_LRMD_EXEC_RC, (int *)&event.rc);
crm_element_value_int(msg, F_LRMD_OP_STATUS, &event.op_status);
crm_element_value_int(msg, F_LRMD_RSC_DELETED, &event.rsc_deleted);
crm_element_value_int(msg, F_LRMD_RSC_RUN_TIME, (int *)&event.t_run);
crm_element_value_int(msg, F_LRMD_RSC_RCCHANGE_TIME, (int *)&event.t_rcchange);
crm_element_value_int(msg, F_LRMD_RSC_EXEC_TIME, (int *)&event.exec_time);
crm_element_value_int(msg, F_LRMD_RSC_QUEUE_TIME, (int *)&event.queue_time);
event.op_type = crm_element_value(msg, F_LRMD_RSC_ACTION);
event.user_data = crm_element_value(msg, F_LRMD_RSC_USERDATA_STR);
event.output = crm_element_value(msg, F_LRMD_RSC_OUTPUT);
event.exit_reason = crm_element_value(msg, F_LRMD_RSC_EXIT_REASON);
event.type = lrmd_event_exec_complete;
event.params = xml2list(msg);
} else if (crm_str_eq(type, LRMD_OP_NEW_CLIENT, TRUE)) {
event.type = lrmd_event_new_client;
} else if (crm_str_eq(type, LRMD_OP_POKE, TRUE)) {
event.type = lrmd_event_poke;
} else {
return 1;
}
crm_trace("op %s notify event received", type);
native->callback(&event);
if (event.params) {
g_hash_table_destroy(event.params);
}
return 1;
}
static int
lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *msg;
int rc;
if (!native->callback) {
/* no callback set */
return 1;
}
msg = string2xml(buffer);
rc = lrmd_dispatch_internal(lrmd, msg);
free_xml(msg);
return rc;
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_free_xml(gpointer userdata)
{
free_xml((xmlNode *) userdata);
}
static int
lrmd_tls_connected(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->remote->tls_session) {
return TRUE;
}
return FALSE;
}
static int
lrmd_tls_dispatch(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *xml = NULL;
int rc = 0;
int disconnected = 0;
if (lrmd_tls_connected(lrmd) == FALSE) {
crm_trace("TLS dispatch triggered after disconnect");
return 0;
}
crm_trace("TLS dispatch triggered");
/* First check if there are any pending notifies to process that came
* while we were waiting for replies earlier. */
if (native->pending_notify) {
GList *iter = NULL;
crm_trace("Processing pending notifies");
for (iter = native->pending_notify; iter; iter = iter->next) {
lrmd_dispatch_internal(lrmd, iter->data);
}
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
/* Next read the current buffer and see if there are any messages to handle. */
rc = crm_remote_ready(native->remote, 0);
if (rc == 0) {
/* nothing to read, see if any full messages are already in buffer. */
xml = crm_remote_parse_buffer(native->remote);
} else if (rc < 0) {
disconnected = 1;
} else {
crm_remote_recv(native->remote, -1, &disconnected);
xml = crm_remote_parse_buffer(native->remote);
}
while (xml) {
const char *msg_type = crm_element_value(xml, F_LRMD_REMOTE_MSG_TYPE);
if (safe_str_eq(msg_type, "notify")) {
lrmd_dispatch_internal(lrmd, xml);
} else if (safe_str_eq(msg_type, "reply")) {
if (native->expected_late_replies > 0) {
native->expected_late_replies--;
} else {
int reply_id = 0;
crm_element_value_int(xml, F_LRMD_CALLID, &reply_id);
/* if this happens, we want to know about it */
crm_err("Got outdated Pacemaker Remote reply %d", reply_id);
}
}
free_xml(xml);
xml = crm_remote_parse_buffer(native->remote);
}
if (disconnected) {
crm_info("Lost %s executor connection while reading data",
(native->remote_nodename? native->remote_nodename : "local"));
lrmd_tls_disconnect(lrmd);
return 0;
}
return 1;
}
#endif
/* Not used with mainloop */
int
lrmd_poll(lrmd_t * lrmd, int timeout)
{
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case CRM_CLIENT_IPC:
return crm_ipc_ready(native->ipc);
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
if (native->pending_notify) {
return 1;
}
return crm_remote_ready(native->remote, 0);
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
return 0;
}
/* Not used with mainloop */
bool
lrmd_dispatch(lrmd_t * lrmd)
{
lrmd_private_t *private = NULL;
CRM_ASSERT(lrmd != NULL);
private = lrmd->lrmd_private;
switch (private->type) {
case CRM_CLIENT_IPC:
while (crm_ipc_ready(private->ipc)) {
if (crm_ipc_read(private->ipc) > 0) {
const char *msg = crm_ipc_buffer(private->ipc);
lrmd_ipc_dispatch(msg, strlen(msg), lrmd);
}
}
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
lrmd_tls_dispatch(lrmd);
break;
#endif
default:
crm_err("Unsupported connection type: %d", private->type);
}
if (lrmd_api_is_connected(lrmd) == FALSE) {
crm_err("Connection closed");
return FALSE;
}
return TRUE;
}
static xmlNode *
lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout,
enum lrmd_call_options options)
{
xmlNode *op_msg = create_xml_node(NULL, "lrmd_command");
CRM_CHECK(op_msg != NULL, return NULL);
CRM_CHECK(token != NULL, return NULL);
crm_xml_add(op_msg, F_XML_TAGNAME, "lrmd_command");
crm_xml_add(op_msg, F_TYPE, T_LRMD);
crm_xml_add(op_msg, F_LRMD_CALLBACK_TOKEN, token);
crm_xml_add(op_msg, F_LRMD_OPERATION, op);
crm_xml_add_int(op_msg, F_LRMD_TIMEOUT, timeout);
crm_xml_add_int(op_msg, F_LRMD_CALLOPTS, options);
if (data != NULL) {
add_message_xml(op_msg, F_LRMD_CALLDATA, data);
}
crm_trace("Created executor %s command with call options %.8lx (%d)",
op, (long)options, options);
return op_msg;
}
static void
lrmd_ipc_connection_destroy(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
crm_info("IPC connection destroyed");
/* Prevent these from being cleaned up in lrmd_api_disconnect() */
native->ipc = NULL;
native->source = NULL;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.type = lrmd_event_disconnect;
event.remote_nodename = native->remote_nodename;
native->callback(&event);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_tls_connection_destroy(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
crm_info("TLS connection destroyed");
if (native->remote->tls_session) {
gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
}
if (native->psk_cred_c) {
gnutls_psk_free_client_credentials(native->psk_cred_c);
}
if (native->sock) {
close(native->sock);
}
if (native->process_notify) {
mainloop_destroy_trigger(native->process_notify);
native->process_notify = NULL;
}
if (native->pending_notify) {
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
free(native->remote->buffer);
native->remote->buffer = NULL;
native->source = 0;
native->sock = 0;
native->psk_cred_c = NULL;
native->remote->tls_session = NULL;
native->sock = 0;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.remote_nodename = native->remote_nodename;
event.type = lrmd_event_disconnect;
native->callback(&event);
}
return;
}
int
lrmd_tls_send_msg(crm_remote_t * session, xmlNode * msg, uint32_t id, const char *msg_type)
{
crm_xml_add_int(msg, F_LRMD_REMOTE_MSG_ID, id);
crm_xml_add(msg, F_LRMD_REMOTE_MSG_TYPE, msg_type);
return crm_remote_send(session, msg);
}
static xmlNode *
lrmd_tls_recv_reply(lrmd_t * lrmd, int total_timeout, int expected_reply_id, int *disconnected)
{
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *xml = NULL;
time_t start = time(NULL);
const char *msg_type = NULL;
int reply_id = 0;
int remaining_timeout = 0;
/* A timeout of 0 makes no sense here; we have to allow some time for the
 * response to come back. Cap nonpositive or over-large timeouts at 10s. */
if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) {
total_timeout = MAX_TLS_RECV_WAIT;
}
while (!xml) {
xml = crm_remote_parse_buffer(native->remote);
if (!xml) {
/* read some more off the tls buffer if we still have time left. */
if (remaining_timeout) {
remaining_timeout = total_timeout - ((time(NULL) - start) * 1000);
} else {
remaining_timeout = total_timeout;
}
if (remaining_timeout <= 0) {
crm_err("Never received the expected reply during the timeout period, disconnecting.");
*disconnected = TRUE;
return NULL;
}
crm_remote_recv(native->remote, remaining_timeout, disconnected);
xml = crm_remote_parse_buffer(native->remote);
if (!xml) {
crm_err("Unable to receive expected reply, disconnecting.");
*disconnected = TRUE;
return NULL;
} else if (*disconnected) {
return NULL;
}
}
CRM_ASSERT(xml != NULL);
crm_element_value_int(xml, F_LRMD_REMOTE_MSG_ID, &reply_id);
msg_type = crm_element_value(xml, F_LRMD_REMOTE_MSG_TYPE);
if (!msg_type) {
crm_err("Empty msg type received while waiting for reply");
free_xml(xml);
xml = NULL;
} else if (safe_str_eq(msg_type, "notify")) {
/* got a notify while waiting for reply, trigger the notify to be processed later */
crm_info("queueing notify");
native->pending_notify = g_list_append(native->pending_notify, xml);
if (native->process_notify) {
crm_info("notify trigger set.");
mainloop_set_trigger(native->process_notify);
}
xml = NULL;
} else if (safe_str_neq(msg_type, "reply")) {
/* msg isn't a reply, make some noise */
crm_err("Expected a reply, got %s", msg_type);
free_xml(xml);
xml = NULL;
} else if (reply_id != expected_reply_id) {
if (native->expected_late_replies > 0) {
native->expected_late_replies--;
} else {
crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id);
}
free_xml(xml);
xml = NULL;
}
}
if (native->remote->buffer && native->process_notify) {
mainloop_set_trigger(native->process_notify);
}
return xml;
}
static int
lrmd_tls_send(lrmd_t * lrmd, xmlNode * msg)
{
int rc = 0;
lrmd_private_t *native = lrmd->lrmd_private;
global_remote_msg_id++;
if (global_remote_msg_id <= 0) {
global_remote_msg_id = 1;
}
rc = lrmd_tls_send_msg(native->remote, msg, global_remote_msg_id, "request");
if (rc <= 0) {
crm_err("Disconnecting because TLS message could not be sent to Pacemaker Remote");
lrmd_tls_disconnect(lrmd);
return -ENOTCONN;
}
return pcmk_ok;
}
static int
lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
{
int rc = 0;
int disconnected = 0;
xmlNode *xml = NULL;
if (lrmd_tls_connected(lrmd) == FALSE) {
return -1;
}
rc = lrmd_tls_send(lrmd, msg);
if (rc < 0) {
return rc;
}
xml = lrmd_tls_recv_reply(lrmd, timeout, global_remote_msg_id, &disconnected);
if (disconnected) {
crm_err("Pacemaker Remote disconnected while waiting for reply to request id %d",
global_remote_msg_id);
lrmd_tls_disconnect(lrmd);
rc = -ENOTCONN;
} else if (!xml) {
crm_err("Did not receive reply from Pacemaker Remote for request id %d (timeout %dms)",
global_remote_msg_id, timeout);
rc = -ECOMM;
}
if (reply) {
*reply = xml;
} else {
free_xml(xml);
}
return rc;
}
#endif
static int
lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
{
int rc = -1;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case CRM_CLIENT_IPC:
rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply);
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
return rc;
}
static int
lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg)
{
int rc = -1;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case CRM_CLIENT_IPC:
rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
rc = lrmd_tls_send(lrmd, msg);
if (rc == pcmk_ok) {
/* we don't want to wait around for the reply, but
* since the request/reply protocol needs to behave the same
* as libqb, a reply will eventually come later anyway. */
native->expected_late_replies++;
}
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
return rc;
}
static int
lrmd_api_is_connected(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case CRM_CLIENT_IPC:
return crm_ipc_connected(native->ipc);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
return lrmd_tls_connected(lrmd);
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
return 0;
}
/*!
* \internal
* \brief Send a prepared API command to the executor
*
* \param[in] lrmd Existing connection to the executor
* \param[in] op Name of API command to send
* \param[in] data Command data XML to add to the sent command
* \param[out] output_data If expecting a reply, it will be stored here
* \param[in] timeout Timeout in milliseconds (if 0, defaults to
* a sensible value per the type of connection,
* standard vs. pacemaker remote);
* also propagated to the command XML
* \param[in] call_options Call options to pass to server when sending
* \param[in] expect_reply If TRUE, wait for a reply from the server;
* must be TRUE for IPC (as opposed to TLS) clients
*
* \return pcmk_ok on success, -errno on error
*/
static int
lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data,
xmlNode **output_data, int timeout,
enum lrmd_call_options options, gboolean expect_reply)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *op_msg = NULL;
xmlNode *op_reply = NULL;
if (!lrmd_api_is_connected(lrmd)) {
return -ENOTCONN;
}
if (op == NULL) {
crm_err("No operation specified");
return -EINVAL;
}
CRM_CHECK(native->token != NULL, ;);
crm_trace("Sending %s op to executor", op);
op_msg = lrmd_create_op(native->token, op, data, timeout, options);
if (op_msg == NULL) {
return -EINVAL;
}
if (expect_reply) {
rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply);
} else {
rc = lrmd_send_xml_no_reply(lrmd, op_msg);
goto done;
}
if (rc < 0) {
crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%d): %d", op, timeout, rc);
rc = -ECOMM;
goto done;
} else if(op_reply == NULL) {
rc = -ENOMSG;
goto done;
}
rc = pcmk_ok;
crm_trace("%s op reply received", op);
if (crm_element_value_int(op_reply, F_LRMD_RC, &rc) != 0) {
rc = -ENOMSG;
goto done;
}
crm_log_xml_trace(op_reply, "Reply");
if (output_data) {
*output_data = op_reply;
op_reply = NULL; /* Prevent subsequent free */
}
done:
if (lrmd_api_is_connected(lrmd) == FALSE) {
crm_err("Executor disconnected");
}
free_xml(op_msg);
free_xml(op_reply);
return rc;
}
static int
lrmd_api_poke_connection(lrmd_t * lrmd)
{
int rc;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0, native->type == CRM_CLIENT_IPC ? TRUE : FALSE);
free_xml(data);
return rc < 0 ? rc : pcmk_ok;
}
int
remote_proxy_check(lrmd_t * lrmd, GHashTable *hash)
{
int rc;
const char *value;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *data = create_xml_node(NULL, F_LRMD_OPERATION);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
value = g_hash_table_lookup(hash, "stonith-watchdog-timeout");
crm_xml_add(data, F_LRMD_WATCHDOG, value);
rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, native->type == CRM_CLIENT_IPC ? TRUE : FALSE);
free_xml(data);
return rc < 0 ? rc : pcmk_ok;
}
static int
lrmd_handshake(lrmd_t * lrmd, const char *name)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *reply = NULL;
xmlNode *hello = create_xml_node(NULL, "lrmd_command");
crm_xml_add(hello, F_TYPE, T_LRMD);
crm_xml_add(hello, F_LRMD_OPERATION, CRM_OP_REGISTER);
crm_xml_add(hello, F_LRMD_CLIENTNAME, name);
crm_xml_add(hello, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
/* advertise that we are a proxy provider */
if (native->proxy_callback) {
crm_xml_add(hello, F_LRMD_IS_IPC_PROVIDER, "true");
}
rc = lrmd_send_xml(lrmd, hello, -1, &reply);
if (rc < 0) {
crm_perror(LOG_DEBUG, "Couldn't complete registration with the executor API: %d", rc);
rc = -ECOMM;
} else if (reply == NULL) {
crm_err("Did not receive registration reply");
rc = -EPROTO;
} else {
const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION);
const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION);
const char *tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID);
crm_element_value_int(reply, F_LRMD_RC, &rc);
if (rc == -EPROTO) {
crm_err("Executor protocol version mismatch between client (%s) and server (%s)",
LRMD_PROTOCOL_VERSION, version);
crm_log_xml_err(reply, "Protocol Error");
} else if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
crm_err("Invalid registration message: %s", msg_type);
crm_log_xml_err(reply, "Bad reply");
rc = -EPROTO;
} else if (tmp_ticket == NULL) {
crm_err("No registration token provided");
crm_log_xml_err(reply, "Bad reply");
rc = -EPROTO;
} else {
crm_trace("Obtained registration token: %s", tmp_ticket);
native->token = strdup(tmp_ticket);
native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */
rc = pcmk_ok;
}
}
free_xml(reply);
free_xml(hello);
if (rc != pcmk_ok) {
lrmd_api_disconnect(lrmd);
}
return rc;
}
static int
lrmd_ipc_connect(lrmd_t * lrmd, int *fd)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
static struct ipc_client_callbacks lrmd_callbacks = {
.dispatch = lrmd_ipc_dispatch,
.destroy = lrmd_ipc_connection_destroy
};
crm_info("Connecting to executor");
if (fd) {
/* No mainloop */
native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0);
if (native->ipc && crm_ipc_connect(native->ipc)) {
*fd = crm_ipc_get_fd(native->ipc);
} else if (native->ipc) {
crm_perror(LOG_ERR, "Connection to executor failed");
rc = -ENOTCONN;
}
} else {
native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks);
native->ipc = mainloop_get_ipc_client(native->source);
}
if (native->ipc == NULL) {
crm_debug("Could not connect to the executor API");
rc = -ENOTCONN;
}
return rc;
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
copy_gnutls_datum(gnutls_datum_t *dest, gnutls_datum_t *source)
{
dest->data = gnutls_malloc(source->size);
CRM_ASSERT(dest->data);
memcpy(dest->data, source->data, source->size);
dest->size = source->size;
}
static void
clear_gnutls_datum(gnutls_datum_t *datum)
{
gnutls_free(datum->data);
datum->data = NULL;
datum->size = 0;
}
#define KEY_READ_LEN 256
static int
set_key(gnutls_datum_t * key, const char *location)
{
FILE *stream;
size_t buf_len = KEY_READ_LEN;
static gnutls_datum_t key_cache = { 0, };
static time_t key_cache_updated = 0;
if (location == NULL) {
return -1;
}
if (key_cache.data != NULL) {
if ((time(NULL) - key_cache_updated) < 60) {
copy_gnutls_datum(key, &key_cache);
crm_debug("Using cached Pacemaker Remote key");
return 0;
} else {
clear_gnutls_datum(&key_cache);
key_cache_updated = 0;
crm_debug("Cleared Pacemaker Remote key cache");
}
}
stream = fopen(location, "r");
if (!stream) {
return -1;
}
key->data = gnutls_malloc(buf_len);
key->size = 0;
while (!feof(stream)) {
int next = fgetc(stream);
if (next == EOF) {
if (!feof(stream)) {
crm_err("Error reading Pacemaker Remote key; copy in memory may be corrupted");
}
break;
}
if (key->size == buf_len) {
buf_len = key->size + KEY_READ_LEN;
key->data = gnutls_realloc(key->data, buf_len);
CRM_ASSERT(key->data);
}
key->data[key->size++] = (unsigned char) next;
}
fclose(stream);
if (key->size == 0) {
clear_gnutls_datum(key);
return -1;
}
if (key_cache.data == NULL) {
copy_gnutls_datum(&key_cache, key);
key_cache_updated = time(NULL);
crm_debug("Cached Pacemaker Remote key");
}
return 0;
}
int
lrmd_tls_set_key(gnutls_datum_t * key)
{
const char *specific_location = getenv("PCMK_authkey_location");
if (set_key(key, specific_location) == 0) {
crm_debug("Using custom authkey location %s", specific_location);
return pcmk_ok;
} else if (specific_location) {
crm_err("No valid Pacemaker Remote key found at %s, trying default location", specific_location);
}
if ((set_key(key, DEFAULT_REMOTE_KEY_LOCATION) != 0)
&& (set_key(key, ALT_REMOTE_KEY_LOCATION) != 0)) {
crm_err("No valid Pacemaker Remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION);
return -ENOKEY;
}
return pcmk_ok;
}
static void
lrmd_gnutls_global_init(void)
{
static int gnutls_init = 0;
if (!gnutls_init) {
crm_gnutls_global_init();
}
gnutls_init = 1;
}
#endif
static void
report_async_connection_result(lrmd_t * lrmd, int rc)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.type = lrmd_event_connect;
event.remote_nodename = native->remote_nodename;
event.connection_rc = rc;
native->callback(&event);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_tcp_connect_cb(void *userdata, int sock)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
char *name;
static struct mainloop_fd_callbacks lrmd_tls_callbacks = {
.dispatch = lrmd_tls_dispatch,
.destroy = lrmd_tls_connection_destroy,
};
int rc = sock;
gnutls_datum_t psk_key = { NULL, 0 };
native->async_timer = 0;
if (rc < 0) {
lrmd_tls_connection_destroy(lrmd);
crm_info("Could not connect to Pacemaker Remote at %s:%d",
native->server, native->port);
report_async_connection_result(lrmd, rc);
return;
}
/* The TCP connection was successful, so establish the TLS connection.
* @TODO make this async to avoid blocking code in client
*/
native->sock = sock;
rc = lrmd_tls_set_key(&psk_key);
if (rc != 0) {
crm_warn("Could not set key for Pacemaker Remote at %s:%d " CRM_XS " rc=%d",
native->server, native->port, rc);
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, rc);
return;
}
gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW);
gnutls_free(psk_key.data);
native->remote->tls_session = pcmk__new_tls_session(sock, GNUTLS_CLIENT,
GNUTLS_CRD_PSK,
native->psk_cred_c);
if (native->remote->tls_session == NULL) {
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, -EPROTO);
return;
}
if (crm_initiate_client_tls_handshake(native->remote, LRMD_CLIENT_HANDSHAKE_TIMEOUT) != 0) {
crm_warn("Disconnecting after TLS handshake with Pacemaker Remote server %s:%d failed",
native->server, native->port);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
native->remote->tls_session = NULL;
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, -EKEYREJECTED);
return;
}
crm_info("TLS connection to Pacemaker Remote server %s:%d succeeded",
native->server, native->port);
name = crm_strdup_printf("pacemaker-remote-%s:%d",
native->server, native->port);
native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_tls_dispatch, lrmd);
native->source =
mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, &lrmd_tls_callbacks);
rc = lrmd_handshake(lrmd, name);
free(name);
report_async_connection_result(lrmd, rc);
return;
}
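/*!
 * \internal
 * \brief Initiate an asynchronous TLS connection to a remote executor
 *
 * \param[in] lrmd     Executor connection to establish
 * \param[in] timeout  Connection timeout in milliseconds
 *
 * \return pcmk_ok if the connection was initiated (the final result will be
 *         reported via the client callback), or a negative error code
 */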
static int
lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /*ms */ )
{
int sock = 0;
int timer_id = 0;
lrmd_private_t *native = lrmd->lrmd_private;
lrmd_gnutls_global_init();
sock = crm_remote_tcp_connect_async(native->server, native->port, timeout,
&timer_id, lrmd, lrmd_tcp_connect_cb);
if (sock < 0) {
return sock;
}
native->sock = sock;
native->async_timer = timer_id;
return pcmk_ok;
}
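/*!
 * \internal
 * \brief Connect to a remote executor over TLS, blocking until complete
 *
 * \param[in]  lrmd  Executor connection to establish
 * \param[out] fd    If not NULL, where to store the connected socket
 *                   (otherwise the connection is attached to the mainloop)
 *
 * \return pcmk_ok on success, or a negative error code
 */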
static int
lrmd_tls_connect(lrmd_t * lrmd, int *fd)
{
static struct mainloop_fd_callbacks lrmd_tls_callbacks = {
.dispatch = lrmd_tls_dispatch,
.destroy = lrmd_tls_connection_destroy,
};
int rc;
lrmd_private_t *native = lrmd->lrmd_private;
int sock;
gnutls_datum_t psk_key = { NULL, 0 };
lrmd_gnutls_global_init();
sock = crm_remote_tcp_connect(native->server, native->port);
if (sock < 0) {
crm_warn("Could not establish Pacemaker Remote connection to %s", native->server);
lrmd_tls_connection_destroy(lrmd);
return -ENOTCONN;
}
native->sock = sock;
rc = lrmd_tls_set_key(&psk_key);
if (rc < 0) {
lrmd_tls_connection_destroy(lrmd);
return rc;
}
gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW);
gnutls_free(psk_key.data);
native->remote->tls_session = pcmk__new_tls_session(sock, GNUTLS_CLIENT,
GNUTLS_CRD_PSK,
native->psk_cred_c);
if (native->remote->tls_session == NULL) {
lrmd_tls_connection_destroy(lrmd);
return -EPROTO;
}
if (crm_initiate_client_tls_handshake(native->remote, LRMD_CLIENT_HANDSHAKE_TIMEOUT) != 0) {
crm_err("Session creation for %s:%d failed", native->server, native->port);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
native->remote->tls_session = NULL;
lrmd_tls_connection_destroy(lrmd);
return -EKEYREJECTED;
}
crm_info("Client TLS connection established with Pacemaker Remote server %s:%d", native->server,
native->port);
if (fd) {
*fd = sock;
} else {
char *name = crm_strdup_printf("pacemaker-remote-%s:%d",
native->server, native->port);
native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_tls_dispatch, lrmd);
native->source =
mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, &lrmd_tls_callbacks);
free(name);
}
return pcmk_ok;
}
#endif
static int
lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd)
{
int rc = -ENOTCONN;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case CRM_CLIENT_IPC:
rc = lrmd_ipc_connect(lrmd, fd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
rc = lrmd_tls_connect(lrmd, fd);
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
if (rc == pcmk_ok) {
rc = lrmd_handshake(lrmd, name);
}
return rc;
}
static int
lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout)
{
int rc = 0;
lrmd_private_t *native = lrmd->lrmd_private;
CRM_CHECK(native && native->callback, return -1);
switch (native->type) {
case CRM_CLIENT_IPC:
/* Fake an async connection with IPC. It should be fast
 * enough that we gain very little from async */
rc = lrmd_api_connect(lrmd, name, NULL);
if (!rc) {
report_async_connection_result(lrmd, rc);
}
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
rc = lrmd_tls_connect_async(lrmd, timeout);
if (rc) {
/* connection failed, report rc now */
report_async_connection_result(lrmd, rc);
}
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
return rc;
}
static void
lrmd_ipc_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
native->ipc = NULL;
} else if (native->ipc) {
/* Not attached to mainloop */
crm_ipc_t *ipc = native->ipc;
native->ipc = NULL;
crm_ipc_close(ipc);
crm_ipc_destroy(ipc);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_tls_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->remote->tls_session) {
gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
native->remote->tls_session = 0;
}
if (native->async_timer) {
g_source_remove(native->async_timer);
native->async_timer = 0;
}
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
} else if (native->sock) {
close(native->sock);
native->sock = 0;
}
if (native->pending_notify) {
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
}
#endif
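/*!
 * \internal
 * \brief Disconnect from the executor and reset connection state
 *
 * \param[in] lrmd  Executor connection to disconnect
 *
 * \return 0
 */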
static int
lrmd_api_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
crm_info("Disconnecting %s %s executor connection",
crm_client_type_text(native->type),
(native->remote_nodename? native->remote_nodename : "local"));
switch (native->type) {
case CRM_CLIENT_IPC:
lrmd_ipc_disconnect(lrmd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case CRM_CLIENT_TLS:
lrmd_tls_disconnect(lrmd);
break;
#endif
default:
crm_err("Unsupported connection type: %d", native->type);
}
free(native->token);
native->token = NULL;
free(native->peer_version);
native->peer_version = NULL;
return 0;
}
static int
lrmd_api_register_rsc(lrmd_t * lrmd,
const char *rsc_id,
const char *class,
const char *provider, const char *type, enum lrmd_call_options options)
{
int rc = pcmk_ok;
xmlNode *data = NULL;
if (!class || !type || !rsc_id) {
return -EINVAL;
}
if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) && !provider) {
return -EINVAL;
}
data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add(data, F_LRMD_CLASS, class);
crm_xml_add(data, F_LRMD_PROVIDER, provider);
crm_xml_add(data, F_LRMD_TYPE, type);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, TRUE);
free_xml(data);
return rc;
}
static int
lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, TRUE);
free_xml(data);
return rc;
}
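/*!
 * \brief Create a new resource information object
 *
 * \param[in] rsc_id    ID of resource
 * \param[in] standard  Resource agent standard
 * \param[in] provider  Resource agent provider, if any
 * \param[in] type      Resource agent type
 *
 * \return Newly allocated resource information object (asserts on allocation
 *         failure, so never NULL)
 */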
lrmd_rsc_info_t *
lrmd_new_rsc_info(const char *rsc_id, const char *standard,
const char *provider, const char *type)
{
lrmd_rsc_info_t *rsc_info = calloc(1, sizeof(lrmd_rsc_info_t));
CRM_ASSERT(rsc_info);
if (rsc_id) {
rsc_info->id = strdup(rsc_id);
CRM_ASSERT(rsc_info->id);
}
if (standard) {
rsc_info->standard = strdup(standard);
CRM_ASSERT(rsc_info->standard);
}
if (provider) {
rsc_info->provider = strdup(provider);
CRM_ASSERT(rsc_info->provider);
}
if (type) {
rsc_info->type = strdup(type);
CRM_ASSERT(rsc_info->type);
}
return rsc_info;
}
lrmd_rsc_info_t *
lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info)
{
return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard,
rsc_info->provider, rsc_info->type);
}
void
lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info)
{
if (!rsc_info) {
return;
}
free(rsc_info->id);
free(rsc_info->type);
free(rsc_info->standard);
free(rsc_info->provider);
free(rsc_info);
}
static lrmd_rsc_info_t *
lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
{
lrmd_rsc_info_t *rsc_info = NULL;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
xmlNode *output = NULL;
const char *class = NULL;
const char *provider = NULL;
const char *type = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, TRUE);
free_xml(data);
if (!output) {
return NULL;
}
class = crm_element_value(output, F_LRMD_CLASS);
provider = crm_element_value(output, F_LRMD_PROVIDER);
type = crm_element_value(output, F_LRMD_TYPE);
if (!class || !type) {
free_xml(output);
return NULL;
} else if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)
&& !provider) {
free_xml(output);
return NULL;
}
rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type);
free_xml(output);
return rsc_info;
}
void
lrmd_free_op_info(lrmd_op_info_t *op_info)
{
if (op_info) {
free(op_info->rsc_id);
free(op_info->action);
free(op_info->interval_ms_s);
free(op_info->timeout_ms_s);
free(op_info);
}
}
static int
lrmd_api_get_recurring_ops(lrmd_t *lrmd, const char *rsc_id, int timeout_ms,
enum lrmd_call_options options, GList **output)
{
xmlNode *data = NULL;
xmlNode *output_xml = NULL;
int rc = pcmk_ok;
if (output == NULL) {
return -EINVAL;
}
*output = NULL;
// Send request
if (rsc_id) {
data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
}
rc = lrmd_send_command(lrmd, LRMD_OP_GET_RECURRING, data, &output_xml,
timeout_ms, options, TRUE);
if (data) {
free_xml(data);
}
// Process reply
if ((rc != pcmk_ok) || (output_xml == NULL)) {
return rc;
}
for (xmlNode *rsc_xml = first_named_child(output_xml, F_LRMD_RSC);
rsc_xml != NULL; rsc_xml = crm_next_same_xml(rsc_xml)) {
rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
if (rsc_id == NULL) {
crm_err("Could not parse recurring operation information from executor");
continue;
}
for (xmlNode *op_xml = first_named_child(rsc_xml, T_LRMD_RSC_OP);
op_xml != NULL; op_xml = crm_next_same_xml(op_xml)) {
lrmd_op_info_t *op_info = calloc(1, sizeof(lrmd_op_info_t));
CRM_CHECK(op_info != NULL, break);
op_info->rsc_id = strdup(rsc_id);
op_info->action = crm_element_value_copy(op_xml, F_LRMD_RSC_ACTION);
op_info->interval_ms_s = crm_element_value_copy(op_xml,
F_LRMD_RSC_INTERVAL);
op_info->timeout_ms_s = crm_element_value_copy(op_xml,
F_LRMD_TIMEOUT);
*output = g_list_prepend(*output, op_info);
}
}
free_xml(output_xml);
return rc;
}
static void
lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback)
{
lrmd_private_t *native = lrmd->lrmd_private;
native->callback = callback;
}
void
lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg))
{
lrmd_private_t *native = lrmd->lrmd_private;
native->proxy_callback = callback;
native->proxy_callback_userdata = userdata;
}
void
lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->proxy_callback) {
crm_log_xml_trace(msg, "PROXY_INBOUND");
native->proxy_callback(lrmd, native->proxy_callback_userdata, msg);
}
}
int
lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg)
{
if (lrmd == NULL) {
return -ENOTCONN;
}
crm_xml_add(msg, F_LRMD_OPERATION, CRM_OP_IPC_FWD);
crm_log_xml_trace(msg, "PROXY_OUTBOUND");
return lrmd_send_xml_no_reply(lrmd, msg);
}
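/*!
 * \internal
 * \brief Retrieve a fence agent's meta-data via the fencer API
 *
 * \param[in]  provider  Agent namespace (passed through to the fencer API)
 * \param[in]  type      Fence agent name
 * \param[out] output    Where to store the meta-data
 *
 * \return pcmk_ok on success, or a negative error code
 */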
static int
stonith_get_metadata(const char *provider, const char *type, char **output)
{
int rc = pcmk_ok;
stonith_t *stonith_api = stonith_api_new();
- if(stonith_api) {
- stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type, provider, output, 0);
- stonith_api->cmds->free(stonith_api);
+ if (stonith_api == NULL) {
+ crm_err("Could not get fence agent meta-data: API memory allocation failed");
+ return -ENOMEM;
}
- if (*output == NULL) {
+
+ rc = stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type,
+ provider, output, 0);
+ if ((rc == pcmk_ok) && (*output == NULL)) {
rc = -EIO;
}
+ stonith_api->cmds->free(stonith_api);
return rc;
}
static int
lrmd_api_get_metadata(lrmd_t *lrmd, const char *standard, const char *provider,
const char *type, char **output,
enum lrmd_call_options options)
{
return lrmd->cmds->get_metadata_params(lrmd, standard, provider, type,
output, options, NULL);
}
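/*!
 * \internal
 * \brief Retrieve agent meta-data, passing parameters to the agent
 *
 * Stonith agents are handled via the fencer API; agents of any other
 * standard are invoked synchronously with a meta-data action.
 *
 * \note \p params is freed by this function in all cases.
 */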
static int
lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard,
const char *provider, const char *type,
char **output, enum lrmd_call_options options,
lrmd_key_value_t *params)
{
svc_action_t *action = NULL;
GHashTable *params_table = NULL;
if (!standard || !type) {
lrmd_key_value_freeall(params);
return -EINVAL;
}
if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
lrmd_key_value_freeall(params);
return stonith_get_metadata(provider, type, output);
}
params_table = crm_str_table_new();
for (const lrmd_key_value_t *param = params; param; param = param->next) {
g_hash_table_insert(params_table, strdup(param->key), strdup(param->value));
}
action = resources_action_create(type, standard, provider, type,
CRMD_ACTION_METADATA, 0,
CRMD_METADATA_CALL_TIMEOUT, params_table,
0);
lrmd_key_value_freeall(params);
if (action == NULL) {
crm_err("Unable to retrieve meta-data for %s:%s:%s",
standard, provider, type);
return -EINVAL;
}
if (!services_action_sync(action)) {
crm_err("Failed to retrieve meta-data for %s:%s:%s",
standard, provider, type);
services_action_free(action);
return -EIO;
}
if (!action->stdout_data) {
crm_err("Failed to receive meta-data for %s:%s:%s",
standard, provider, type);
services_action_free(action);
return -EIO;
}
*output = strdup(action->stdout_data);
services_action_free(action);
return pcmk_ok;
}
static int
lrmd_api_exec(lrmd_t *lrmd, const char *rsc_id, const char *action,
const char *userdata, guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
enum lrmd_call_options options, lrmd_key_value_t * params)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
lrmd_key_value_t *tmp = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add(data, F_LRMD_RSC_ACTION, action);
crm_xml_add(data, F_LRMD_RSC_USERDATA_STR, userdata);
crm_xml_add_ms(data, F_LRMD_RSC_INTERVAL, interval_ms);
crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout);
crm_xml_add_int(data, F_LRMD_RSC_START_DELAY, start_delay);
for (tmp = params; tmp; tmp = tmp->next) {
hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
}
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, TRUE);
free_xml(data);
lrmd_key_value_freeall(params);
return rc;
}
/* timeout is in ms */
static int
lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path,
int timeout, lrmd_key_value_t *params)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_ALERT);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
lrmd_key_value_t *tmp = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_ALERT_ID, alert_id);
crm_xml_add(data, F_LRMD_ALERT_PATH, alert_path);
crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout);
for (tmp = params; tmp; tmp = tmp->next) {
hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
}
rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout,
lrmd_opt_notify_orig_only, TRUE);
free_xml(data);
lrmd_key_value_freeall(params);
return rc;
}
static int
lrmd_api_cancel(lrmd_t *lrmd, const char *rsc_id, const char *action,
guint interval_ms)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_LRMD_RSC_ACTION, action);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add_ms(data, F_LRMD_RSC_INTERVAL, interval_ms);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, TRUE);
free_xml(data);
return rc;
}
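/*!
 * \internal
 * \brief List available fence agents
 *
 * \param[out] resources  If not NULL, list to add the agent names to
 *
 * \return Number of agents found, or a negative error code on failure
 */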
static int
list_stonith_agents(lrmd_list_t ** resources)
{
int rc = 0;
stonith_t *stonith_api = stonith_api_new();
stonith_key_value_t *stonith_resources = NULL;
stonith_key_value_t *dIter = NULL;
- if(stonith_api) {
- stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, &stonith_resources, 0);
- stonith_api->cmds->free(stonith_api);
+ if (stonith_api == NULL) {
+ crm_err("Could not list fence agents: API memory allocation failed");
+ return -ENOMEM;
}
+ stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL,
+ &stonith_resources, 0);
+ stonith_api->cmds->free(stonith_api);
for (dIter = stonith_resources; dIter; dIter = dIter->next) {
rc++;
if (resources) {
*resources = lrmd_list_add(*resources, dIter->value);
}
}
stonith_key_value_freeall(stonith_resources, 1, 0);
return rc;
}
static int
lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class,
const char *provider)
{
int rc = 0;
+ int stonith_count = 0; // Initially, whether to include stonith devices
if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {
- rc += list_stonith_agents(resources);
+ stonith_count = 1;
} else {
GListPtr gIter = NULL;
GList *agents = resources_list_agents(class, provider);
for (gIter = agents; gIter != NULL; gIter = gIter->next) {
*resources = lrmd_list_add(*resources, (const char *)gIter->data);
rc++;
}
g_list_free_full(agents, free);
if (!class) {
- rc += list_stonith_agents(resources);
+ stonith_count = 1;
}
}
+ if (stonith_count) {
+ // Now, if stonith devices are included, how many there are
+ stonith_count = list_stonith_agents(resources);
+ if (stonith_count > 0) {
+ rc += stonith_count;
+ }
+ }
if (rc == 0) {
crm_notice("No agents found for class %s", class);
rc = -EPROTONOSUPPORT;
}
return rc;
}
static int
does_provider_have_agent(const char *agent, const char *provider, const char *class)
{
int found = 0;
GList *agents = NULL;
GListPtr gIter2 = NULL;
agents = resources_list_agents(class, provider);
for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) {
if (safe_str_eq(agent, gIter2->data)) {
found = 1;
}
}
g_list_free_full(agents, free);
return found;
}
static int
lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers)
{
int rc = pcmk_ok;
char *provider = NULL;
GList *ocf_providers = NULL;
GListPtr gIter = NULL;
ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF);
for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) {
provider = gIter->data;
if (!agent || does_provider_have_agent(agent, provider,
PCMK_RESOURCE_CLASS_OCF)) {
*providers = lrmd_list_add(*providers, (const char *)gIter->data);
rc++;
}
}
g_list_free_full(ocf_providers, free);
return rc;
}
static int
lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported)
{
int rc = 0;
GList *standards = NULL;
GListPtr gIter = NULL;
standards = resources_list_standards();
for (gIter = standards; gIter != NULL; gIter = gIter->next) {
*supported = lrmd_list_add(*supported, (const char *)gIter->data);
rc++;
}
if (list_stonith_agents(NULL) > 0) {
*supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH);
rc++;
}
g_list_free_full(standards, free);
return rc;
}
lrmd_t *
lrmd_api_new(void)
{
lrmd_t *new_lrmd = NULL;
lrmd_private_t *pvt = NULL;
new_lrmd = calloc(1, sizeof(lrmd_t));
pvt = calloc(1, sizeof(lrmd_private_t));
pvt->remote = calloc(1, sizeof(crm_remote_t));
new_lrmd->cmds = calloc(1, sizeof(lrmd_api_operations_t));
pvt->type = CRM_CLIENT_IPC;
new_lrmd->lrmd_private = pvt;
new_lrmd->cmds->connect = lrmd_api_connect;
new_lrmd->cmds->connect_async = lrmd_api_connect_async;
new_lrmd->cmds->is_connected = lrmd_api_is_connected;
new_lrmd->cmds->poke_connection = lrmd_api_poke_connection;
new_lrmd->cmds->disconnect = lrmd_api_disconnect;
new_lrmd->cmds->register_rsc = lrmd_api_register_rsc;
new_lrmd->cmds->unregister_rsc = lrmd_api_unregister_rsc;
new_lrmd->cmds->get_rsc_info = lrmd_api_get_rsc_info;
new_lrmd->cmds->get_recurring_ops = lrmd_api_get_recurring_ops;
new_lrmd->cmds->set_callback = lrmd_api_set_callback;
new_lrmd->cmds->get_metadata = lrmd_api_get_metadata;
new_lrmd->cmds->exec = lrmd_api_exec;
new_lrmd->cmds->cancel = lrmd_api_cancel;
new_lrmd->cmds->list_agents = lrmd_api_list_agents;
new_lrmd->cmds->list_ocf_providers = lrmd_api_list_ocf_providers;
new_lrmd->cmds->list_standards = lrmd_api_list_standards;
new_lrmd->cmds->exec_alert = lrmd_api_exec_alert;
new_lrmd->cmds->get_metadata_params = lrmd_api_get_metadata_params;
return new_lrmd;
}
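/*!
 * \brief Create a new executor API object for a Pacemaker Remote connection
 *
 * \param[in] nodename  Name of remote node (defaults to \p server if NULL)
 * \param[in] server    Server address to connect to (defaults to \p nodename)
 * \param[in] port      Port to connect to (0 to use the default remote port)
 *
 * \return Newly created API object, or NULL if GnuTLS support is not built
 *         in or both \p nodename and \p server are NULL
 */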
lrmd_t *
lrmd_remote_api_new(const char *nodename, const char *server, int port)
{
#ifdef HAVE_GNUTLS_GNUTLS_H
lrmd_t *new_lrmd = lrmd_api_new();
lrmd_private_t *native = new_lrmd->lrmd_private;
if (!nodename && !server) {
lrmd_api_delete(new_lrmd);
return NULL;
}
native->type = CRM_CLIENT_TLS;
native->remote_nodename = nodename ? strdup(nodename) : strdup(server);
native->server = server ? strdup(server) : strdup(nodename);
native->port = port;
if (native->port == 0) {
native->port = crm_default_remote_port();
}
return new_lrmd;
#else
crm_err("Cannot communicate with Pacemaker Remote because GnuTLS is not enabled for this build");
return NULL;
#endif
}
void
lrmd_api_delete(lrmd_t * lrmd)
{
if (!lrmd) {
return;
}
lrmd->cmds->disconnect(lrmd); /* no-op if already disconnected */
free(lrmd->cmds);
if (lrmd->lrmd_private) {
lrmd_private_t *native = lrmd->lrmd_private;
#ifdef HAVE_GNUTLS_GNUTLS_H
free(native->server);
#endif
free(native->remote_nodename);
free(native->remote);
free(native->token);
free(native->peer_version);
}
free(lrmd->lrmd_private);
free(lrmd);
}
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index a9b0f3efb4..8c3f2d36af 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,2946 +1,2944 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
CRM_TRACE_INIT_DATA(pe_allocate);
void set_alloc_actions(pe_working_set_t * data_set);
extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
static void apply_remote_node_ordering(pe_working_set_t *data_set);
static enum remote_connection_state get_remote_node_state(pe_node_t *node);
enum remote_connection_state {
remote_state_unknown = 0,
remote_state_alive = 1,
remote_state_resting = 2,
remote_state_failed = 3,
remote_state_stopped = 4
};
static const char *
state2text(enum remote_connection_state state)
{
switch (state) {
case remote_state_unknown:
return "unknown";
case remote_state_alive:
return "alive";
case remote_state_resting:
return "resting";
case remote_state_failed:
return "failed";
case remote_state_stopped:
return "stopped";
}
return "impossible";
}
resource_alloc_functions_t resource_class_alloc_functions[] = {
{
native_merge_weights,
native_color,
native_create_actions,
native_create_probe,
native_internal_constraints,
native_rsc_colocation_lh,
native_rsc_colocation_rh,
native_rsc_location,
native_action_flags,
native_update_actions,
native_expand,
native_append_meta,
},
{
group_merge_weights,
group_color,
group_create_actions,
native_create_probe,
group_internal_constraints,
group_rsc_colocation_lh,
group_rsc_colocation_rh,
group_rsc_location,
group_action_flags,
group_update_actions,
group_expand,
group_append_meta,
},
{
clone_merge_weights,
clone_color,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
clone_rsc_colocation_lh,
clone_rsc_colocation_rh,
clone_rsc_location,
clone_action_flags,
pcmk__multi_update_actions,
clone_expand,
clone_append_meta,
},
{
pcmk__bundle_merge_weights,
pcmk__bundle_color,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_rsc_colocation_lh,
pcmk__bundle_rsc_colocation_rh,
pcmk__bundle_rsc_location,
pcmk__bundle_action_flags,
pcmk__multi_update_actions,
pcmk__bundle_expand,
pcmk__bundle_append_meta,
}
};
gboolean
update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
{
static unsigned long calls = 0;
gboolean changed = FALSE;
gboolean clear = is_set(flags, pe_action_clear);
enum pe_action_flags last = action->flags;
if (clear) {
action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
} else {
action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
}
if (last != action->flags) {
calls++;
changed = TRUE;
/* Useful for tracking down _who_ changed a specific flag */
/* CRM_ASSERT(calls != 534); */
clear_bit(flags, pe_action_clear);
crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
action->uuid, action->node ? action->node->details->uname : "[none]",
clear ? "un-" : "", flags, last, action->flags, calls, source);
}
return changed;
}
static gboolean
check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
gboolean active_here, pe_working_set_t * data_set)
{
int attr_lpc = 0;
gboolean force_restart = FALSE;
gboolean delete_resource = FALSE;
gboolean changed = FALSE;
const char *value = NULL;
const char *old_value = NULL;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
if (value == old_value /* i.e. NULL */
|| crm_str_eq(value, old_value, TRUE)) {
continue;
}
changed = TRUE;
trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
if (active_here) {
force_restart = TRUE;
crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
rsc->id, node->details->uname, attr_list[attr_lpc],
crm_str(old_value), crm_str(value));
}
}
if (force_restart) {
/* make sure the restart happens */
stop_action(rsc, node, FALSE);
set_bit(rsc->flags, pe_rsc_start_pending);
delete_resource = TRUE;
} else if (changed) {
delete_resource = TRUE;
}
return delete_resource;
}
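/*!
 * \internal
 * \brief Schedule cancellation of a recurring action found in the history
 *
 * \param[in] rsc          Resource that the action belongs to
 * \param[in] xml_op       History entry for the action
 * \param[in] active_node  Node where the action is running
 * \param[in] reason       Human-readable reason for the cancellation
 * \param[in] data_set     Cluster working set
 */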
static void
CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
const char *reason, pe_working_set_t * data_set)
{
guint interval_ms = 0;
action_t *cancel = NULL;
const char *task = NULL;
const char *call_id = NULL;
const char *interval_ms_s = NULL;
CRM_CHECK(xml_op != NULL, return);
CRM_CHECK(active_node != NULL, return);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
interval_ms = crm_parse_ms(interval_ms_s);
crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s",
rsc->id, task, interval_ms,
active_node->details->uname, (reason? reason : "unknown"));
cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
}
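/*!
 * \internal
 * \brief Check whether an action's definition has changed since it was run
 *
 * Compare the digests recorded in the action's history entry against newly
 * calculated ones, scheduling a restart, reload, or rescheduled recurring
 * action as appropriate.
 *
 * \return TRUE if the action definition changed, FALSE otherwise
 */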
static gboolean
check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
pe_working_set_t * data_set)
{
char *key = NULL;
guint interval_ms = 0;
const char *interval_ms_s = NULL;
const op_digest_cache_t *digest_data = NULL;
gboolean did_change = FALSE;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_secure = NULL;
CRM_CHECK(active_node != NULL, return FALSE);
interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
interval_ms = crm_parse_ms(interval_ms_s);
if (interval_ms > 0) {
xmlNode *op_match = NULL;
/* we need to reconstruct the key because of the way we used to construct resource IDs */
key = generate_op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Checking parameters for %s", key);
op_match = find_rsc_op_entry(rsc, key);
if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
free(key);
return TRUE;
} else if (op_match == NULL) {
pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
free(key);
return TRUE;
}
free(key);
key = NULL;
}
crm_trace("Testing " CRM_OP_FMT " on %s",
rsc->id, task, interval_ms, active_node->details->uname);
if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) {
/* Reload based on the start action not a probe */
task = RSC_START;
} else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) {
/* Reload based on the start action not a migrate */
task = RSC_START;
} else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) {
/* Reload based on the start action not a promote */
task = RSC_START;
}
digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
if(is_set(data_set->flags, pe_flag_sanitized)) {
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
}
if(digest_data->rc != RSC_DIGEST_MATCH
&& digest_secure
&& digest_data->digest_secure_calc
&& strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
if (is_set(data_set->flags, pe_flag_stdout)) {
printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n",
rsc->id, task, interval_ms, active_node->details->uname,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
}
} else if (digest_data->rc == RSC_DIGEST_RESTART) {
/* Changes that force a restart */
pe_action_t *required = NULL;
did_change = TRUE;
key = generate_op_key(rsc->id, task, interval_ms);
crm_log_xml_info(digest_data->params_restart, "params:restart");
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
} else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
/* Changes that can potentially be handled by a reload */
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
did_change = TRUE;
trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
crm_log_xml_info(digest_data->params_all, "params:reload");
key = generate_op_key(rsc->id, task, interval_ms);
if (interval_ms > 0) {
action_t *op = NULL;
#if 0
/* Always reload/restart the entire resource */
ReloadRsc(rsc, active_node, data_set);
#else
/* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
set_bit(op->flags, pe_action_reschedule);
#endif
} else if (digest_restart) {
pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
/* Reload this resource */
ReloadRsc(rsc, active_node, data_set);
free(key);
} else {
pe_action_t *required = NULL;
pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
/* Re-send the start/demote/promote op
* Recurring ops will be detected independently
*/
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
}
}
return did_change;
}
/*!
* \internal
* \brief Do deferred action checks after allocation
*
 * \param[in] rsc       Resource to check
 * \param[in] node      Node that the resource is or was active on
 * \param[in] rsc_op    History entry for an action of the resource
 * \param[in] check     Type of deferred check to perform
 * \param[in] data_set  Working set for cluster
*/
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
enum pe_check_parameters check, pe_working_set_t *data_set)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
case pe_check_active:
if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
reason = "action definition changed";
}
break;
case pe_check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s has no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason) {
pe__clear_failcount(rsc, node, reason, data_set);
}
}
static void
check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int offset = -1;
guint interval_ms = 0;
int stop_index = 0;
int start_index = 0;
const char *task = NULL;
const char *interval_ms_s = NULL;
xmlNode *rsc_op = NULL;
GListPtr op_list = NULL;
GListPtr sorted_op_list = NULL;
CRM_CHECK(node != NULL, return);
if (is_set(rsc->flags, pe_rsc_orphan)) {
resource_t *parent = uber_parent(rsc);
if(parent == NULL
|| pe_rsc_is_clone(parent) == FALSE
|| is_set(parent->flags, pe_rsc_unique)) {
pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
DeleteRsc(rsc, node, FALSE, data_set);
} else {
pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
}
return;
} else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
rsc->id, node->details->uname);
return;
}
pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
op_list = g_list_prepend(op_list, rsc_op);
}
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
offset++;
if (start_index < stop_index) {
/* stopped */
continue;
} else if (offset < start_index) {
/* action occurred prior to a start */
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS);
interval_ms = crm_parse_ms(interval_ms_s);
if ((interval_ms > 0) &&
(is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
// Maintenance mode cancels recurring operations
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if ((interval_ms > 0)
|| safe_str_eq(task, RSC_STATUS)
|| safe_str_eq(task, RSC_START)
|| safe_str_eq(task, RSC_PROMOTE)
|| safe_str_eq(task, RSC_MIGRATED)) {
/* If a resource operation failed, and the operation's definition
* has changed, clear any fail count so they can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pe__add_param_check(rsc_op, rsc, node, pe_check_active,
data_set);
} else if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe__clear_failcount(rsc, node, "action definition changed",
data_set);
}
}
}
g_list_free(sorted_op_list);
}
static GListPtr
find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
gboolean partial, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
gboolean match = FALSE;
if (id == NULL) {
return NULL;
} else if (rsc == NULL && data_set) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
return result;
} else if (rsc == NULL) {
return NULL;
}
if (partial) {
if (strstr(rsc->id, id)) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
match = TRUE;
}
} else {
if (strcmp(rsc->id, id) == 0) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
}
}
if (match) {
result = g_list_prepend(result, rsc);
}
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
}
return result;
}
static void
check_actions(pe_working_set_t * data_set)
{
const char *id = NULL;
node_t *node = NULL;
xmlNode *lrm_rscs = NULL;
xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
xmlNode *node_state = NULL;
for (node_state = __xml_first_child(status); node_state != NULL;
node_state = __xml_next_element(node_state)) {
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
id = crm_element_value(node_state, XML_ATTR_ID);
lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
node = pe_find_node_id(data_set->nodes, id);
if (node == NULL) {
continue;
/* Still need to check actions for a maintenance node to cancel existing monitor operations */
} else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: can't run resources",
node->details->uname);
continue;
}
crm_trace("Processing node %s", node->details->uname);
if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
xmlNode *rsc_entry = NULL;
for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
if (xml_has_children(rsc_entry)) {
GListPtr gIter = NULL;
GListPtr result = NULL;
const char *rsc_id = ID(rsc_entry);
CRM_CHECK(rsc_id != NULL, return);
result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
for (gIter = result; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->variant != pe_native) {
continue;
}
check_actions_for(rsc_entry, rsc, node, data_set);
}
g_list_free(result);
}
}
}
}
}
}
}
static gboolean
apply_placement_constraints(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying constraints...");
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
pe__location_t *cons = gIter->data;
cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
}
return TRUE;
}
static gboolean
failcount_clear_action_exists(node_t * node, resource_t * rsc)
{
gboolean rc = FALSE;
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
if (list) {
rc = TRUE;
}
g_list_free(list);
return rc;
}
/*!
* \internal
* \brief Force resource away if failures hit migration threshold
*
* \param[in,out] rsc Resource to check for failures
* \param[in,out] node Node to check for failures
* \param[in,out] data_set Cluster working set to update
*/
static void
check_migration_threshold(resource_t *rsc, node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
resource_t *failed;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return;
}
// If we're ignoring failures, also ignore the migration threshold
if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
return;
}
/* If there are no failures, there's no need to force away */
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL,
data_set);
if (fail_count <= 0) {
return;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
/* If failed resource has a parent, we'll force the parent away */
failed = rsc;
if (is_not_set(rsc->flags, pe_rsc_unique)) {
failed = uber_parent(rsc);
}
if (countdown == 0) {
resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
failed->id, node->details->uname, fail_count,
rsc->migration_threshold);
} else {
crm_info("%s can fail %d more times on %s before being forced off",
failed->id, countdown, node->details->uname);
}
}
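/*!
 * \internal
 * \brief Apply stickiness to a resource's current node, then check whether
 *        accumulated failures should force the resource away from it
 */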
static void
common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
common_apply_stickiness(child_rsc, node, data_set);
}
return;
}
if (is_set(rsc->flags, pe_rsc_managed)
&& rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (current == NULL) {
} else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
resource_t *sticky_rsc = rsc;
resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
" (node=%s, weight=%d)", sticky_rsc->id,
node->details->uname, rsc->stickiness);
} else {
GHashTableIter iter;
node_t *nIter = NULL;
pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
" and node %s is not explicitly allowed", rsc->id, node->details->uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
}
}
}
/* Check the migration threshold only if a failcount clear action
* has not already been placed for this resource on the node.
* There is no sense in potentially forcing the resource from this
* node if the failcount is being reset anyway.
*
* @TODO A clear_failcount operation can be scheduled in stage4() via
* check_actions_for(), or in stage5() via check_params(). This runs in
* stage2(), so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't -- worst case, we stop or move the resource,
* then move it back next transition.
*/
if (failcount_clear_action_exists(node, rsc) == FALSE) {
check_migration_threshold(rsc, node, data_set);
}
}
void
complex_set_cmds(resource_t * rsc)
{
GListPtr gIter = rsc->children;
rsc->cmds = &resource_class_alloc_functions[rsc->variant];
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
complex_set_cmds(child_rsc);
}
}
void
set_alloc_actions(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
complex_set_cmds(rsc);
}
}
static void
calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
{
const char *key = (const char *)gKey;
const char *value = (const char *)gValue;
int *system_health = (int *)user_data;
if (!gKey || !gValue || !user_data) {
return;
}
if (crm_starts_with(key, "#health")) {
int score;
/* Convert the value into an integer */
score = char2score(value);
/* Add it to the running total */
*system_health = merge_weights(score, *system_health);
}
}
static gboolean
apply_system_health(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
int base_health = 0;
if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
/* Prevent any accidental health -> score translation */
node_score_red = 0;
node_score_yellow = 0;
node_score_green = 0;
return TRUE;
} else if (safe_str_eq(health_strategy, "migrate-on-red")) {
/* Resources on nodes which have health values of red are
* weighted away from that node.
*/
node_score_red = -INFINITY;
node_score_yellow = 0;
node_score_green = 0;
} else if (safe_str_eq(health_strategy, "only-green")) {
/* Resources on nodes which have health values of red or yellow
* are forced away from that node.
*/
node_score_red = -INFINITY;
node_score_yellow = -INFINITY;
node_score_green = 0;
} else if (safe_str_eq(health_strategy, "progressive")) {
/* Same as the above, but use the r/y/g scores provided by the user
* Defaults are provided by the pe_prefs table
* Also, custom health "base score" can be used
*/
base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
} else if (safe_str_eq(health_strategy, "custom")) {
/* Requires the admin to configure the rsc_location constraints for
* processing the stored health scores
*/
/* TODO: Check for the existence of appropriate node health constraints */
return TRUE;
} else {
crm_err("Unknown node health strategy: %s", health_strategy);
return FALSE;
}
crm_info("Applying automated node health strategy: %s", health_strategy);
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
int system_health = base_health;
node_t *node = (node_t *) gIter->data;
/* Search through the node hash table for system health entries. */
g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
crm_info(" Node %s has an combined system health of %d",
node->details->uname, system_health);
/* If the health is non-zero, then create a new rsc2node so that the
* weight will be added later on.
*/
if (system_health != 0) {
GListPtr gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
}
}
}
return TRUE;
}
gboolean
stage0(pe_working_set_t * data_set)
{
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
if (data_set->input == NULL) {
return FALSE;
}
if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
crm_trace("Calculating status");
cluster_status(data_set);
}
set_alloc_actions(data_set);
apply_system_health(data_set);
unpack_constraints(cib_constraints, data_set);
return TRUE;
}
/*
* Check nodes for resources started outside of the LRM
*/
gboolean
probe_resources(pe_working_set_t * data_set)
{
action_t *probe_node_complete = NULL;
for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
if (node->details->online == FALSE) {
if (pe__is_remote_node(node) && node->details->remote_rsc
&& (get_remote_node_state(node) == remote_state_failed)) {
pe_fence_node(data_set, node, "the connection is unrecoverable");
}
continue;
} else if (node->details->unclean) {
continue;
} else if (node->details->rsc_discovery_enabled == FALSE) {
/* resource discovery is disabled for this node */
continue;
}
if (probed != NULL && crm_is_true(probed) == FALSE) {
action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
continue;
}
for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
}
}
return TRUE;
}
static void
rsc_discover_filter(resource_t *rsc, node_t *node)
{
GListPtr gIter = rsc->children;
resource_t *top = uber_parent(rsc);
node_t *match;
if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_discover_filter(child_rsc, node);
}
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match && match->rsc_discover_mode != pe_discover_exclusive) {
match->weight = -INFINITY;
}
}
/*
* Count how many valid nodes we have (so we know the maximum number of
* colors we can resolve).
*
* Apply node constraints (i.e. filter the "allowed_nodes" part of resources)
*/
gboolean
stage2(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying placement constraints");
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
if (node == NULL) {
/* error */
} else if (node->weight >= 0.0 /* global weight */
&& node->details->online && node->details->type != node_ping) {
data_set->max_valid_nodes++;
}
}
apply_placement_constraints(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
GListPtr gIter2 = NULL;
node_t *node = (node_t *) gIter->data;
gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
common_apply_stickiness(rsc, node, data_set);
rsc_discover_filter(rsc, node);
}
}
return TRUE;
}
/*
* Create internal resource constraints before allocation
*/
gboolean
stage3(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
rsc->cmds->internal_constraints(rsc, data_set);
}
return TRUE;
}
/*
* Check for orphaned or redefined actions
*/
gboolean
stage4(pe_working_set_t * data_set)
{
check_actions(data_set);
return TRUE;
}
static void *
convert_const_pointer(const void *ptr)
{
/* Worst function ever */
return (void *)ptr;
}
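/*!
 * \internal
 * \brief Compare two resources for processing order
 *
 * Sort by priority first, then by node score on the current node, then by
 * node score on each node in the given list, so that higher-priority and
 * better-placed resources are allocated first.
 *
 * \param[in] a     First resource to compare
 * \param[in] b     Second resource to compare
 * \param[in] data  Sorted list of all nodes in the cluster
 *
 * \return -1 if \p a should be processed first, 1 if \p b should, else 0
 */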
static gint
sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
{
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
const char *reason = "existence";
const GListPtr nodes = (GListPtr) data;
const resource_t *resource1 = a;
const resource_t *resource2 = b;
node_t *r1_node = NULL;
node_t *r2_node = NULL;
GListPtr gIter = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
if (a == NULL && b == NULL) {
goto done;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "no node list";
if (nodes == NULL) {
goto done;
}
r1_nodes = rsc_merge_weights(convert_const_pointer(resource1),
resource1->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);
r2_nodes = rsc_merge_weights(convert_const_pointer(resource2),
resource2->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);
/* Current location score */
reason = "current location";
r1_weight = -INFINITY;
r2_weight = -INFINITY;
if (resource1->running_on) {
r1_node = pe__current_node(resource1);
r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
if (r1_node != NULL) {
r1_weight = r1_node->weight;
}
}
if (resource2->running_on) {
r2_node = pe__current_node(resource2);
r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
if (r2_node != NULL) {
r2_weight = r2_node->weight;
}
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "score";
for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
r1_node = NULL;
r2_node = NULL;
r1_weight = -INFINITY;
if (r1_nodes) {
r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
}
if (r1_node) {
r1_weight = r1_node->weight;
}
r2_weight = -INFINITY;
if (r2_nodes) {
r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
}
if (r2_node) {
r2_weight = r2_node->weight;
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
rc < 0 ? '>' : rc > 0 ? '<' : '=',
resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
if (r1_nodes) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
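/*!
 * \internal
 * \brief Allocate all resources to nodes, remote connection resources first
 */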
static void
allocate_resources(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Force remote connection resources to be allocated first. This
* also forces any colocation dependencies to be allocated as well */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->is_remote_node == FALSE) {
continue;
}
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
/* For remote node connection resources, always prefer the partial
* migration target during resource allocation, if the rsc is in the
* middle of a migration.
*/
rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
}
}
/* now do the rest of the resources */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (rsc->is_remote_node == TRUE) {
continue;
}
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
rsc->cmds->allocate(rsc, NULL, data_set);
}
}
/* We always use pe_order_preserve with these convenience functions to exempt
* internally generated constraints from the prohibition of user constraints
* involving remote connection resources.
*
* The start ordering additionally uses pe_order_runnable_left so that the
* specified action is not runnable if the start is not runnable.
*/
static inline void
order_start_then_action(resource_t *lh_rsc, action_t *rh_action,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_rsc && rh_action && data_set) {
custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
rh_action->rsc, NULL, rh_action,
pe_order_preserve | pe_order_runnable_left | extra,
data_set);
}
}
static inline void
order_action_then_stop(action_t *lh_action, resource_t *rh_rsc,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_action && rh_rsc && data_set) {
custom_action_order(lh_action->rsc, NULL, lh_action,
rh_rsc, stop_key(rh_rsc), NULL,
pe_order_preserve | extra, data_set);
}
}
static void
cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
return;
}
/* Don't recurse into ->children, those are just unallocated clone instances */
if(is_not_set(rsc->flags, pe_rsc_orphan)) {
return;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
if (node->details->online
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe_action_t *clear_op = NULL;
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
data_set);
/* We can't use order_action_then_stop() here because its
* pe_order_preserve breaks things
*/
custom_action_order(clear_op->rsc, NULL, clear_op,
rsc, stop_key(rsc), NULL,
pe_order_optional, data_set);
}
}
}
gboolean
stage5(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (safe_str_neq(data_set->placement_strategy, "default")) {
GListPtr nodes = g_list_copy(data_set->nodes);
nodes = sort_nodes_by_weight(nodes, NULL, data_set);
data_set->resources =
g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
g_list_free(nodes);
}
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
}
crm_trace("Allocating services");
/* Take (next) highest resource, assign it and create its actions */
allocate_resources(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
}
// Process deferred action checks
pe__foreach_param_check(data_set, check_params);
pe__free_param_checks(data_set);
if (is_set(data_set->flags, pe_flag_startup_probes)) {
crm_trace("Calculating needed probes");
/* This code probably needs optimization
* ptest -x with 100 nodes, 100 clones and clone-max=100:
With probes:
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
36s
ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
Without probes:
ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
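Summary of this sample run: applying ordering constraints took roughly
36 seconds with probes (17:56:52 -> 17:57:28) versus under a second
without them.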
*/
probe_resources(data_set);
}
crm_trace("Handle orphans");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
cleanup_orphans(rsc, data_set);
}
crm_trace("Creating actions");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
rsc->cmds->create_actions(rsc, data_set);
}
crm_trace("Creating done");
return TRUE;
}
static gboolean
is_managed(const resource_t * rsc)
{
GListPtr gIter = rsc->children;
if (is_set(rsc->flags, pe_rsc_managed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
if (is_managed(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
if (is_managed(rsc)) {
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Create pseudo-op for guest node fence, and order relative to it
*
* \param[in] node Guest node to fence
* \param[in] data_set Working set of CIB state
*/
static void
fence_guest(pe_node_t *node, pe_working_set_t *data_set)
{
resource_t *container = node->details->remote_rsc->container;
pe_action_t *stop = NULL;
pe_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than let it default to
* the cluster's default action, because we are not _initiating_ fencing --
* we are creating a pseudo-event to describe fencing that is already
* occurring by other means (container recovery).
*/
const char *fence_action = "off";
/* Check whether guest's container resource has any explicit stop or
* start (the stop may be implied by fencing of the guest's host).
*/
if (container) {
stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
fence_action = "reboot";
}
}
/* Create a fence pseudo-event, so we have an event to order actions
* against, and the controller can always detect it.
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set);
update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
__FUNCTION__, __LINE__);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
if(stop && is_set(stop->flags, pe_action_pseudo)) {
pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set);
crm_info("Implying guest node %s is down (action %d) after %s fencing",
node->details->uname, stonith_op->id, stop->node->details->uname);
order_actions(parent_stonith_op, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
crm_info("Implying guest node %s is down (action %d) "
"after container %s is stopped (action %d)",
node->details->uname, stonith_op->id,
container->id, stop->id);
} else {
/* If we're fencing the guest node but there's no stop for the guest
* resource, we must think the guest is already stopped. However, we may
* think so because its resource history was just cleaned. To avoid
* unnecessarily considering the guest node down if it's really up,
* order the pseudo-fencing after any stop of the connection resource,
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
RSC_STOP, NULL);
if (stop) {
order_actions(stop, stonith_op, pe_order_optional);
crm_info("Implying guest node %s is down (action %d) "
"after connection is stopped (action %d)",
node->details->uname, stonith_op->id, stop->id);
} else {
/* Not sure why we're fencing, but everything must already be
* cleanly stopped.
*/
crm_info("Implying guest node %s is down (action %d) ",
node->details->uname, stonith_op->id);
}
}
/* Order/imply other actions relative to pseudo-fence as with real fence */
stonith_constraints(node, stonith_op, data_set);
}
/*
* Create dependencies for stonith and shutdown operations
*/
gboolean
stage6(pe_working_set_t * data_set)
{
action_t *dc_down = NULL;
action_t *stonith_op = NULL;
gboolean integrity_lost = FALSE;
gboolean need_stonith = TRUE;
GListPtr gIter;
GListPtr stonith_ops = NULL;
GList *shutdown_ops = NULL;
- /* Remote ordering constraints need to happen prior to calculate
- * fencing because it is one more place we will mark the node as
- * dirty.
+ /* Remote ordering constraints need to happen prior to calculating fencing
+ * because it is one more place we will mark the node as dirty.
*
- * A nice side-effect of doing it first is that we can remove a
- * bunch of special logic from apply_*_ordering() because its
- * already part of pe_fence_node()
+ * A nice side effect of doing them early is that apply_*_ordering() can be
+ * simpler because pe_fence_node() has already done some of the work.
*/
crm_trace("Creating remote ordering constraints");
apply_remote_node_ordering(data_set);
crm_trace("Processing fencing and shutdown cases");
if (any_managed_resources(data_set) == FALSE) {
crm_notice("Delaying fencing operations until there are resources to manage");
need_stonith = FALSE;
}
/* Check each node for stonith/shutdown */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && need_stonith) {
fence_guest(node, data_set);
}
continue;
}
stonith_op = NULL;
if (node->details->unclean
&& need_stonith && pe_can_fence(data_set, node)) {
stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
pe_warn("Scheduling Node %s for STONITH", node->details->uname);
stonith_constraints(node, stonith_op, data_set);
if (node->details->is_dc) {
// Remember if the DC is being fenced
dc_down = stonith_op;
} else {
if (is_not_set(data_set->flags, pe_flag_concurrent_fencing)
&& (stonith_ops != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pe_action_t *) stonith_ops->data,
stonith_op, pe_order_optional);
}
// Remember all non-DC fencing actions in a separate list
stonith_ops = g_list_prepend(stonith_ops, stonith_op);
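/* Because of the prepend, stonith_ops->data is always the most
* recently scheduled fencing action, i.e. the tail of the serial
* chain (illustratively: fence(n1) -> fence(n2) -> fence(n3), with
* fence(n3) at the head of the list)
*/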
}
} else if (node->details->online && node->details->shutdown &&
/* TODO: Define what a shutdown op means for a remote node.
* For now we do not send shutdown operations for remote nodes, but
* if we can come up with a good use for this in the future, we will. */
pe__is_guest_or_remote_node(node) == FALSE) {
action_t *down_op = sched_shutdown_op(node, data_set);
if (node->details->is_dc) {
// Remember if the DC is being shut down
dc_down = down_op;
} else {
// Remember non-DC shutdowns for later ordering
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if (node->details->unclean && stonith_op == NULL) {
integrity_lost = TRUE;
pe_warn("Node %s is unclean!", node->details->uname);
}
}
if (integrity_lost) {
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
crm_notice("Cannot fence unclean nodes until quorum is"
" attained (or no-quorum-policy is set to ignore)");
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
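/* Illustrative cycle (hypothetical graph): if the graph already contains
* fence(DC) -> clone:stop -> shutdown(node2), adding
* shutdown(node2) -> fence(DC) would close the loop.
*/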
if (safe_str_eq(dc_down->task, CRM_OP_SHUTDOWN)) {
for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
action_t *node_stop = (action_t *) gIter->data;
crm_debug("Ordering shutdown on %s before %s on DC %s",
node_stop->node->details->uname,
dc_down->task, dc_down->node->details->uname);
order_actions(node_stop, dc_down, pe_order_optional);
}
}
// Order any non-DC fencing before any DC fencing or shutdown
if (is_set(data_set->flags, pe_flag_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
order_actions((pe_action_t *) gIter->data, dc_down,
pe_order_optional);
}
} else if (stonith_ops) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pe_action_t *) stonith_ops->data, dc_down,
pe_order_optional);
}
}
g_list_free(stonith_ops);
g_list_free(shutdown_ops);
return TRUE;
}
/*
* Determine the sets of independent actions and the correct order for the
* actions in each set.
*
* Mark dependencies of un-runnable actions un-runnable
*
*/
static GListPtr
find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
{
GListPtr list = NULL;
list = find_actions(actions, original_key, NULL);
if (list == NULL) {
/* we're potentially searching a child of the original resource */
char *key = NULL;
char *task = NULL;
guint interval_ms = 0;
if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
key = generate_op_key(rsc->id, task, interval_ms);
list = find_actions(actions, key, NULL);
} else {
crm_err("search key: %s", original_key);
}
free(key);
free(task);
}
return list;
}
static void
rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
pe__ordering_t *order)
{
GListPtr gIter = NULL;
GListPtr rh_actions = NULL;
action_t *rh_action = NULL;
enum pe_ordering type;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(order != NULL, return);
type = order->type;
rh_action = order->rh_action;
crm_trace("Processing RH of ordering constraint %d", order->id);
if (rh_action != NULL) {
rh_actions = g_list_prepend(NULL, rh_action);
} else if (rsc != NULL) {
rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
}
if (rh_actions == NULL) {
pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
" ignoring", rsc->id, order->rh_action_task);
if (lh_action) {
pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
}
return;
}
if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
order->rh_action_task);
clear_bit(type, pe_order_implies_then);
}
gIter = rh_actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *rh_action_iter = (action_t *) gIter->data;
if (lh_action) {
order_actions(lh_action, rh_action_iter, type);
} else if (type & pe_order_implies_then) {
update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
} else {
crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
}
}
g_list_free(rh_actions);
}
static void
rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
GListPtr lh_actions = NULL;
action_t *lh_action = order->lh_action;
resource_t *rh_rsc = order->rh_rsc;
crm_trace("Processing LH of ordering constraint %d", order->id);
CRM_ASSERT(lh_rsc != NULL);
if (lh_action != NULL) {
lh_actions = g_list_prepend(NULL, lh_action);
} else {
lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
}
if (lh_actions == NULL && lh_rsc != rh_rsc) {
char *key = NULL;
char *op_type = NULL;
guint interval_ms = 0;
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = generate_op_key(lh_rsc->id, op_type, interval_ms);
if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else {
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
lh_actions = g_list_prepend(NULL, lh_action);
}
free(op_type);
}
gIter = lh_actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *lh_action_iter = (action_t *) gIter->data;
if (rh_rsc == NULL && order->rh_action) {
rh_rsc = order->rh_action->rsc;
}
if (rh_rsc) {
rsc_order_then(lh_action_iter, rh_rsc, order);
} else if (order->rh_action) {
order_actions(lh_action_iter, order->rh_action, order->type);
}
}
g_list_free(lh_actions);
}
extern void update_colo_start_chain(pe_action_t *action,
pe_working_set_t *data_set);
static int
is_recurring_action(action_t *action)
{
const char *interval_ms_s = g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS);
guint interval_ms = crm_parse_ms(interval_ms_s);
return (interval_ms > 0);
}
static void
apply_container_ordering(action_t *action, pe_working_set_t *data_set)
{
/* For these purposes, VMs are also classified as containers, in that
* both involve a 'thing' running on a real or remote cluster node.
*
* This lets us be smarter about the type and extent of recovery actions
* required in various scenarios.
*/
resource_t *remote_rsc = NULL;
resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
CRM_ASSERT(action->rsc);
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
container = remote_rsc->container;
CRM_ASSERT(container);
if(is_set(container->flags, pe_rsc_failed)) {
pe_fence_node(data_set, action->node, "container failed");
}
crm_trace("Order %s action %s relative to %s%s for %s%s",
action->task, action->uuid,
is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id,
is_set(container->flags, pe_rsc_failed)? "failed " : "",
container->id);
if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
|| safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
/* Force resource recovery if the container is recovered */
order_start_then_action(container, action, pe_order_implies_then,
data_set);
/* Wait for the connection resource to be up too */
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
break;
case stop_rsc:
case action_demote:
if (is_set(container->flags, pe_rsc_failed)) {
/* When the container representing a guest node fails, any stop
* or demote actions for resources running on the guest node
* are implied by the container stopping. This is similar to
* how fencing operations work for cluster nodes and remote
* nodes.
*/
} else {
/* Ensure the operation happens before the connection is brought
* down.
*
* If we really wanted to, we could order these after the
* connection start, IFF the container's current role was
* stopped (otherwise we re-introduce an ordering loop when the
* connection is restarting).
*/
order_action_then_stop(action, remote_rsc, pe_order_none,
data_set);
}
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if only the
* connection was re-established
*/
if(task != no_action) {
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
}
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
break;
}
}
static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
resource_t *remote_rsc = NULL;
node_t *cluster_node = NULL;
CRM_ASSERT(node);
remote_rsc = node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
cluster_node = pe__current_node(remote_rsc);
/* If the cluster node the remote connection resource resides on
* is unclean or went offline, we can't process any operations
* on that remote node until after it starts elsewhere.
*/
if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
/* The connection resource is not going to run anywhere */
if (cluster_node && cluster_node->details->unclean) {
/* The remote connection is failed because its resource is on a
* failed node and can't be recovered elsewhere, so we must fence.
*/
return remote_state_failed;
}
if (is_not_set(remote_rsc->flags, pe_rsc_failed)) {
/* Connection resource is cleanly stopped */
return remote_state_stopped;
}
/* Connection resource is failed */
if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
&& remote_rsc->remote_reconnect_ms
&& node->details->remote_was_fenced
&& !pe__shutdown_requested(node)) {
/* We won't know whether the connection is recoverable until the
* reconnect interval expires and we reattempt connection.
*/
return remote_state_unknown;
}
/* The remote connection is in a failed state. If there are any
* resources known to be active on it (stop) or in an unknown state
* (probe), we must assume the worst and fence it.
*/
return remote_state_failed;
} else if (cluster_node == NULL) {
/* Connection is recoverable but not currently running anywhere; see if we can recover it first */
return remote_state_unknown;
} else if(cluster_node->details->unclean == TRUE
|| cluster_node->details->online == FALSE) {
/* Connection is running on a dead node; see if we can recover it first */
return remote_state_resting;
} else if (g_list_length(remote_rsc->running_on) > 1
&& remote_rsc->partial_migration_source
&& remote_rsc->partial_migration_target) {
/* We're in the middle of migrating a connection resource,
* wait until after the resource migrates before performing
* any actions.
*/
return remote_state_resting;
}
return remote_state_alive;
}
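/* Rough summary of the states returned above:
* remote_state_alive - connection is up and staying up
* remote_state_resting - connection is down or moving but recoverable;
* wait for it before acting
* remote_state_stopped - connection is (or will be) cleanly stopped
* remote_state_unknown - recoverability can't be determined yet
* remote_state_failed - connection is unrecoverable; fencing is likely
* needed to stop or probe resources on the node
*/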
/*!
* \internal
* \brief Order actions on remote node relative to actions for the connection
*/
static void
apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
{
resource_t *remote_rsc = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = get_remote_node_state(action->node);
enum pe_ordering order_opts = pe_order_none;
if (action->rsc == NULL) {
return;
}
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
crm_trace("Order %s action %s relative to %s%s (state: %s)",
action->task, action->uuid,
is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id, state2text(state));
if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
|| safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
order_opts = pe_order_none;
if (state == remote_state_failed) {
/* Force recovery, by making this action required */
order_opts |= pe_order_implies_then;
}
/* Ensure connection is up before running this action */
order_start_then_action(remote_rsc, action, order_opts, data_set);
break;
case stop_rsc:
if(state == remote_state_alive) {
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else if(state == remote_state_failed) {
/* The resource is active on the node, but since we don't have a
* valid connection, the only way to stop the resource is by
* fencing the node. There is no need to order the stop relative
* to the remote connection, since the stop will become implied
* by the fencing.
*/
pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
} else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
/* State must be remote_state_unknown or remote_state_stopped.
* Since the connection is not coming back up in this
* transition, stop this resource first.
*/
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else {
/* The connection is going to be started somewhere else, so
* stop this resource after that completes.
*/
order_start_then_action(remote_rsc, action, pe_order_none, data_set);
}
break;
case action_demote:
/* Only order this demote relative to the connection start if the
* connection isn't being torn down. Otherwise, the demote would be
* blocked because the connection start would not be allowed.
*/
if(state == remote_state_resting || state == remote_state_unknown) {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
} /* Otherwise we can rely on the stop ordering */
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if only the
* connection was re-established
*/
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
} else {
node_t *cluster_node = pe__current_node(remote_rsc);
if(task == monitor_rsc && state == remote_state_failed) {
/* We would only be here if we do not know the
* state of the resource on the remote node.
* Since we have no way to find out, it is
* necessary to fence the node.
*/
pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
}
if(cluster_node && state == remote_state_stopped) {
/* The connection is currently up, but is going
* down permanently. Make sure we check that
* services are actually stopped _before_ we let
* the connection get closed.
*/
order_action_then_stop(action, remote_rsc,
pe_order_runnable_left, data_set);
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
}
break;
}
}
static void
apply_remote_node_ordering(pe_working_set_t *data_set)
{
if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
return;
}
for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
resource_t *remote = NULL;
// We are only interested in resource actions
if (action->rsc == NULL) {
continue;
}
/* Special case: If we are clearing the failcount of an actual
* remote connection resource, then make sure this happens before
* any start of the resource in this transition.
*/
if (action->rsc->is_remote_node &&
safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
custom_action_order(action->rsc,
NULL,
action,
action->rsc,
generate_op_key(action->rsc->id, RSC_START, 0),
NULL,
pe_order_optional,
data_set);
continue;
}
// We are only interested in actions allocated to a node
if (action->node == NULL) {
continue;
}
if (!pe__is_guest_or_remote_node(action->node)) {
continue;
}
/* We are only interested in real actions.
*
* @TODO This is probably wrong; pseudo-actions might be converted to
* real actions and vice versa later in update_actions() at the end of
* stage7().
*/
if (is_set(action->flags, pe_action_pseudo)) {
continue;
}
remote = action->node->details->remote_rsc;
if (remote == NULL) {
// Orphaned
continue;
}
/* Another special case: if a resource is moving to a Pacemaker Remote
* node, order the stop on the original node after any start of the
* remote connection. This ensures that if the connection fails to
* start, we leave the resource running on the original node.
*/
if (safe_str_eq(action->task, RSC_START)) {
for (GList *item = action->rsc->actions; item != NULL;
item = item->next) {
pe_action_t *rsc_action = item->data;
if ((rsc_action->node->details != action->node->details)
&& safe_str_eq(rsc_action->task, RSC_STOP)) {
custom_action_order(remote, start_key(remote), NULL,
action->rsc, NULL, rsc_action,
pe_order_optional, data_set);
}
}
}
/* The action occurs across a remote connection, so create
* ordering constraints that guarantee the action occurs while the node
* is active (after start, before stop ... things like that).
*
* This is somewhat brittle in that we need to make sure the results of
* this ordering are compatible with the result of get_router_node().
* It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
* of this logic rather than action2xml().
*/
if (remote->container) {
crm_trace("Container ordering for %s", action->uuid);
apply_container_ordering(action, data_set);
} else {
crm_trace("Remote ordering for %s", action->uuid);
apply_remote_ordering(action, data_set);
}
}
}
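/*!
* \internal
* \brief Check whether ordering a probe before another action is unneeded
*
* \param[in] probe Probe action to consider ordering first
* \param[in] rh_action Action that would be ordered after the probe
*
* \return TRUE if the ordering can be skipped, FALSE otherwise
*/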
static gboolean
order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
{
/* There is no need to probe the resource on the node that is being
* unfenced. Doing so might introduce a transition loop, since the
* probe would be performed after the node is unfenced.
*/
if (safe_str_eq(rh_action->task, CRM_OP_FENCE)
&& probe->node && rh_action->node
&& probe->node->details == rh_action->node->details) {
const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
if (safe_str_eq(op, "on")) {
return TRUE;
}
}
// Shutdown waits for probe to complete only if it's on the same node
if ((safe_str_eq(rh_action->task, CRM_OP_SHUTDOWN))
&& probe->node && rh_action->node
&& probe->node->details != rh_action->node->details) {
return TRUE;
}
return FALSE;
}
static void
order_first_probes_imply_stops(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
enum pe_ordering order_type = pe_order_optional;
pe_resource_t *lh_rsc = order->lh_rsc;
pe_resource_t *rh_rsc = order->rh_rsc;
pe_action_t *lh_action = order->lh_action;
pe_action_t *rh_action = order->rh_action;
const char *lh_action_task = order->lh_action_task;
const char *rh_action_task = order->rh_action_task;
GListPtr probes = NULL;
GListPtr rh_actions = NULL;
GListPtr pIter = NULL;
if (lh_rsc == NULL) {
continue;
} else if (rh_rsc && lh_rsc == rh_rsc) {
continue;
}
if (lh_action == NULL && lh_action_task == NULL) {
continue;
}
if (rh_action == NULL && rh_action_task == NULL) {
continue;
}
/* Technically, a probe is expected to return "not running", which can
* serve as an alternative to a stop action when the resource's status
* is not yet known.
*/
if (lh_action && safe_str_neq(lh_action->task, RSC_STOP)) {
continue;
} else if (lh_action == NULL
&& lh_action_task
&& crm_ends_with(lh_action_task, "_" RSC_STOP "_0") == FALSE) {
continue;
}
/* Do not probe a resource inside a stopping container. Doing so might
* introduce a transition loop, since the probe would be performed
* after the container starts again.
*/
if (rh_rsc && lh_rsc->container == rh_rsc) {
if (rh_action && safe_str_eq(rh_action->task, RSC_STOP)) {
continue;
} else if (rh_action == NULL && rh_action_task
&& crm_ends_with(rh_action_task,"_" RSC_STOP "_0")) {
continue;
}
}
if (order->type == pe_order_none) {
continue;
}
// Preserve the order options for future filtering
if (is_set(order->type, pe_order_apply_first_non_migratable)) {
set_bit(order_type, pe_order_apply_first_non_migratable);
}
if (is_set(order->type, pe_order_same_node)) {
set_bit(order_type, pe_order_same_node);
}
// Keep the order types for future filtering
if (order->type == pe_order_anti_colocation
|| order->type == pe_order_load) {
order_type = order->type;
}
probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
if (probes == NULL) {
continue;
}
if (rh_action) {
rh_actions = g_list_prepend(rh_actions, rh_action);
} else if (rh_rsc && rh_action_task) {
rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
}
if (rh_actions == NULL) {
g_list_free(probes);
continue;
}
crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
" (id=%d, type=%.6x)",
lh_action ? lh_action->uuid : lh_action_task,
rh_action ? rh_action->uuid : rh_action_task,
order->id, order->type);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
GListPtr rIter = NULL;
for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;
if (order_first_probe_unneeded(probe, rh_action_iter)) {
continue;
}
order_actions(probe, rh_action_iter, order_type);
}
}
g_list_free(rh_actions);
g_list_free(probes);
}
}
static void
order_first_probe_then_restart_repromote(pe_action_t * probe,
pe_action_t * after,
pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
bool interleave = FALSE;
pe_resource_t *compatible_rsc = NULL;
if (probe == NULL
|| probe->rsc == NULL
|| probe->rsc->variant != pe_native) {
return;
}
if (after == NULL
// Avoid running into any possible loop
|| is_set(after->flags, pe_action_tracking)) {
return;
}
if (safe_str_neq(probe->task, RSC_STATUS)) {
return;
}
pe_set_action_bit(after, pe_action_tracking);
crm_trace("Processing based on %s %s -> %s %s",
probe->uuid,
probe->node ? probe->node->details->uname: "",
after->uuid,
after->node ? after->node->details->uname : "");
if (after->rsc
/* Better not to build a dependency directly on a clone/group;
* instead, proceed through the ordering chain and build
* dependencies with its children.
*/
&& after->rsc->variant == pe_native
&& probe->rsc != after->rsc) {
GListPtr then_actions = NULL;
enum pe_ordering probe_order_type = pe_order_optional;
if (safe_str_eq(after->task, RSC_START)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
} else if (safe_str_eq(after->task, RSC_PROMOTE)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
}
for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *then = (pe_action_t *) gIter->data;
// Skip any pseudo action which for example is implied by fencing
if (is_set(then->flags, pe_action_pseudo)) {
continue;
}
order_actions(probe, then, probe_order_type);
}
g_list_free(then_actions);
}
if (after->rsc
&& after->rsc->variant > pe_group) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
XML_RSC_ATTR_INTERLEAVE);
interleave = crm_is_true(interleave_s);
if (interleave) {
/* For an interleaved clone, we should build a dependency only
* with the relevant clone child.
*/
compatible_rsc = find_compatible_child(probe->rsc,
after->rsc,
RSC_ROLE_UNKNOWN,
FALSE, data_set);
}
}
for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
/* pe_order_implies_then is why a required A.start implies/enforces
* that B.start be required too, which is what causes B to be
* restarted/re-promoted.
*
* pe_order_implies_then_on_node is less certain. It's currently only
* used for the unfencing case, which tends to introduce transition
* loops...
*/
if (is_not_set(after_wrapper->type, pe_order_implies_then)) {
/* The order type between a group/clone and its child such as
* B.start-> B_child.start is:
* pe_order_implies_first_printed | pe_order_runnable_left
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if (after->rsc == NULL
|| after->rsc->variant < pe_group
|| probe->rsc->parent == after->rsc
|| after_wrapper->action->rsc == NULL
|| after_wrapper->action->rsc->variant > pe_group
|| after->rsc != after_wrapper->action->rsc->parent) {
continue;
}
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
if (after->rsc->variant > pe_group
&& interleave == TRUE
&& (compatible_rsc == NULL
|| compatible_rsc != after_wrapper->action->rsc)) {
continue;
}
}
crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
after->uuid,
after->node ? after->node->details->uname: "",
after_wrapper->action->uuid,
after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
after_wrapper->type);
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
}
}
static void clear_actions_tracking_flag(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (is_set(action->flags, pe_action_tracking)) {
pe_clear_action_bit(action, pe_action_tracking);
}
}
}
static void
order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
GListPtr probes = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t * child = (pe_resource_t *) gIter->data;
order_first_rsc_probes(child, data_set);
}
if (rsc->variant != pe_native) {
return;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (gIter = probes; gIter != NULL; gIter= gIter->next) {
pe_action_t *probe = (pe_action_t *) gIter->data;
GListPtr aIter = NULL;
for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
clear_actions_tracking_flag(data_set);
}
}
g_list_free(probes);
}
static void
order_first_probes(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
order_first_rsc_probes(rsc, data_set);
}
order_first_probes_imply_stops(data_set);
}
static void
order_then_probes(pe_working_set_t * data_set)
{
#if 0
GListPtr gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
/* Given "A then B", we would prefer to wait for A to be
* started before probing B.
*
* If A was a filesystem on which the binaries and data for B
* lived, it would have been useful if the author of B's agent
* could assume that A is running before B.monitor will be
* called.
*
* However we can't _only_ probe once A is running, otherwise
* we'd not detect the state of B if A could not be started
* for some reason.
*
* In practice however, we cannot even do an opportunistic
* version of this because B may be moving:
*
* B.probe -> B.start
* B.probe -> B.stop
* B.stop -> B.start
* A.stop -> A.start
* A.start -> B.probe
*
* So far so good, but if we add the result of this code:
*
* B.stop -> A.stop
*
* Then we get a loop:
*
* B.probe -> B.stop -> A.stop -> A.start -> B.probe
*
* We could kill the 'B.probe -> B.stop' dependency, but that
* could mean stopping B "too" soon, because B.start must wait
* for the probes to complete.
*
* Another option is to allow it only if A is a non-unique
* clone with clone-max == node-max (since we'll never be
* moving it). However, we could still be stopping one
* instance at the same time as starting another.
* The complexity of checking for the allowed conditions, combined
* with the ever-narrowing use case, suggests that this code
* should remain disabled until someone gets smarter.
*/
action_t *start = NULL;
GListPtr actions = NULL;
GListPtr probes = NULL;
actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
if (actions) {
start = actions->data;
g_list_free(actions);
}
if(start == NULL) {
crm_err("No start action for %s", rsc->id);
continue;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (actions = start->actions_before; actions != NULL; actions = actions->next) {
action_wrapper_t *before = (action_wrapper_t *) actions->data;
GListPtr pIter = NULL;
action_t *first = before->action;
resource_t *first_rsc = first->rsc;
if(first->required_runnable_before) {
GListPtr clone_actions = NULL;
for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
before = (action_wrapper_t *) clone_actions->data;
crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
CRM_ASSERT(before->action->rsc);
first_rsc = before->action->rsc;
break;
}
} else if(safe_str_neq(first->task, RSC_START)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
if(first_rsc == NULL) {
continue;
} else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
continue;
} else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
continue;
}
crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
action_t *probe = (action_t *) pIter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
order_actions(first, probe, pe_order_optional);
}
}
}
#endif
}
static void
order_probes(pe_working_set_t * data_set)
{
order_first_probes(data_set);
order_then_probes(data_set);
}
gboolean
stage7(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
crm_trace("Applying ordering constraints");
/* Don't ask me why, but apparently ordering constraints need to be
* processed in the order they were created in... go figure.
*
* Also, g_list_append() has horrendous performance characteristics,
* so we use g_list_prepend() and then reverse the list here.
*/
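/* (Each g_list_append() walks to the end of the list, making repeated
* appends O(n^2) overall; prepending is O(1) per call, and a single
* g_list_reverse() restores creation order in O(n).)
*/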
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
resource_t *rsc = order->lh_rsc;
crm_trace("Applying ordering constraint: %d", order->id);
if (rsc != NULL) {
crm_trace("rsc_action-to-*");
rsc_order_first(rsc, order, data_set);
continue;
}
rsc = order->rh_rsc;
if (rsc != NULL) {
crm_trace("action-to-rsc_action");
rsc_order_then(order->lh_action, rsc, order);
} else {
crm_trace("action-to-action");
order_actions(order->lh_action, order->rh_action, order->type);
}
}
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
update_colo_start_chain(action, data_set);
}
crm_trace("Ordering probes");
order_probes(data_set);
crm_trace("Updating %d actions", g_list_length(data_set->actions));
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
update_action(action, data_set);
}
LogNodeActions(data_set, FALSE);
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
LogActions(rsc, data_set, FALSE);
}
return TRUE;
}
static int transition_id = -1;
/*!
* \internal
* \brief Log a message after calculating a transition
*
* \param[in] filename Where transition input is stored
*/
void
pcmk__log_transition_summary(const char *filename)
{
if (was_processing_error) {
crm_err("Calculated transition %d (with errors), saving inputs in %s",
transition_id, filename);
} else if (was_processing_warning) {
crm_warn("Calculated transition %d (with warnings), saving inputs in %s",
transition_id, filename);
} else {
crm_notice("Calculated transition %d, saving inputs in %s",
transition_id, filename);
}
if (crm_config_error) {
crm_notice("Configuration errors found during scheduler processing,"
" please run \"crm_verify -L\" to identify issues");
}
}
/*
* Create a dependency graph to send to the transitioner (via the controller)
*/
gboolean
stage8(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *value = NULL;
transition_id++;
crm_trace("Creating transition graph %d.", transition_id);
data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
value = pe_pref(data_set->config_hash, "cluster-delay");
crm_xml_add(data_set->graph, "cluster-delay", value);
value = pe_pref(data_set->config_hash, "stonith-timeout");
crm_xml_add(data_set->graph, "stonith-timeout", value);
crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(data_set->graph, "failed-start-offset", "1");
}
value = pe_pref(data_set->config_hash, "batch-limit");
crm_xml_add(data_set->graph, "batch-limit", value);
crm_xml_add_int(data_set->graph, "transition_id", transition_id);
value = pe_pref(data_set->config_hash, "migration-limit");
if (crm_int_helper(value, NULL) > 0) {
crm_xml_add(data_set->graph, "migration-limit", value);
}
/* errors...
slist_iter(action, action_t, action_list, lpc,
if(action->optional == FALSE && action->runnable == FALSE) {
print_action("Ignoring", action, TRUE);
}
);
*/
gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
rsc->cmds->expand(rsc, data_set);
}
crm_log_xml_trace(data_set->graph, "created resource-driven action list");
/* Pseudo-action to distribute the list of nodes with maintenance state updates */
add_maintenance_update(data_set);
/* catch any non-resource specific actions */
crm_trace("processing non-resource actions");
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (action->rsc
&& action->node
&& action->node->details->shutdown
&& is_not_set(action->rsc->flags, pe_rsc_maintenance)
&& is_not_set(action->flags, pe_action_optional)
&& is_not_set(action->flags, pe_action_runnable)
&& crm_str_eq(action->task, RSC_STOP, TRUE)
) {
/* Eventually we should just ignore the 'fence' case, but for now
* it's the best way to detect (in CTS) when CIB resource updates
* are being lost.
*/
if (is_set(data_set->flags, pe_flag_have_quorum)
|| data_set->no_quorum_policy == no_quorum_ignore) {
crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
action->node->details->unclean ? "fence" : "shut down",
action->node->details->uname, action->rsc->id,
is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
action->uuid);
}
}
graph_element_from_action(action, data_set);
}
crm_log_xml_trace(data_set->graph, "created generic action list");
crm_trace("Created transition graph %d.", transition_id);
return TRUE;
}
void
LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
{
GListPtr gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
char *node_name = NULL;
char *task = NULL;
action_t *action = (action_t *) gIter->data;
if (action->rsc != NULL) {
continue;
} else if (is_set(action->flags, pe_action_optional)) {
continue;
}
if (pe__is_guest_node(action->node)) {
node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
} else if(action->node) {
node_name = crm_strdup_printf("%s", action->node->details->uname);
}
if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
task = strdup("Shutdown");
} else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
}
if(task == NULL) {
/* Nothing to report */
} else if(terminal && action->reason) {
printf(" * %s %s '%s'\n", task, node_name, action->reason);
} else if(terminal) {
printf(" * %s %s\n", task, node_name);
} else if(action->reason) {
crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
} else {
crm_notice(" * %s %s\n", task, node_name);
}
free(node_name);
free(task);
}
}
diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index d377b34fc9..0ee5188c65 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1,3329 +1,3330 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1
#define INFINITY_HACK (INFINITY * -100)
#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>
void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh,
resource_t * rsc_rh, gboolean update_rh);
void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh,
resource_t * rsc_rh, gboolean update_rh);
static void Recurring(resource_t *rsc, action_t *start, node_t *node,
pe_working_set_t *data_set);
static void RecurringOp(resource_t *rsc, action_t *start, node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node,
pe_working_set_t *data_set);
static void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional,
pe_working_set_t * data_set);
gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
/* *INDENT-OFF* */
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current State */
/* Next State: Unknown Stopped Started Slave Master */
/* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, },
/* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, },
/* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
};
gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = {
/* Current State */
/* Next State: Unknown Stopped Started Slave Master */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
/* *INDENT-ON* */
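/* Example reading of the tables above: a resource currently Started whose
* next role is Master moves directly (rsc_state_matrix[Started][Master] ==
* RSC_ROLE_MASTER), and rsc_action_matrix[Started][Master] == PromoteRsc
* schedules the promote. A Master with next role Stopped passes through
* Slave first (rsc_state_matrix[Master][Stopped] == RSC_ROLE_SLAVE), so a
* demote is scheduled before the stop.
*/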
static gboolean
native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
GListPtr nodes = NULL;
node_t *chosen = NULL;
node_t *best = NULL;
int multiple = 1;
int length = 0;
gboolean result = FALSE;
process_utilization(rsc, &prefer, data_set);
if (is_not_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to ? TRUE : FALSE;
}
// Sort allowed nodes by weight
if (rsc->allowed_nodes) {
length = g_hash_table_size(rsc->allowed_nodes);
}
if (length > 0) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
// First node in sorted list has the best score
best = g_list_nth_data(nodes, 0);
}
if (prefer && nodes) {
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
prefer->details->uname, rsc->id);
/* Favor the preferred node as long as its weight is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's weight is less than INFINITY.
*/
} else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else if (!can_run_resources(chosen)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s (ignoring %d candidates)",
chosen->details->uname, rsc->id, length);
}
}
if ((chosen == NULL) && nodes) {
/* Either there is no preferred node, or the preferred node is not
* available, but there are other nodes allowed to run the resource.
*/
chosen = best;
pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
chosen ? chosen->details->uname : "<none>", rsc->id, length);
if (!pe_rsc_is_unique_clone(rsc->parent)
&& chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* distribute_children() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unallocated instances to prefer a node that's already
* running another instance.
*/
node_t *running = pe__current_node(rsc);
if (running && (can_run_resources(running) == FALSE)) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, running->details->uname);
} else if (running) {
for (GList *iter = nodes->next; iter; iter = iter->next) {
node_t *tmp = (node_t *) iter->data;
if (tmp->weight != chosen->weight) {
// The nodes are sorted by weight, so no more are equal
break;
}
if (tmp->details == running->details) {
// Scores are equal, so prefer the current node
chosen = tmp;
}
multiple++;
}
}
}
}
if (multiple > 1) {
static char score[33];
int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
score2char_stack(chosen->weight, score, sizeof(score));
do_crm_log(log_level,
"Chose node %s for %s from %d nodes with score %s",
chosen->details->uname, rsc->id, multiple, score);
}
result = native_assign_node(rsc, nodes, chosen, FALSE);
g_list_free(nodes);
return result;
}
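/*!
* \internal
* \brief Find the best score among nodes matching a node attribute value
*
* Nodes that cannot currently run resources are treated as -INFINITY.
*
* \param[in] list Table of nodes to check
* \param[in] attr Node attribute to compare (NULL for node name)
* \param[in] value Attribute value a node must have to be considered
*
* \return Best weight found among matching nodes (-INFINITY if none match)
*/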
static int
node_list_attr_score(GHashTable * list, const char *attr, const char *value)
{
GHashTableIter iter;
node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
g_hash_table_iter_init(&iter, list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
int weight = node->weight;
if (can_run_resources(node) == FALSE) {
weight = -INFINITY;
}
if (weight > best_score || best_node == NULL) {
const char *tmp = pe_node_attribute_raw(node, attr);
if (safe_str_eq(value, tmp)) {
best_score = weight;
best_node = node->details->uname;
}
}
}
if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
crm_info("Best score for %s=%s was %s with %d",
attr, value, best_node ? best_node : "<none>", best_score);
}
return best_score;
}
static void
node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor,
gboolean only_positive)
{
int score = 0;
int new_score = 0;
GHashTableIter iter;
node_t *node = NULL;
if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
g_hash_table_iter_init(&iter, list1);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float weight_f = 0;
int weight = 0;
CRM_LOG_ASSERT(node != NULL);
if (node == NULL) { continue; }
score = node_list_attr_score(list2, attr, pe_node_attribute_raw(node, attr));
weight_f = factor * score;
/* Round the number */
/* http://c-faq.com/fp/round.html */
weight = (int)(weight_f < 0 ? weight_f - 0.5 : weight_f + 0.5);
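/* e.g. factor 0.5 with score 3 gives 1.5 -> 2, while factor -0.5 with
* score 3 gives -1.5 -> -2 (rounding half away from zero)
*/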
new_score = merge_weights(weight, node->weight);
if (factor < 0 && score < 0) {
/* Negative preference for a node with a negative score
* should not become a positive preference
*
* TODO - Decide if we want to filter only if weight == -INFINITY
*
*/
crm_trace("%s: Filtering %d + %f*%d (factor * score)",
node->details->uname, node->weight, factor, score);
} else if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f*%d (node < 0)",
node->details->uname, node->weight, factor, score);
} else if (only_positive && new_score < 0 && node->weight > 0) {
node->weight = INFINITY_HACK;
crm_trace("%s: Filtering %d + %f*%d (score > 0)",
node->details->uname, node->weight, factor, score);
} else if (only_positive && new_score < 0 && node->weight == 0) {
crm_trace("%s: Filtering %d + %f*%d (score == 0)",
node->details->uname, node->weight, factor, score);
} else {
crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score);
node->weight = new_score;
}
}
}
GHashTable *
node_hash_dup(GHashTable * hash)
{
/* Hack! */
GListPtr list = g_hash_table_get_values(hash);
GHashTable *result = node_hash_from_list(list);
g_list_free(list);
return result;
}
GHashTable *
native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
float factor, enum pe_weights flags)
{
return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
}
GHashTable *
rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
float factor, enum pe_weights flags)
{
GHashTable *work = NULL;
int multiplier = 1;
if (factor < 0) {
multiplier = -1;
}
if (is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
return nodes;
}
set_bit(rsc->flags, pe_rsc_merging);
if (is_set(flags, pe_weights_init)) {
if (rsc->variant == pe_group && rsc->children) {
GListPtr last = rsc->children;
while (last->next != NULL) {
last = last->next;
}
pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);
} else {
work = node_hash_dup(rsc->allowed_nodes);
}
clear_bit(flags, pe_weights_init);
} else if (rsc->variant == pe_group && rsc->children) {
GListPtr iter = rsc->children;
pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id);
work = node_hash_dup(nodes);
for(iter = rsc->children; iter->next != NULL; iter = iter->next) {
work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags);
}
} else {
pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
work = node_hash_dup(nodes);
node_hash_update(work, rsc->allowed_nodes, attr, factor,
is_set(flags, pe_weights_positive));
}
if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
g_hash_table_destroy(work);
clear_bit(rsc->flags, pe_rsc_merging);
return nodes;
}
if (can_run_any(work)) {
GListPtr gIter = NULL;
if (is_set(flags, pe_weights_forward)) {
gIter = rsc->rsc_cons;
crm_trace("Checking %d additional colocation constraints", g_list_length(gIter));
} else if(rsc->variant == pe_group && rsc->children) {
GListPtr last = rsc->children;
while (last->next != NULL) {
last = last->next;
}
gIter = ((resource_t*)last->data)->rsc_cons_lhs;
crm_trace("Checking %d additional optional group colocation constraints from %s",
g_list_length(gIter), ((resource_t*)last->data)->id);
} else {
gIter = rsc->rsc_cons_lhs;
crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id);
}
for (; gIter != NULL; gIter = gIter->next) {
resource_t *other = NULL;
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (is_set(flags, pe_weights_forward)) {
other = constraint->rsc_rh;
} else {
other = constraint->rsc_lh;
}
pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback);
dump_node_scores(LOG_TRACE, NULL, rhs, work);
}
}
if (is_set(flags, pe_weights_positive)) {
node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (nodes) {
g_hash_table_destroy(nodes);
}
clear_bit(rsc->flags, pe_rsc_merging);
return work;
}
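/*!
* \internal
* \brief Check whether a node has been unfenced
*
* \param[in] node  Node to check
*
* \return true if \p node's unfenced node attribute is set and not "0"
*/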
static inline bool
node_has_been_unfenced(node_t *node)
{
const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
return unfenced && strcmp("0", unfenced);
}
static inline bool
is_unfence_device(resource_t *rsc, pe_working_set_t *data_set)
{
return is_set(rsc->flags, pe_rsc_fence_device)
&& is_set(data_set->flags, pe_flag_enable_unfencing);
}
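/*!
* \internal
* \brief Allocate a primitive resource to a node
*
* Apply colocation constraints to the resource's allowed node weights, then
* assign the resource to the best available node (or to no node, if none is
* suitable).
*
* \param[in] rsc       Resource to allocate
* \param[in] prefer    Node to prefer, if all other criteria are equal
* \param[in] data_set  Cluster working set
*
* \return Node that \p rsc was allocated to, or NULL if none
*/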
node_t *
native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int alloc_details = scores_log_level + 1;
if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
/* never allocate children on their own */
pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
rsc->parent->id);
rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
}
if (is_not_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
set_bit(rsc->flags, pe_rsc_allocating);
print_resource(alloc_details, "Allocating: ", rsc, FALSE);
dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
GHashTable *archive = NULL;
resource_t *rsc_rh = constraint->rsc_rh;
pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
rsc->id, constraint->id, rsc_rh->id,
constraint->score, role2text(constraint->role_lh));
if (constraint->role_lh >= RSC_ROLE_MASTER
|| (constraint->score < 0 && constraint->score > -INFINITY)) {
archive = node_hash_dup(rsc->allowed_nodes);
}
rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive) {
g_hash_table_destroy(archive);
}
}
dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
rsc->allowed_nodes =
constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY,
pe_weights_rollback);
}
print_resource(LOG_TRACE, "Allocating: ", rsc, FALSE);
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
/* make sure it doesn't come up again */
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
} else if(rsc->next_role > rsc->role
&& is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_freeze) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
rsc->next_role = rsc->role;
}
dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__,
rsc->allowed_nodes);
if (is_set(data_set->flags, pe_flag_stonith_enabled)
&& is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
clear_bit(rsc->flags, pe_rsc_managed);
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
const char *reason = NULL;
node_t *assign_to = NULL;
rsc->next_role = rsc->role;
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_MASTER) {
reason = "master";
} else if (is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
native_assign_node(rsc, NULL, assign_to, TRUE);
} else if (is_set(data_set->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
native_assign_node(rsc, NULL, NULL, TRUE);
} else if (is_set(rsc->flags, pe_rsc_provisional)
&& native_choose_node(rsc, prefer, data_set)) {
pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
} else if (rsc->allocated_to == NULL) {
if (is_not_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
}
clear_bit(rsc->flags, pe_rsc_allocating);
print_resource(LOG_TRACE, "Allocated ", rsc, TRUE);
if (rsc->is_remote_node) {
node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
CRM_ASSERT(remote_node != NULL);
if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
crm_trace("Setting Pacemaker Remote node %s to ONLINE",
remote_node->details->id);
remote_node->details->online = TRUE;
/* We shouldn't consider an unseen remote node unclean if we are going
* to try to connect to it. Otherwise we get an unnecessary fence. */

if (remote_node->details->unseen == TRUE) {
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
remote_node->details->id, role2text(rsc->next_role),
(rsc->allocated_to? "" : "un"));
remote_node->details->shutdown = TRUE;
}
}
return rsc->allocated_to;
}
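/*!
* \internal
* \brief Check whether a resource defines duplicate operations
*
* \param[in] rsc          Resource whose operations should be checked
* \param[in] name         Operation name to search for
* \param[in] interval_ms  Operation interval (in milliseconds) to search for
*
* \return TRUE if the resource's configuration contains more than one
*         operation with the given name and interval, otherwise FALSE
*/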
static gboolean
is_op_dup(resource_t *rsc, const char *name, guint interval_ms)
{
gboolean dup = FALSE;
const char *id = NULL;
const char *value = NULL;
xmlNode *operation = NULL;
guint interval2_ms = 0;
CRM_ASSERT(rsc);
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
value = crm_element_value(operation, "name");
if (safe_str_neq(value, name)) {
continue;
}
value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval2_ms = crm_parse_interval_spec(value);
if (interval_ms != interval2_ms) {
continue;
}
if (id == NULL) {
id = ID(operation);
} else {
crm_config_err("Operation %s is a duplicate of %s", ID(operation), id);
crm_config_err
("Do not use the same (name, interval) combination more than once per resource");
dup = TRUE;
}
}
}
return dup;
}
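/*!
* \internal
* \brief Check whether an action name may not be used for a recurring operation
*
* \param[in] name  Action name to check
*
* \return true if \p name is stop, start, demote, or promote, otherwise false
*/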
static bool
op_cannot_recur(const char *name)
{
return safe_str_eq(name, RSC_STOP)
|| safe_str_eq(name, RSC_START)
|| safe_str_eq(name, RSC_DEMOTE)
|| safe_str_eq(name, RSC_PROMOTE);
}
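/*!
* \internal
* \brief Create a recurring monitor action for an active resource
*
* Create a recurring action from an operation definition without
* role="Stopped", cancelling it instead if it no longer matches the
* resource's next role.
*
* \param[in] rsc        Resource the recurring action is for
* \param[in] start      Start action of \p rsc, if any
* \param[in] node       Node the recurring action should run on
* \param[in] operation  Operation definition from the resource configuration
* \param[in] data_set   Cluster working set
*/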
static void
RecurringOp(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
action_t *mon = NULL;
gboolean is_optional = TRUE;
GListPtr possible_matches = NULL;
CRM_ASSERT(rsc);
/* Only process operations that do not have role="Stopped" */
role = crm_element_value(operation, "role");
if (role && text2role(role) == RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
crm_config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = generate_op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
if (start != NULL) {
pe_rsc_trace(rsc, "Marking %s %s due to %s",
key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
start->uuid);
is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
} else {
pe_rsc_trace(rsc, "Marking %s optional", key);
is_optional = TRUE;
}
/* start a monitor for an already active resource */
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
is_optional = FALSE;
pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
} else {
GListPtr gIter = NULL;
for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
action_t *op = (action_t *) gIter->data;
if (is_set(op->flags, pe_action_reschedule)) {
is_optional = FALSE;
break;
}
}
g_list_free(possible_matches);
}
if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
|| (role != NULL && text2role(role) != rsc->next_role)) {
int log_level = LOG_TRACE;
const char *result = "Ignoring";
if (is_optional) {
char *after_key = NULL;
action_t *cancel_op = NULL;
// It's running, so cancel it
log_level = LOG_INFO;
result = "Cancelling";
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
switch (rsc->role) {
case RSC_ROLE_SLAVE:
case RSC_ROLE_STARTED:
if (rsc->next_role == RSC_ROLE_MASTER) {
after_key = promote_key(rsc);
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
after_key = stop_key(rsc);
}
break;
case RSC_ROLE_MASTER:
after_key = demote_key(rsc);
break;
default:
break;
}
if (after_key) {
custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
pe_order_runnable_left, data_set);
}
}
do_crm_log(log_level, "%s action %s (%s vs. %s)",
result, key, role ? role : role2text(RSC_ROLE_SLAVE),
role2text(rsc->next_role));
free(key);
return;
}
mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
key = mon->uuid;
if (is_optional) {
pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
}
if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
} else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
} else if (is_set(mon->flags, pe_action_optional) == FALSE) {
pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
mon->task, interval_ms / 1000, rsc->id, node_uname);
}
if (rsc->next_role == RSC_ROLE_MASTER) {
char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
free(running_master);
}
if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, start_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, reload_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
if (rsc->next_role == RSC_ROLE_MASTER) {
custom_action_order(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
} else if (rsc->role == RSC_ROLE_MASTER) {
custom_action_order(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
}
}
}
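/*!
* \internal
* \brief Create recurring actions for each of a resource's operations
*
* Does nothing if the resource or node is in maintenance mode.
*
* \param[in] rsc       Resource to create recurring actions for
* \param[in] start     Start action of \p rsc, if any
* \param[in] node      Node the recurring actions should run on
* \param[in] data_set  Cluster working set
*/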
static void
Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
RecurringOp(rsc, start, node, operation, data_set);
}
}
}
}
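/*!
* \internal
* \brief Create recurring monitor actions for a resource's Stopped role
*
* Create a recurring action from an operation definition with role="Stopped"
* on every node where the resource should not be running, and cancel any such
* monitor on the node where it will be running.
*
* \param[in] rsc        Resource the recurring actions are for
* \param[in] start      Start action of \p rsc, if any
* \param[in] node       Node the resource will be active on, if any
* \param[in] operation  Operation definition from the resource configuration
* \param[in] data_set   Cluster working set
*/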
static void
RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
GListPtr possible_matches = NULL;
GListPtr gIter = NULL;
/* Only process operations with role="Stopped" */
role = crm_element_value(operation, "role");
if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name);
return;
}
key = generate_op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
// @TODO add support
if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
- crm_notice("Ignoring %s (recurring monitors for role=Stopped are not supported for anonyous clones)",
+ crm_notice("Ignoring %s (recurring monitors for Stopped role are "
+ "not supported for anonymous clones)",
ID(operation));
return;
}
pe_rsc_trace(rsc,
"Creating recurring action %s for %s in role %s on nodes where it should not be running",
ID(operation), rsc->id, role2text(rsc->next_role));
/* if the monitor exists on the node where the resource will be running, cancel it */
if (node != NULL) {
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches) {
action_t *cancel_op = NULL;
g_list_free(possible_matches);
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
/* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
/* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
pe_order_runnable_left, data_set);
}
pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
key, role, role2text(rsc->next_role), node_uname);
}
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *stop_node = (node_t *) gIter->data;
const char *stop_node_uname = stop_node->details->uname;
gboolean is_optional = TRUE;
gboolean probe_is_optional = TRUE;
gboolean stop_is_optional = TRUE;
action_t *stopped_mon = NULL;
char *rc_inactive = NULL;
GListPtr probe_complete_ops = NULL;
GListPtr stop_ops = NULL;
GListPtr local_gIter = NULL;
if (node && safe_str_eq(stop_node_uname, node_uname)) {
continue;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
ID(operation), rsc->id, crm_str(stop_node_uname));
/* start a monitor for an already stopped resource */
possible_matches = find_actions_exact(rsc->actions, key, stop_node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
crm_str(stop_node_uname));
is_optional = FALSE;
} else {
pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
crm_str(stop_node_uname));
is_optional = TRUE;
g_list_free(possible_matches);
}
stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
free(rc_inactive);
if (is_set(rsc->flags, pe_rsc_managed)) {
GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
FALSE);
GListPtr pIter = NULL;
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
action_t *probe = (action_t *) pIter->data;
order_actions(probe, stopped_mon, pe_order_runnable_left);
crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
}
g_list_free(probes);
}
if (probe_complete_ops) {
g_list_free(probe_complete_ops);
}
stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
action_t *stop = (action_t *) local_gIter->data;
if (is_set(stop->flags, pe_action_optional) == FALSE) {
stop_is_optional = FALSE;
}
if (is_set(stop->flags, pe_action_runnable) == FALSE) {
crm_debug("%s\t %s (cancelled : stop un-runnable)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, stop_key(rsc), stop,
NULL, strdup(key), stopped_mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
}
}
if (stop_ops) {
g_list_free(stop_ops);
}
if (is_optional == FALSE && probe_is_optional && stop_is_optional
&& is_set(rsc->flags, pe_rsc_managed) == FALSE) {
pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
key, crm_str(stop_node_uname));
update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
}
if (is_set(stopped_mon->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
}
if (stop_node->details->online == FALSE || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(stopped_mon->flags, pe_action_runnable)
&& is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
}
}
free(key);
}
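/*!
* \internal
* \brief Create role="Stopped" recurring actions for a resource's operations
*
* Does nothing if the resource or node is in maintenance mode.
*
* \param[in] rsc       Resource to create recurring actions for
* \param[in] start     Start action of \p rsc, if any
* \param[in] node      Node the resource will be active on, if any
* \param[in] data_set  Cluster working set
*/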
static void
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
RecurringOp_Stopped(rsc, start, node, operation, data_set);
}
}
}
}
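/*!
* \internal
* \brief Schedule live migration actions for a moving resource
*
* Create migrate_to and migrate_from actions for a resource moving between
* nodes (only migrate_from, if a partial migration is being continued), make
* the resource's start a pseudo-action, and order the probe, migration, stop,
* and start actions relative to one another.
*
* \param[in] rsc       Resource being migrated
* \param[in] current   Node the resource is moving from
* \param[in] chosen    Node the resource is moving to
* \param[in] data_set  Cluster working set
*/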
static void
handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set)
{
action_t *migrate_to = NULL;
action_t *migrate_from = NULL;
action_t *start = NULL;
action_t *stop = NULL;
gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
start = start_action(rsc, chosen, TRUE);
stop = stop_action(rsc, current, TRUE);
if (partial == FALSE) {
migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set);
}
migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
if ((migrate_to && migrate_from) || (migrate_from && partial)) {
set_bit(start->flags, pe_action_migrate_runnable);
set_bit(stop->flags, pe_action_migrate_runnable);
update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */
/* order probes before migrations */
if (partial) {
set_bit(migrate_from->flags, pe_action_migrate_runnable);
migrate_from->needs = start->needs;
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set);
} else {
set_bit(migrate_from->flags, pe_action_migrate_runnable);
set_bit(migrate_to->flags, pe_action_migrate_runnable);
migrate_to->needs = start->needs;
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
}
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set);
}
if (migrate_to) {
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
/* Pacemaker Remote connections don't require pending to be recorded in
* the CIB. We can reduce CIB writes by not setting PENDING for them.
*/
if (rsc->is_remote_node == FALSE) {
/* migrate_to takes place on the source node, but can
* have an effect on the target node depending on how
* the agent is written. Because of this, we have to maintain
* a record that the migrate_to occurred, in case the source node
* loses membership while the migrate_to action is still in-flight.
*/
add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
}
}
if (migrate_from) {
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
}
}
void
native_create_actions(resource_t * rsc, pe_working_set_t * data_set)
{
action_t *start = NULL;
node_t *chosen = NULL;
node_t *current = NULL;
gboolean need_stop = FALSE;
gboolean is_moving = FALSE;
gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;
GListPtr gIter = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
bool multiply_active = FALSE;
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
CRM_ASSERT(rsc);
chosen = rsc->allocated_to;
if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
rsc->next_role = RSC_ROLE_STARTED;
pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
} else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
rsc->next_role = RSC_ROLE_STOPPED;
pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
}
pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
role2text(rsc->role), role2text(rsc->next_role));
current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
node_t *dangling_source = (node_t *) gIter->data;
action_t *stop = stop_action(rsc, dangling_source, FALSE);
set_bit(stop->flags, pe_action_dangle);
pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
rsc->id, dangling_source->details->uname);
if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, dangling_source, FALSE, data_set);
}
}
if ((num_all_active == 2) && (num_clean_active == 2) && chosen
&& rsc->partial_migration_source && rsc->partial_migration_target
&& (current->details == rsc->partial_migration_source->details)
&& (chosen->details == rsc->partial_migration_target->details)) {
/* The chosen node is still the migration target from a partial
* migration. Attempt to continue the migration instead of recovering
* by stopping the resource everywhere and starting it on a single node.
*/
pe_rsc_trace(rsc,
"Will attempt to continue with a partial migration to target %s from %s",
rsc->partial_migration_target->details->id,
rsc->partial_migration_source->details->id);
} else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
} else {
multiply_active = (num_all_active > 1);
}
if (multiply_active) {
if (rsc->partial_migration_target && rsc->partial_migration_source) {
// Migration was in progress, but we've chosen a different target
crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
rsc->id, rsc->partial_migration_target->details->uname,
rsc->partial_migration_source->details->uname);
} else {
// Resource was incorrectly multiply active
pe_proc_err("Resource %s is active on %u nodes (%s)",
rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
}
if (rsc->recovery_type == recovery_stop_start) {
need_stop = TRUE;
}
/* If a partial migration is in progress but the migration target no
* longer matches the chosen node, clear all partial migration data.
*/
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = FALSE;
}
if (is_set(rsc->flags, pe_rsc_start_pending)) {
start = start_action(rsc, chosen, TRUE);
set_bit(start->flags, pe_action_print_always);
}
if (current && chosen && current->details != chosen->details) {
pe_rsc_trace(rsc, "Moving %s", rsc->id);
is_moving = TRUE;
need_stop = TRUE;
} else if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
need_stop = TRUE;
} else if (is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Block %s", rsc->id);
need_stop = TRUE;
} else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
/* Recovery of a promoted resource */
start = start_action(rsc, chosen, TRUE);
if (is_set(start->flags, pe_action_optional) == FALSE) {
pe_rsc_trace(rsc, "Forced start %s", rsc->id);
need_stop = TRUE;
}
}
pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
role2text(rsc->role), role2text(rsc->next_role));
/* Create any additional actions required when bringing resource down and
* back up to same level.
*/
role = rsc->role;
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
rsc->id, need_stop ? " required" : "");
if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
next_role = rsc_state_matrix[role][rsc->role];
pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
rsc->id, need_stop ? " required" : "");
if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
role = rsc->role;
/* Required steps from this role to the next */
while (role != rsc->next_role) {
next_role = rsc_state_matrix[role][rsc->next_role];
pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
break;
}
role = next_role;
}
if(is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
} else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
pe_rsc_trace(rsc, "Monitor ops for active resource");
start = start_action(rsc, chosen, TRUE);
Recurring(rsc, start, chosen, data_set);
Recurring_Stopped(rsc, start, chosen, data_set);
} else {
pe_rsc_trace(rsc, "Monitor ops for inactive resource");
Recurring_Stopped(rsc, NULL, NULL, data_set);
}
/* If we are stuck in a partial migration and the migration target no
* longer matches the chosen node, a full stop/start is required. */
if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
allow_migrate = FALSE;
} else if (is_moving == FALSE ||
is_not_set(rsc->flags, pe_rsc_managed) ||
is_set(rsc->flags, pe_rsc_failed) ||
is_set(rsc->flags, pe_rsc_start_pending) ||
(current && current->details->unclean) ||
rsc->next_role < RSC_ROLE_STARTED) {
allow_migrate = FALSE;
}
if (allow_migrate) {
handle_migration_actions(rsc, current, chosen, data_set);
}
}
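/*!
* \internal
* \brief Ban a resource from all Pacemaker Remote nodes
*
* Set the resource's weight to -INFINITY on every allowed node that is a
* remote node or guest node.
*
* \param[in] rsc  Resource to ban from remote nodes
*/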
static void
rsc_avoids_remote_nodes(resource_t *rsc)
{
GHashTableIter iter;
node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
* \param[in] data_set Cluster working set
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (is_set(data_set->flags, pe_flag_stdout)) {
allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
}
return allowed_nodes;
}
void
native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
{
/* This function is on the critical path and worth optimizing as much as possible */
pe_resource_t *top = NULL;
GList *allowed_nodes = NULL;
bool check_unfencing = FALSE;
bool check_utilization = FALSE;
if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping native constraints for unmanaged resource: %s",
rsc->id);
return;
}
top = uber_parent(rsc);
// Whether resource requires unfencing
check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
&& is_set(data_set->flags, pe_flag_enable_unfencing)
&& is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& safe_str_neq(data_set->placement_strategy, "default");
// Order stops before starts (i.e. restart)
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional | pe_order_implies_then | pe_order_restart,
data_set);
// Promotable ordering: demote before stop, start before promote
if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_implies_first_master, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, data_set);
}
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || rsc->container) {
allowed_nodes = allowed_nodes_as_list(rsc, data_set);
}
if (check_unfencing) {
/* Check if the node needs to be unfenced first */
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pe_order_optional|pe_order_same_node, data_set);
custom_action_order(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pe_order_implies_then_on_node|pe_order_same_node,
data_set);
}
}
if (check_utilization) {
GListPtr gIter = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, data_set->placement_strategy);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_');
action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = node_copy(current);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
}
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *next = item->data;
char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_');
action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = node_copy(next);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, start_key(rsc), NULL, pe_order_load, data_set);
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
pe_order_load, data_set);
free(load_stopped_task);
}
}
if (rsc->container) {
resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Do not allow a guest resource to live on a Pacemaker Remote node,
* to avoid nesting remotes. However, allow bundles to run on remote
* nodes.
*/
if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP,
pe_order_optional, data_set);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(data_set,
rsc->container);
}
if (remote_rsc) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL,
rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL,
pe_order_implies_first, data_set);
if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
rsc_colocation_new("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, data_set);
}
}
if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
/* Don't allow remote nodes to run stonith devices
* or remote connection resources. */
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
void
native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", constraint->id);
return;
} else if (constraint->rsc_rh == NULL) {
pe_err("rsc_rh was NULL for %s", constraint->id);
return;
}
pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
rsc_rh->id);
rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
}
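/*!
* \internal
* \brief Determine how a colocation constraint should be applied
*
* \param[in] rsc_lh      Dependent ("left-hand") resource
* \param[in] rsc_rh      Resource it depends on ("right-hand")
* \param[in] constraint  Colocation constraint to check
* \param[in] preview     Whether this is a preview (ignore allocation state)
*
* \return influence_rsc_location if the constraint should affect the dependent
*         resource's node weights, influence_rsc_priority if it should affect
*         its priority, or influence_nothing if it should be ignored
*/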
enum filter_colocation_res
filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh,
rsc_colocation_t * constraint, gboolean preview)
{
if (constraint->score == 0) {
return influence_nothing;
}
/* rh side must be allocated before we can process constraint */
if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
return influence_nothing;
}
if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
&& is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
/* LH and RH resources have already been allocated, place the correct
* priority on LH rsc for the given promotable clone resource role */
return influence_rsc_priority;
}
if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
// Log an error if we violated a mandatory colocation constraint
const pe_node_t *rh_node = rsc_rh->allocated_to;
if (rsc_lh->allocated_to == NULL) {
// Dependent resource isn't allocated, so constraint doesn't matter
return influence_nothing;
}
if (constraint->score >= INFINITY) {
// Dependent resource must colocate with rh_node
if ((rh_node == NULL)
|| (rh_node->details != rsc_lh->allocated_to->details)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
rsc_lh->id, rsc_rh->id,
rsc_lh->allocated_to->details->uname,
(rh_node? rh_node->details->uname : "unallocated"));
}
} else if (constraint->score <= -INFINITY) {
// Dependent resource must anti-colocate with rh_node
if ((rh_node != NULL)
&& (rsc_lh->allocated_to->details == rh_node->details)) {
crm_err("%s and %s must be anti-colocated but are allocated "
"to the same node (%s)",
rsc_lh->id, rsc_rh->id, rh_node->details->uname);
}
}
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
role2text(constraint->role_lh), role2text(rsc_lh->next_role));
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_lh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_rh));
return influence_nothing;
}
return influence_rsc_location;
}
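/*!
* \internal
* \brief Apply a colocation constraint to a resource's priority
*
* When both resources in a colocation constraint have already been allocated,
* factor the constraint score into the dependent resource's priority (used
* later to determine promotion order) instead of its node weights.
*
* \param[in] rsc_lh      Dependent ("left-hand") resource
* \param[in] rsc_rh      Resource it depends on ("right-hand")
* \param[in] constraint  Colocation constraint to apply
*/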
static void
influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *rh_value = NULL;
const char *lh_value = NULL;
const char *attribute = CRM_ATTR_ID;
int score_multiplier = 1;
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
return;
}
lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
if (!safe_str_eq(lh_value, rh_value)) {
if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
rsc_lh->priority = -INFINITY;
}
return;
}
if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
return;
}
if (constraint->role_lh == RSC_ROLE_SLAVE) {
score_multiplier = -1;
}
rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
}
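/*!
* \internal
* \brief Apply a colocation constraint to a resource's allowed node weights
*
* Merge the constraint score into the weight of each of the dependent
* resource's allowed nodes, based on whether the node's value for the
* relevant node attribute matches that of the node the other resource is
* allocated to. Roll the changes back if they would leave the dependent
* resource unable to run anywhere (unless the constraint is mandatory).
*
* \param[in] rsc_lh      Dependent ("left-hand") resource
* \param[in] rsc_rh      Resource it depends on ("right-hand")
* \param[in] constraint  Colocation constraint to apply
*/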
static void
colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *tmp = NULL;
const char *value = NULL;
const char *attribute = CRM_ATTR_ID;
GHashTable *work = NULL;
gboolean do_check = FALSE;
GHashTableIter iter;
node_t *node = NULL;
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (rsc_rh->allocated_to) {
value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
do_check = TRUE;
} else if (constraint->score < 0) {
/* nothing to do:
* anti-colocation with something that is not running
*/
return;
}
work = node_hash_dup(rsc_lh->allowed_nodes);
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
tmp = pe_node_attribute_raw(node, attribute);
if (do_check && safe_str_eq(tmp, value)) {
if (constraint->score < INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id,
node->details->uname, constraint->score);
node->weight = merge_weights(constraint->score, node->weight);
}
} else if (do_check == FALSE || constraint->score >= INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id,
node->details->uname, constraint->score,
do_check ? "failed" : "unallocated");
node->weight = merge_weights(-constraint->score, node->weight);
}
}
if (can_run_any(work)
|| constraint->score <= -INFINITY || constraint->score >= INFINITY) {
g_hash_table_destroy(rsc_lh->allowed_nodes);
rsc_lh->allowed_nodes = work;
work = NULL;
} else {
static char score[33];
score2char_stack(constraint->score, score, sizeof(score));
pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)",
rsc_lh->id, rsc_rh->id, do_check, score);
}
if (work) {
g_hash_table_destroy(work);
}
}
void
native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
enum filter_colocation_res filter_results;
CRM_ASSERT(rsc_lh);
CRM_ASSERT(rsc_rh);
filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)",
constraint->score >= 0 ? "" : "Anti-",
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
switch (filter_results) {
case influence_rsc_priority:
influence_priority(rsc_lh, rsc_rh, constraint);
break;
case influence_rsc_location:
pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)",
constraint->score >= 0 ? "" : "Anti-",
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score);
colocation_match(rsc_lh, rsc_rh, constraint);
break;
case influence_nothing:
default:
return;
}
}
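/*!
* \internal
* \brief Check whether a ticket constraint applies to a resource's role
*
* \param[in] rsc_lh      Resource subject to the ticket constraint
* \param[in] rsc_ticket  Ticket constraint to check
*
* \return FALSE if the constraint specifies a role that does not match the
*         resource's current role, otherwise TRUE
*/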
static gboolean
filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
{
if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
role2text(rsc_ticket->role_lh));
return FALSE;
}
return TRUE;
}
void
rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
{
if (rsc_ticket == NULL) {
pe_err("rsc_ticket was NULL");
return;
}
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
return;
}
if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
return;
}
if (rsc_lh->children) {
GListPtr gIter = rsc_lh->children;
pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
}
return;
}
pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
role2text(rsc_ticket->role_lh));
if ((rsc_ticket->ticket->granted == FALSE)
&& (rsc_lh->running_on != NULL)) {
GListPtr gIter = NULL;
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
break;
case loss_ticket_demote:
// Promotion score will be set to -INFINITY in promotion_order()
if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
}
break;
case loss_ticket_fence:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
pe_fence_node(data_set, node, "deadman ticket was lost");
}
break;
case loss_ticket_freeze:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
if (rsc_lh->running_on != NULL) {
clear_bit(rsc_lh->flags, pe_rsc_managed);
set_bit(rsc_lh->flags, pe_rsc_block);
}
break;
}
} else if (rsc_ticket->ticket->granted == FALSE) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
}
} else if (rsc_ticket->ticket->standby) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
}
}
}
enum pe_action_flags
native_action_flags(action_t * action, node_t * node)
{
return action->flags;
}
static inline bool
is_primitive_action(pe_action_t *action)
{
return action && action->rsc && (action->rsc->variant == pe_native);
}
/*!
* \internal
* \brief Set action bits appropriately when pe_restart_order is used
*
* \param[in] first 'First' action in an ordering with pe_restart_order
* \param[in] then 'Then' action in an ordering with pe_restart_order
* \param[in] filter What ordering flags to care about
*
* \note pe_restart_order is set for "stop resource before starting it" and
* "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pe_action_t *first, pe_action_t *then,
enum pe_action_flags filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (is_set(filter, pe_action_optional)
&& is_not_set(then->flags, pe_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable start of managed resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (is_set(filter, pe_action_runnable)
&& is_not_set(then->flags, pe_action_runnable)
&& is_set(then->rsc->flags, pe_rsc_managed)
&& safe_str_eq(then->task, RSC_START)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (is_set(first->flags, pe_action_runnable)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' required if 'then' is required
if (is_not_set(then->flags, pe_action_optional)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (is_not_set(then->flags, pe_action_migrate_runnable)) {
pe_action_implies(first, then, pe_action_migrate_runnable);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (is_not_set(first->flags, pe_action_optional)
&& is_not_set(first->flags, pe_action_runnable)) {
pe_action_implies(then, first, pe_action_runnable);
}
}
enum pe_graph_flags
native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
enum pe_action_flags flags, enum pe_action_flags filter,
enum pe_ordering type, pe_working_set_t *data_set)
{
/* flags == get_action_flags(first, then_node) called from update_action() */
enum pe_graph_flags changed = pe_graph_none;
enum pe_action_flags then_flags = then->flags;
enum pe_action_flags first_flags = first->flags;
crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, then->uuid, then->flags);
if (type & pe_order_asymmetrical) {
resource_t *then_rsc = then->rsc;
enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
if (!then_rsc) {
/* ignore */
} else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
/* ignore... if 'then' is supposed to be stopped after 'first', but
* then is already stopped, there is nothing to be done when non-symmetrical. */
} else if ((then_rsc_role >= RSC_ROLE_STARTED)
&& safe_str_eq(then->task, RSC_START)
&& is_set(then->flags, pe_action_optional)
&& then->node
&& g_list_length(then_rsc->running_on) == 1
&& then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
/* Ignore. If 'then' is supposed to be started after 'first', but
* 'then' is already started, there is nothing to be done when
* asymmetrical -- unless the start is mandatory, which indicates
* the resource is restarting, and the ordering is still needed.
*/
} else if (!(first->flags & pe_action_runnable)) {
/* prevent 'then' action from happening if 'first' is not runnable and
* 'then' has not yet occurred. */
pe_action_implies(then, first, pe_action_optional);
pe_action_implies(then, first, pe_action_runnable);
pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
} else {
/* ignore... then is allowed to start/stop if it wants to. */
}
}
if (type & pe_order_implies_first) {
if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
// Needs is_set(first_flags, pe_action_optional) too?
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
if (is_set(flags, pe_action_migrate_runnable) &&
is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
is_set(then->flags, pe_action_optional) == FALSE) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
}
if (type & pe_order_implies_first_master) {
if ((filter & pe_action_optional) &&
((then->flags & pe_action_optional) == FALSE) &&
then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
pe_action_implies(first, then, pe_action_optional);
if (is_set(first->flags, pe_action_migrate_runnable) &&
is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
}
}
if ((type & pe_order_implies_first_migratable)
&& is_set(filter, pe_action_optional)) {
if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
((then->flags & pe_action_runnable) == FALSE)) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_runnable);
}
if ((then->flags & pe_action_optional) == 0) {
pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
}
if ((type & pe_order_pseudo_left)
&& is_set(filter, pe_action_optional)) {
if ((first->flags & pe_action_runnable) == FALSE) {
pe_action_implies(then, first, pe_action_migrate_runnable);
pe_clear_action_bit(then, pe_action_pseudo);
pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
}
}
if (is_set(type, pe_order_runnable_left)
&& is_set(filter, pe_action_runnable)
&& is_set(then->flags, pe_action_runnable)
&& is_set(flags, pe_action_runnable) == FALSE) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_runnable);
pe_action_implies(then, first, pe_action_migrate_runnable);
}
if (is_set(type, pe_order_implies_then)
&& is_set(filter, pe_action_optional)
&& is_set(then->flags, pe_action_optional)
&& is_set(flags, pe_action_optional) == FALSE) {
/* in this case, treat migrate_runnable as if first is optional */
if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_optional);
}
}
if (is_set(type, pe_order_restart)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
changed |= pe_graph_updated_then;
pe_rsc_trace(then->rsc,
"Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
then_flags, first->uuid, first->flags);
if(then->rsc && then->rsc->parent) {
/* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
update_action(then, data_set);
}
}
if (first_flags != first->flags) {
changed |= pe_graph_updated_first;
pe_rsc_trace(first->rsc,
"First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
void
native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
GListPtr gIter = NULL;
GHashTableIter iter;
node_t *node = NULL;
if (constraint == NULL) {
pe_err("Constraint is NULL");
return;
} else if (rsc == NULL) {
pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
return;
}
pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
role2text(constraint->role_filter), rsc->id);
/* take "lifetime" into account */
if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
return;
} else if (is_active(constraint) == FALSE) {
pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id);
return;
}
if (constraint->node_list_rh == NULL) {
pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
return;
}
for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
node_t *other_node = NULL;
other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (other_node != NULL) {
pe_rsc_trace(rsc, "%s + %s: %d + %d",
node->details->uname,
other_node->details->uname, node->weight, other_node->weight);
other_node->weight = merge_weights(other_node->weight, node->weight);
} else {
other_node = node_copy(node);
pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
}
if (other_node->rsc_discover_mode < constraint->discover_mode) {
if (constraint->discover_mode == pe_discover_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
other_node->rsc_discover_mode = constraint->discover_mode;
}
}
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
}
}
void
native_expand(resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
graph_element_from_action(action, data_set);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
#define log_change(a, fmt, args...) do { \
if(a && a->reason && terminal) { \
printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
} else if(a && a->reason) { \
crm_notice(fmt" \tdue to %s", ##args, a->reason); \
} else if(terminal) { \
printf(" * "fmt"\n", ##args); \
} else { \
crm_notice(fmt, ##args); \
} \
} while(0)
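/* Illustrative example (not in the original source): with terminal == TRUE
* and a->reason == "node availability",
*     log_change(a, "Stop       %s", rsc->id);
* prints roughly:
*     * Stop       my-rsc     due to node availability
* whereas with terminal == FALSE the same text goes to the log via
* crm_notice().
*/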
#define STOP_SANITY_ASSERT(lineno) do { \
if(current && current->details->unclean) { \
/* It will be a pseudo op */ \
} else if(stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
} else if(is_set(stop->flags, pe_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
} \
} while(0)
static int rsc_width = 5;
static int detail_width = 5;
static void
LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
{
int len = 0;
char *reason = NULL;
char *details = NULL;
bool same_host = FALSE;
bool same_role = FALSE;
bool need_role = FALSE;
CRM_ASSERT(action);
CRM_ASSERT(destination != NULL || origin != NULL);
if(source == NULL) {
source = action;
}
len = strlen(rsc->id);
if(len > rsc_width) {
rsc_width = len + 2;
}
if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
need_role = TRUE;
}
if(origin != NULL && destination != NULL && origin->details == destination->details) {
same_host = TRUE;
}
if(rsc->role == rsc->next_role) {
same_role = TRUE;
}
if(need_role && origin == NULL) {
/* Promoting from Stopped */
details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
} else if(need_role && destination == NULL) {
/* Demoting a Master or Stopping a Slave */
details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
} else if(origin == NULL || destination == NULL) {
/* Starting or stopping a resource */
details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname);
} else if(need_role && same_role && same_host) {
/* Recovering or restarting a promotable clone resource */
details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
} else if(same_role && same_host) {
/* Recovering or Restarting a normal resource */
details = crm_strdup_printf("%s", origin->details->uname);
} else if(same_role && need_role) {
/* Moving a promotable clone resource */
details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
} else if(same_role) {
/* Moving a normal resource */
details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
} else if(same_host) {
/* Promoting or demoting a promotable clone resource */
details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
} else {
/* Moving and promoting/demoting */
details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
}
len = strlen(details);
if(len > detail_width) {
detail_width = len;
}
if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
} else if(source->reason) {
reason = crm_strdup_printf(" due to %s", source->reason);
} else if(is_not_set(action->flags, pe_action_runnable)) {
reason = strdup(" blocked");
} else {
reason = strdup("");
}
if(terminal) {
printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
} else {
crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
}
free(details);
free(reason);
}
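/* Illustrative output (hypothetical resource and node names): the printf()
* above yields aligned columns such as
*     * Move       my-ip    (          node1 -> node2 )
*     * Recover    my-db    (                   node1 )  due to monitor failure
* rsc_width and detail_width only ever grow, so columns stay aligned across
* successive calls.
*/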
void
LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
node_t *next = NULL;
node_t *current = NULL;
pe_node_t *start_node = NULL;
action_t *stop = NULL;
action_t *start = NULL;
action_t *demote = NULL;
action_t *promote = NULL;
char *key = NULL;
gboolean moving = FALSE;
GListPtr possible_matches = NULL;
if(rsc->variant == pe_container) {
pcmk__bundle_log_actions(rsc, data_set, terminal);
return;
}
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
LogActions(child_rsc, data_set, terminal);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
if (rsc->role == RSC_ROLE_STOPPED) {
/*
* This can occur when resources are being recovered.
* We fiddle with the current role in native_create_actions().
*/
rsc->role = RSC_ROLE_STARTED;
}
}
if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
if (is_not_set(rsc->flags, pe_rsc_managed)
|| (current == NULL && next == NULL)) {
pe_rsc_info(rsc, "Leave %s\t(%s%s)",
rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
pe_rsc_managed) ? " unmanaged" : "");
return;
}
if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
moving = TRUE;
}
possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
if (possible_matches) {
start = possible_matches->data;
g_list_free(possible_matches);
}
if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
start_node = NULL;
} else {
start_node = current;
}
possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
if (possible_matches) {
stop = possible_matches->data;
g_list_free(possible_matches);
}
possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
if (possible_matches) {
promote = possible_matches->data;
g_list_free(possible_matches);
}
possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
if (possible_matches) {
demote = possible_matches->data;
g_list_free(possible_matches);
}
if (rsc->role == rsc->next_role) {
action_t *migrate_op = NULL;
possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
if (possible_matches) {
migrate_op = possible_matches->data;
}
CRM_CHECK(next != NULL,);
if (next == NULL) {
} else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
LogAction("Migrate", rsc, current, next, start, NULL, terminal);
} else if (is_set(rsc->flags, pe_rsc_reload)) {
LogAction("Reload", rsc, current, next, start, NULL, terminal);
} else if (start == NULL || is_set(start->flags, pe_action_optional)) {
pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role),
next->details->uname);
} else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
LogAction("Stop", rsc, current, NULL, stop,
(stop && stop->reason)? stop : start, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving && current) {
LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
rsc, current, next, stop, NULL, terminal);
} else if (is_set(rsc->flags, pe_rsc_failed)) {
LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else {
LogAction("Restart", rsc, current, next, start, NULL, terminal);
/* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
}
g_list_free(possible_matches);
return;
}
if(stop
&& (rsc->next_role == RSC_ROLE_STOPPED
|| (start && is_not_set(start->flags, pe_action_runnable)))) {
GListPtr gIter = NULL;
key = stop_key(rsc);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
action_t *stop_op = NULL;
possible_matches = find_actions(rsc->actions, key, node);
if (possible_matches) {
stop_op = possible_matches->data;
g_list_free(possible_matches);
}
if (stop_op && (stop_op->flags & pe_action_runnable)) {
STOP_SANITY_ASSERT(__LINE__);
}
LogAction("Stop", rsc, node, NULL, stop_op,
(stop_op && stop_op->reason)? stop_op : start, terminal);
}
free(key);
} else if (stop && is_set(rsc->flags, pe_rsc_failed)) {
/* 'stop' may be NULL if the failure was ignored */
LogAction("Recover", rsc, current, next, stop, start, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving) {
LogAction("Move", rsc, current, next, stop, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (is_set(rsc->flags, pe_rsc_reload)) {
LogAction("Reload", rsc, current, next, start, NULL, terminal);
} else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
LogAction("Restart", rsc, current, next, start, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (rsc->role == RSC_ROLE_MASTER) {
CRM_LOG_ASSERT(current != NULL);
LogAction("Demote", rsc, current, next, demote, NULL, terminal);
} else if(rsc->next_role == RSC_ROLE_MASTER) {
CRM_LOG_ASSERT(next);
LogAction("Promote", rsc, current, next, promote, NULL, terminal);
} else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
LogAction("Start", rsc, current, next, start, NULL, terminal);
}
}
gboolean
StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
action_t *stop;
if (rsc->partial_migration_target) {
if (rsc->partial_migration_target->details == current->details) {
pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
next->details->uname, rsc->id);
continue;
} else {
pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
optional = FALSE;
}
}
pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
stop = stop_action(rsc, current, optional);
if(rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", TRUE);
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, optional, data_set);
}
if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set);
order_actions(stop, unfence, pe_order_implies_first);
if (!node_has_been_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
}
}
}
return TRUE;
}
static void
order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action,
enum pe_ordering order, pe_working_set_t *data_set)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
*
* This might seem to violate the principle that fence devices require
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
if (is_unfence_device(rsc, data_set)
|| is_set(rsc->flags, pe_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
order_actions(unfence, action, order);
if (!node_has_been_unfenced(node)) {
// But unfencing is required if it has never been done
char *reason = crm_strdup_printf("required by %s %s",
rsc->id, action->task);
trigger_unfencing(NULL, node, reason, NULL, data_set);
free(reason);
}
}
}
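/* Sketch of the resulting ordering (illustrative): for a resource R that
* requires unfencing on node N, this adds
*     unfence(N) -> probe(R) and unfence(N) -> start(R)
* as optional edges; if N has never been unfenced, trigger_unfencing()
* additionally makes the unfence action itself required.
*/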
gboolean
StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
action_t *start = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
start = start_action(rsc, next, TRUE);
order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
}
return TRUE;
}
gboolean
PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
gboolean runnable = TRUE;
GListPtr action_list = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(next != NULL, return FALSE);
pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *start = (action_t *) gIter->data;
if (is_set(start->flags, pe_action_runnable) == FALSE) {
runnable = FALSE;
}
}
g_list_free(action_list);
if (runnable) {
promote_action(rsc, next, optional);
return TRUE;
}
pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *promote = (action_t *) gIter->data;
update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
}
g_list_free(action_list);
return TRUE;
}
gboolean
DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
/* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
node_t *current = (node_t *) gIter->data;
pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
demote_action(rsc, current, optional);
}
return TRUE;
}
gboolean
RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
CRM_CHECK(FALSE, return FALSE);
return FALSE;
}
gboolean
NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
return FALSE;
}
gboolean
DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set)
{
if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
return FALSE;
} else if (node == NULL) {
pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
return FALSE;
} else if (node->details->unclean || node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
node->details->uname);
return FALSE;
}
crm_notice("Removing %s from %s", rsc->id, node->details->uname);
delete_action(rsc, node, optional);
new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
optional ? pe_order_implies_then : pe_order_optional, data_set);
new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
optional ? pe_order_implies_then : pe_order_optional, data_set);
return TRUE;
}
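/* The two orderings above chain the actions as
*     rsc.stop -> rsc.delete -> rsc.start
* so the resource's history is removed from the node only while the
* resource is stopped there.
*/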
gboolean
native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
enum pe_ordering flags = pe_order_optional;
char *key = NULL;
action_t *probe = NULL;
node_t *running = NULL;
node_t *allowed = NULL;
resource_t *top = uber_parent(rsc);
static const char *rc_master = NULL;
static const char *rc_inactive = NULL;
if (rc_inactive == NULL) {
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
}
CRM_CHECK(node != NULL, return FALSE);
if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
return FALSE;
}
if (pe__is_guest_or_remote_node(node)) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
rsc->id, node->details->id);
return FALSE;
} else if (pe__is_guest_node(node)
&& pe__resource_contains_guest_node(data_set, rsc)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
rsc->id, node->details->id);
return FALSE;
} else if (rsc->is_remote_node) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
rsc->id, node->details->id);
return FALSE;
}
}
if (rsc->children) {
GListPtr gIter = NULL;
gboolean any_created = FALSE;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
|| any_created;
}
return any_created;
} else if ((rsc->container) && (!rsc->is_remote_node)) {
pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
return FALSE;
}
if (is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
return FALSE;
}
// Check whether resource is already known on node
if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
return FALSE;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
if (allowed == NULL) {
/* exclusive discover is enabled and this node is not in the allowed list. */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
return FALSE;
} else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
/* exclusive discover is enabled and this node is not marked
* as a node this resource should be discovered on */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
return FALSE;
}
}
if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
/* If this node was allowed to host this resource it would
* have been explicitly added to the 'allowed_nodes' list.
* However, it wasn't, and the node has discovery disabled, so
* there is no need to probe for this resource.
*/
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
return FALSE;
}
if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
/* this resource is marked as not needing to be discovered on this node */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
return FALSE;
}
if (pe__is_guest_node(node)) {
resource_t *remote = node->details->remote_rsc->container;
if(remote->role == RSC_ROLE_STOPPED) {
/* If the container is stopped, then we know anything that
* might have been inside it is also stopped and there is
* no need to probe.
*
* If we don't know the container's state on the target,
* then either:
*
* - the container is running, the transition will abort
* and we'll end up in a different case next time, or
*
* - the container is stopped
*
* Either way there is no need to probe.
*
*/
if(remote->allocated_to
&& g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
/* For safety, we order the 'rsc' start after 'remote'
* has been probed.
*
* Using 'top' helps for groups, but we may need to
* follow the start's ordering chain backwards.
*/
custom_action_order(remote, generate_op_key(remote->id, RSC_STATUS, 0), NULL,
top, generate_op_key(top->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
}
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
rsc->id, node->details->id, remote->id);
return FALSE;
/* Here we really want to check whether remote->stop is required,
* but that information doesn't exist yet
*/
} else if(node->details->remote_requires_reset
|| node->details->unclean
|| is_set(remote->flags, pe_rsc_failed)
|| remote->next_role == RSC_ROLE_STOPPED
|| (remote->allocated_to
&& pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
) {
/* The container is stopping or restarting, don't start
* 'rsc' until 'remote' stops as this also implies that
* 'rsc' is stopped - avoiding the need to probe
*/
custom_action_order(remote, generate_op_key(remote->id, RSC_STOP, 0), NULL,
top, generate_op_key(top->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
rsc->id, node->details->id, remote->id);
return FALSE;
/* } else {
* The container is running so there is no problem probing it
*/
}
}
key = generate_op_key(rsc->id, RSC_STATUS, 0);
probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
/*
* We need to know if it's running_on (not just known_on) this node
* to correctly determine the target rc.
*/
running = pe_find_node_id(rsc->running_on, node->details->id);
if (running == NULL) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
} else if (rsc->role == RSC_ROLE_MASTER) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
}
crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
is_set(probe->flags, pe_action_runnable), rsc->running_on);
if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
top = rsc;
} else {
crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
}
if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
/* Prevent the start from occurring if rsc isn't active, but
* don't cause it to stop if it was active already
*/
flags |= pe_order_runnable_left;
}
custom_action_order(rsc, NULL, probe,
top, generate_op_key(top->id, RSC_START, 0), NULL,
flags, data_set);
/* Before any reloads, if they exist */
custom_action_order(rsc, NULL, probe,
top, reload_key(rsc), NULL,
pe_order_optional, data_set);
#if 0
// complete is always null currently
if (!is_unfence_device(rsc, data_set)) {
/* Normally rsc.start depends on probe complete which depends
* on rsc.probe. But this can't be the case for fence devices
* with unfencing, as it would create graph loops.
*
* So instead we explicitly order 'rsc.probe then rsc.start'
*/
order_actions(probe, complete, pe_order_implies_then);
}
#endif
return TRUE;
}
/*!
* \internal
* \brief Check whether a resource is known on a particular node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
{
if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
return TRUE;
} else if ((rsc->variant == pe_native)
&& pe_rsc_is_anon_clone(rsc->parent)
&& pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
/* We check only the parent, not the uber-parent, because we cannot
* assume that the resource is known if it is in an anonymously cloned
* group (which may be only partially known).
*/
return TRUE;
}
return FALSE;
}
/*!
* \internal
* \brief Order a resource's start and promote actions relative to fencing
*
* \param[in] rsc Resource to be ordered
* \param[in] stonith_op Fence action
* \param[in] data_set Cluster information
*/
static void
native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
node_t *target;
GListPtr gIter = NULL;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
switch (action->needs) {
case rsc_req_nothing:
// Anything other than start or promote requires nothing
break;
case rsc_req_stonith:
order_actions(stonith_op, action, pe_order_optional);
break;
case rsc_req_quorum:
if (safe_str_eq(action->task, RSC_START)
&& pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
* we're about to shoot, we have to assume it may be active
* there. Order the resource start after the fencing. This
* is analogous to waiting for all the probes for a resource
* to complete before starting it.
*
* The most likely explanation is that the DC died and took
* its status with it.
*/
pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
target->details->uname);
order_actions(stonith_op, action,
pe_order_optional | pe_order_runnable_left);
}
break;
}
}
}
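/* Illustrative result: if node1 is about to be fenced and rsc1's status
* there is unknown, the graph gains
*     stonith(node1) -> start(rsc1)
* with pe_order_runnable_left, analogous to waiting for all probes of a
* resource before starting it.
*/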
static void
native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
GListPtr action_list = NULL;
bool order_implicit = FALSE;
resource_t *top = uber_parent(rsc);
node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
// If resource requires fencing, implicit actions must occur after fencing
if (is_set(rsc->flags, pe_rsc_needs_fencing)) {
order_implicit = TRUE;
}
/* Implied stops and demotes of resources running on guest nodes are always
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
if (pe__is_guest_node(target)) {
order_implicit = TRUE;
}
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
// The stop would never complete, so convert it into a pseudo-action.
update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
if (order_implicit) {
enum pe_ordering flags = pe_order_optional;
action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
if (is_set(rsc->flags, pe_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
crm_info("%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
update_action_flags(action, pe_action_implied_by_stonith,
__FUNCTION__, __LINE__);
if (target->details->remote_rsc) {
/* User constraints must not order a resource in a guest node
* relative to the guest node container resource. This flag
* marks constraints as generated by the cluster and thus
* immune to that check.
*/
flags |= pe_order_preserve;
}
if (pe_rsc_is_bundled(rsc) == FALSE) {
order_actions(stonith_op, action, flags);
}
order_actions(stonith_op, parent_stop, flags);
} else {
if (is_set(rsc->flags, pe_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit because %s will be fenced",
rsc->id, target->details->uname);
} else {
crm_info("%s is implicit because %s will be fenced",
action->uuid, target->details->uname);
}
}
if (is_set(rsc->flags, pe_rsc_notify)) {
/* Create a second notification that will be delivered
* immediately after the node is fenced
*
* Basic problem:
* - C is a clone active on the node to be shot and stopping on another
* - R is a resource that depends on C
*
* + C.stop depends on R.stop
* + C.stopped depends on STONITH
* + C.notify depends on C.stopped
* + C.healthy depends on C.notify
* + R.stop depends on C.healthy
*
* The extra notification here changes
* + C.healthy depends on C.notify
* into:
* + C.healthy depends on C.notify'
* + C.notify' depends on STONITH'
* thus breaking the loop
*/
create_secondary_notification(action, rsc, stonith_op, data_set);
}
/* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
However, given group(rA, rB) running on nodeX, where rB's stop has failed:
A := stop healthy resource (rA.stop)
B := stop failed resource (pseudo operation B.stop)
C := stonith nodeX
A requires B, B requires C, C requires A
This loop would prevent the cluster from making progress.
This block creates the "C requires A" dependency and therefore must (at least
for now) be disabled.
Instead, run the block above and treat all resources on nodeX as B would be
(marked as a pseudo op depending on the STONITH).
TODO: Break the "A requires B" dependency in update_action() and re-enable this block
} else if(is_stonith == FALSE) {
crm_info("Moving healthy resource %s"
" off %s before fencing",
rsc->id, node->details->uname);
* stop healthy resources before the
* stonith op
*
custom_action_order(
rsc, stop_key(rsc), NULL,
NULL,strdup(CRM_OP_FENCE),stonith_op,
pe_order_optional, data_set);
*/
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
|| is_set(rsc->flags, pe_rsc_failed)) {
if (is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_info(rsc,
"Demote of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
if (pe_rsc_is_bundled(rsc)) {
/* Do nothing, let the recovery be ordered after the parent's implied stop */
} else if (order_implicit) {
order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
}
}
}
g_list_free(action_list);
}
void
rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
rsc_stonith_ordering(child_rsc, stonith_op, data_set);
}
} else if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
} else {
native_start_constraints(rsc, stonith_op, data_set);
native_stop_constraints(rsc, stonith_op, data_set);
}
}
void
ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
action_t *reload = NULL;
if (rsc->children) {
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
ReloadRsc(child_rsc, node, data_set);
}
return;
} else if (rsc->variant > pe_native) {
/* Complex resource with no children */
return;
} else if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
return;
} else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags);
stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */
return;
} else if (node == NULL) {
pe_rsc_trace(rsc, "%s: not active", rsc->id);
return;
}
pe_rsc_trace(rsc, "Processing %s", rsc->id);
set_bit(rsc->flags, pe_rsc_reload);
reload = custom_action(
rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
pe_action_set_reason(reload, "resource definition change", FALSE);
custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
}
void
native_append_meta(resource_t * rsc, xmlNode * xml)
{
char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
resource_t *parent;
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container) {
crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
}
}
}
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index eeec852e3b..7988e3aae4 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,4425 +1,4425 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include /* crm_ends_with_ext */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
static void clean_up_connections(void);
static crm_exit_t clean_up(crm_exit_t exit_code);
static void crm_diff_update(const char *event, xmlNode * msg);
static gboolean mon_refresh_display(gpointer user_data);
static int cib_connect(gboolean full);
static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
static void kick_refresh(gboolean data_updated);
static char *get_node_display_name(node_t *node);
/*
* Definitions indicating which items to print
*/
#define mon_show_times (0x0001U)
#define mon_show_stack (0x0002U)
#define mon_show_dc (0x0004U)
#define mon_show_count (0x0008U)
#define mon_show_nodes (0x0010U)
#define mon_show_resources (0x0020U)
#define mon_show_attributes (0x0040U)
#define mon_show_failcounts (0x0080U)
#define mon_show_operations (0x0100U)
#define mon_show_tickets (0x0200U)
#define mon_show_bans (0x0400U)
#define mon_show_fence_history (0x0800U)
#define mon_show_headers (mon_show_times | mon_show_stack | mon_show_dc \
| mon_show_count)
#define mon_show_default (mon_show_headers | mon_show_nodes \
| mon_show_resources)
#define mon_show_all (mon_show_default | mon_show_attributes \
| mon_show_failcounts | mon_show_operations \
| mon_show_tickets | mon_show_bans \
| mon_show_fence_history)
static unsigned int show = mon_show_default;
/*
* Definitions indicating how to output
*/
enum mon_output_format_e {
mon_output_none,
mon_output_monitor,
mon_output_plain,
mon_output_console,
mon_output_xml,
mon_output_html,
mon_output_cgi
} output_format = mon_output_console;
static char *output_filename = NULL; /* if sending output to a file, its name */
/* other globals */
static char *pid_file = NULL;
static gboolean group_by_node = FALSE;
static gboolean inactive_resources = FALSE;
static int reconnect_msec = 5000;
static gboolean daemonize = FALSE;
static GMainLoop *mainloop = NULL;
static guint timer_id = 0;
static mainloop_timer_t *refresh_timer = NULL;
static pe_working_set_t *mon_data_set = NULL;
static GList *attr_list = NULL;
static const char *external_agent = NULL;
static const char *external_recipient = NULL;
static cib_t *cib = NULL;
static stonith_t *st = NULL;
static xmlNode *current_cib = NULL;
static gboolean one_shot = FALSE;
static gboolean has_warnings = FALSE;
static gboolean print_timing = FALSE;
static gboolean watch_fencing = FALSE;
static gboolean fence_history = FALSE;
static gboolean fence_full_history = FALSE;
static gboolean fence_connect = FALSE;
static int fence_history_level = 1;
static gboolean print_brief = FALSE;
static gboolean print_pending = TRUE;
static gboolean print_clone_detail = FALSE;
#if CURSES_ENABLED
static gboolean curses_console_initialized = FALSE;
#endif
/* FIXME allow, detect, and correctly interpret glob pattern or regex? */
const char *print_neg_location_prefix = "";
/* Never display node attributes whose name starts with one of these prefixes */
#define FILTER_STR { CRM_FAIL_COUNT_PREFIX, CRM_LAST_FAILURE_PREFIX, \
"shutdown", "terminate", "standby", "probe_complete", \
"#", NULL }
long last_refresh = 0;
crm_trigger_t *refresh_trigger = NULL;
/* Define exit codes for monitoring-compatible output
* For nagios plugins, the possibilities are
* OK=0, WARN=1, CRIT=2, and UNKNOWN=3
*/
#define MON_STATUS_WARN CRM_EX_ERROR
#define MON_STATUS_CRIT CRM_EX_INVALID_PARAM
#define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE
/* Convenience macro for prettifying output (e.g. "node" vs "nodes") */
#define s_if_plural(i) (((i) == 1)? "" : "s")
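/* For example, print_as("%d node%s online", n, s_if_plural(n)) yields
* "1 node online" or "3 nodes online". */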
#if CURSES_ENABLED
# define print_dot() if (output_format == mon_output_console) { \
printw("."); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, "."); \
}
#else
# define print_dot() fprintf(stdout, ".");
#endif
#if CURSES_ENABLED
# define print_as(fmt, args...) if (output_format == mon_output_console) { \
printw(fmt, ##args); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, fmt, ##args); \
}
#else
# define print_as(fmt, args...) fprintf(stdout, fmt, ##args);
#endif
static void
blank_screen(void)
{
#if CURSES_ENABLED
int lpc = 0;
for (lpc = 0; lpc < LINES; lpc++) {
move(lpc, 0);
clrtoeol();
}
move(0, 0);
refresh();
#endif
}
static gboolean
mon_timer_popped(gpointer data)
{
int rc = pcmk_ok;
#if CURSES_ENABLED
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
if (timer_id > 0) {
g_source_remove(timer_id);
timer_id = 0;
}
print_as("Reconnecting...\n");
rc = cib_connect(TRUE);
if (rc != pcmk_ok) {
timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
}
return FALSE;
}
static void
mon_cib_connection_destroy(gpointer user_data)
{
print_as("Connection to the cluster-daemons terminated\n");
if (refresh_timer != NULL) {
/* we'll trigger a refresh after reconnect */
mainloop_timer_stop(refresh_timer);
}
if (timer_id) {
/* we'll trigger a new reconnect-timeout at the end */
g_source_remove(timer_id);
timer_id = 0;
}
if (st) {
/* the client API won't properly reconnect notifications
* if they are still in the table - so remove them
*/
st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
if (st->state != stonith_disconnected) {
st->cmds->disconnect(st);
}
}
if (cib) {
cib->cmds->signoff(cib);
timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL);
}
return;
}
/*
* Mainloop signal handler.
*/
static void
mon_shutdown(int nsig)
{
clean_up(CRM_EX_OK);
}
#if CURSES_ENABLED
static sighandler_t ncurses_winch_handler;
static void
mon_winresize(int nsig)
{
static int not_done;
int lines = 0, cols = 0;
if (!not_done++) {
if (ncurses_winch_handler)
/* the original ncurses WINCH signal handler does the
* magic of retrieving the new window size;
* otherwise, we'd have to use ioctl or tgetent */
(*ncurses_winch_handler) (SIGWINCH);
getmaxyx(stdscr, lines, cols);
resizeterm(lines, cols);
mainloop_set_trigger(refresh_trigger);
}
not_done--;
}
#endif
static int
cib_connect(gboolean full)
{
int rc = pcmk_ok;
static gboolean need_pass = TRUE;
CRM_CHECK(cib != NULL, return -EINVAL);
if (getenv("CIB_passwd") != NULL) {
need_pass = FALSE;
}
if ((fence_connect) && (st == NULL)) {
st = stonith_api_new();
}
- if ((fence_connect) && (st->state == stonith_disconnected)) {
+ if (fence_connect && (st != NULL) && (st->state == stonith_disconnected)) {
rc = st->cmds->connect(st, crm_system_name, NULL);
if (rc == pcmk_ok) {
crm_trace("Setting up stonith callbacks");
if (watch_fencing) {
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
mon_st_callback_event);
st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event);
} else {
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
mon_st_callback_display);
st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display);
}
}
}
if (cib->state != cib_connected_query && cib->state != cib_connected_command) {
crm_trace("Connecting to the CIB");
if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) {
need_pass = FALSE;
print_as("Password:");
}
rc = cib->cmds->signon(cib, crm_system_name, cib_query);
if (rc != pcmk_ok) {
return rc;
}
rc = cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
if (rc == pcmk_ok) {
mon_refresh_display(NULL);
}
if (rc == pcmk_ok && full) {
if (rc == pcmk_ok) {
rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy);
if (rc == -EPROTONOSUPPORT) {
print_as
("Notification setup not supported, won't be able to reconnect after failure");
if (output_format == mon_output_console) {
sleep(2);
}
rc = pcmk_ok;
}
}
if (rc == pcmk_ok) {
cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
}
if (rc != pcmk_ok) {
print_as("Notification setup failed, could not monitor CIB actions");
if (output_format == mon_output_console) {
sleep(2);
}
clean_up_connections();
}
}
}
return rc;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"quiet", 0, 0, 'Q', "\tDisplay only essential output" },
{"-spacer-", 1, 0, '-', "\nModes (mutually exclusive):"},
{"as-html", 1, 0, 'h', "\tWrite cluster status to the named html file"},
{"as-xml", 0, 0, 'X', "\t\tWrite cluster status as xml to stdout. This will enable one-shot mode."},
{"web-cgi", 0, 0, 'w', "\t\tWeb mode with output suitable for CGI (preselected when run as *.cgi)"},
{"simple-status", 0, 0, 's', "\tDisplay the cluster status once as a simple one line output (suitable for nagios)"},
{"-spacer-", 1, 0, '-', "\nDisplay Options:"},
{"group-by-node", 0, 0, 'n', "\tGroup resources by node" },
{"inactive", 0, 0, 'r', "\t\tDisplay inactive resources" },
{"failcounts", 0, 0, 'f', "\tDisplay resource fail counts"},
{"operations", 0, 0, 'o', "\tDisplay resource operation history" },
{"timing-details", 0, 0, 't', "\tDisplay resource operation history with timing details" },
{"tickets", 0, 0, 'c', "\t\tDisplay cluster tickets"},
{"watch-fencing", 0, 0, 'W', "\tListen for fencing events. For use with --external-agent"},
{"fence-history", 2, 0, 'm', "Show fence history\n"
"\t\t\t\t\t0=off, 1=failures and pending (default without option),\n"
"\t\t\t\t\t2=add successes (default without value for option),\n"
"\t\t\t\t\t3=show full history without reduction to most recent of each flavor"},
{"neg-locations", 2, 0, 'L', "Display negative location constraints [optionally filtered by id prefix]"},
{"show-node-attributes", 0, 0, 'A', "Display node attributes" },
{"hide-headers", 0, 0, 'D', "\tHide all headers" },
{"show-detail", 0, 0, 'R', "\tShow more details (node IDs, individual clone instances)" },
{"brief", 0, 0, 'b', "\t\tBrief output" },
{"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"interval", 1, 0, 'i', "\tUpdate frequency in seconds" },
{"one-shot", 0, 0, '1', "\t\tDisplay the cluster status once on the console and exit"},
{"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED},
{"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"},
{"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"},
{"external-agent", 1, 0, 'E', "A program to run when resource operations take place."},
{"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."},
{"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden},
{"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', "Display the cluster status on the console with updates as they occur:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Display the cluster status on the console just once then exit:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Display your cluster status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example},
{"-spacer-", 1, 0, '-', "Start crm_mon and export the current cluster status as xml to stdout, then exit.:", pcmk_option_paragraph},
{"-spacer-", 1, 0, '-', " crm_mon --as-xml", pcmk_option_example},
{NULL, 0, 0, 0}
};
/* *INDENT-ON* */
#if CURSES_ENABLED
static const char *
get_option_desc(char c)
{
int lpc;
for (lpc = 0; long_options[lpc].name != NULL; lpc++) {
if (long_options[lpc].name[0] == '-')
continue;
if (long_options[lpc].val == c) {
static char *buf = NULL;
const char *rv;
char *nl;
/* chop off tabs and cut at newline */
free(buf); /* free string from last usage */
buf = strdup(long_options[lpc].desc);
rv = buf; /* keep the buf pointer unaltered so it can
be freed on the next call; this way the
returned string stays valid until then
*/
while(isspace(rv[0])) {
rv++;
}
nl = strchr(rv, '\n');
if (nl) {
*nl = '\0';
}
return rv;
}
}
return NULL;
}
#define print_option_help(option, condition) \
print_as("%c %c: \t%s\n", ((condition)? '*': ' '), option, get_option_desc(option));
static gboolean
detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer unused)
{
int c;
gboolean config_mode = FALSE;
while (1) {
/* Get user input */
c = getchar();
switch (c) {
case 'm':
if (!fence_history_level) {
fence_history = TRUE;
fence_connect = TRUE;
if (st == NULL) {
mon_cib_connection_destroy(NULL);
}
}
show ^= mon_show_fence_history;
break;
case 'c':
show ^= mon_show_tickets;
break;
case 'f':
show ^= mon_show_failcounts;
break;
case 'n':
group_by_node = ! group_by_node;
break;
case 'o':
show ^= mon_show_operations;
if ((show & mon_show_operations) == 0) {
print_timing = 0;
}
break;
case 'r':
inactive_resources = ! inactive_resources;
break;
case 'R':
print_clone_detail = ! print_clone_detail;
break;
case 't':
print_timing = ! print_timing;
if (print_timing) {
show |= mon_show_operations;
}
break;
case 'A':
show ^= mon_show_attributes;
break;
case 'L':
show ^= mon_show_bans;
break;
case 'D':
/* If any header is shown, clear them all, otherwise set them all */
if (show & mon_show_headers) {
show &= ~mon_show_headers;
} else {
show |= mon_show_headers;
}
break;
case 'b':
print_brief = ! print_brief;
break;
case 'j':
print_pending = ! print_pending;
break;
case '?':
config_mode = TRUE;
break;
default:
goto refresh;
}
if (!config_mode)
goto refresh;
blank_screen();
print_as("Display option change mode\n");
print_as("\n");
print_option_help('c', show & mon_show_tickets);
print_option_help('f', show & mon_show_failcounts);
print_option_help('n', group_by_node);
print_option_help('o', show & mon_show_operations);
print_option_help('r', inactive_resources);
print_option_help('t', print_timing);
print_option_help('A', show & mon_show_attributes);
print_option_help('L', show & mon_show_bans);
print_option_help('D', (show & mon_show_headers) == 0);
print_option_help('R', print_clone_detail);
print_option_help('b', print_brief);
print_option_help('j', print_pending);
print_option_help('m', (show & mon_show_fence_history));
print_as("\n");
print_as("Toggle fields via field letter, type any other key to return");
}
refresh:
mon_refresh_display(NULL);
return TRUE;
}
#endif
// Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag
static void
avoid_zombies()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
if (sigemptyset(&sa.sa_mask) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
return;
}
sa.sa_handler = SIG_IGN;
sa.sa_flags = SA_RESTART|SA_NOCLDWAIT;
if (sigaction(SIGCHLD, &sa, NULL) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
}
}
int
main(int argc, char **argv)
{
int flag;
int argerr = 0;
int option_index = 0;
int rc = pcmk_ok;
pid_file = strdup("/tmp/ClusterMon.pid");
crm_log_cli_init("crm_mon");
crm_set_options(NULL, "mode [options]", long_options,
"Provides a summary of cluster's current state."
"\n\nOutputs varying levels of detail in a number of different formats.\n");
// Avoid needing to wait for subprocesses forked for -E/--external-agent
avoid_zombies();
if (crm_ends_with_ext(argv[0], ".cgi") == TRUE) {
output_format = mon_output_cgi;
one_shot = TRUE;
}
/* To enable the fencing connection when crm_mon is called via an
* application like pcs, set the FENCE_HISTORY environment variable to the
* desired level, so this application itself doesn't have to be modified.
*/
/* fence_history_level = crm_atoi(getenv("FENCE_HISTORY"), "0"); */
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'Q':
show &= ~mon_show_times;
break;
case 'i':
reconnect_msec = crm_get_msec(optarg);
break;
case 'n':
group_by_node = TRUE;
break;
case 'r':
inactive_resources = TRUE;
break;
case 'W':
watch_fencing = TRUE;
fence_connect = TRUE;
break;
case 'm':
fence_history_level = crm_atoi(optarg, "2");
break;
case 'd':
daemonize = TRUE;
break;
case 't':
print_timing = TRUE;
show |= mon_show_operations;
break;
case 'o':
show |= mon_show_operations;
break;
case 'f':
show |= mon_show_failcounts;
break;
case 'A':
show |= mon_show_attributes;
break;
case 'L':
show |= mon_show_bans;
print_neg_location_prefix = optarg? optarg : "";
break;
case 'D':
show &= ~mon_show_headers;
break;
case 'b':
print_brief = TRUE;
break;
case 'j':
print_pending = TRUE;
break;
case 'R':
print_clone_detail = TRUE;
break;
case 'c':
show |= mon_show_tickets;
break;
case 'p':
free(pid_file);
if(optarg == NULL) {
crm_help(flag, CRM_EX_USAGE);
}
pid_file = strdup(optarg);
break;
case 'x':
if(optarg == NULL) {
crm_help(flag, CRM_EX_USAGE);
}
setenv("CIB_file", optarg, 1);
one_shot = TRUE;
break;
case 'h':
if(optarg == NULL) {
crm_help(flag, CRM_EX_USAGE);
}
argerr += (output_format != mon_output_console);
output_format = mon_output_html;
output_filename = strdup(optarg);
umask(S_IWGRP | S_IWOTH);
break;
case 'X':
argerr += (output_format != mon_output_console);
output_format = mon_output_xml;
one_shot = TRUE;
break;
case 'w':
/* do not allow argv[0] and argv[1...] redundancy */
argerr += (output_format != mon_output_console);
output_format = mon_output_cgi;
one_shot = TRUE;
break;
case 's':
argerr += (output_format != mon_output_console);
output_format = mon_output_monitor;
one_shot = TRUE;
break;
case 'E':
external_agent = optarg;
break;
case 'e':
external_recipient = optarg;
break;
case '1':
one_shot = TRUE;
break;
case 'N':
if (output_format == mon_output_console) {
output_format = mon_output_plain;
}
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
default:
printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
++argerr;
break;
}
}
if (watch_fencing) {
/* don't complain, since fence_history_level == 1 is the default */
fence_history_level = 0;
}
/* create the cib-object early to be able to do further
* decisions based on the cib-source
*/
cib = cib_new();
if (cib == NULL) {
rc = -EINVAL;
} else {
switch (cib->variant) {
case cib_native:
/* cib & fencing - everything available */
break;
case cib_file:
/* Don't try to connect to fencing: either we
* don't have a running cluster, or the fencing
* information would not necessarily match the
* CIB data read from a file.
* Since we don't expect CIB updates to come in,
* enforce one-shot mode. */
fence_history_level = 0;
one_shot = TRUE;
break;
case cib_remote:
/* updates coming in but no fencing */
fence_history_level = 0;
break;
case cib_undefined:
case cib_database:
default:
/* something is odd */
rc = -EINVAL;
crm_err("Invalid cib-source");
break;
}
}
switch (fence_history_level) {
case 3:
fence_full_history = TRUE;
/* fall through to next lower level */
case 2:
show |= mon_show_fence_history;
/* fall through to next lower level */
case 1:
fence_history = TRUE;
fence_connect = TRUE;
break;
default:
break;
}
/* Extra sanity checks when in CGI mode */
if (output_format == mon_output_cgi) {
argerr += (optind < argc);
argerr += (output_filename != NULL);
argerr += ((cib) && (cib->variant == cib_file));
argerr += (external_agent != NULL);
argerr += (daemonize == TRUE); /* paranoia */
} else if (optind < argc) {
printf("non-option ARGV-elements: ");
while (optind < argc)
printf("%s ", argv[optind++]);
printf("\n");
}
if (argerr) {
return clean_up(CRM_EX_USAGE);
}
/* XML output always prints everything */
if (output_format == mon_output_xml) {
show = mon_show_all;
print_timing = TRUE;
}
if (one_shot) {
if (output_format == mon_output_console) {
output_format = mon_output_plain;
}
} else if (daemonize) {
if ((output_format == mon_output_console) || (output_format == mon_output_plain)) {
output_format = mon_output_none;
}
crm_enable_stderr(FALSE);
if ((output_format != mon_output_html)
&& !external_agent) {
printf ("Looks like you forgot to specify one or more of: "
"--as-html, --external-agent\n");
return clean_up(CRM_EX_USAGE);
}
if (cib) {
/* to be on the safe side don't have cib-object around
* when we are forking
*/
cib_delete(cib);
cib = NULL;
crm_make_daemon(crm_system_name, TRUE, pid_file);
cib = cib_new();
if (cib == NULL) {
rc = -EINVAL;
}
/* otherwise assume we've got the same cib-object we've just destroyed
* in our parent
*/
}
} else if (output_format == mon_output_console) {
#if CURSES_ENABLED
initscr();
cbreak();
noecho();
crm_enable_stderr(FALSE);
curses_console_initialized = TRUE;
#else
one_shot = TRUE;
output_format = mon_output_plain;
printf("Defaulting to one-shot mode\n");
printf("You need to have curses available at compile time to enable console mode\n");
#endif
}
crm_info("Starting %s", crm_system_name);
if (cib) {
do {
if (!one_shot) {
print_as("Waiting until cluster is available on this node ...\n");
}
rc = cib_connect(!one_shot);
if (one_shot) {
break;
} else if (rc != pcmk_ok) {
sleep(reconnect_msec / 1000);
#if CURSES_ENABLED
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
} else {
if (output_format == mon_output_html) {
print_as("Writing html to %s ...\n", output_filename);
}
}
} while (rc == -ENOTCONN);
}
if (rc != pcmk_ok) {
if (output_format == mon_output_monitor) {
printf("CLUSTER CRIT: Connection to cluster failed: %s\n",
pcmk_strerror(rc));
return clean_up(MON_STATUS_CRIT);
} else {
if (rc == -ENOTCONN) {
print_as("\nError: cluster is not available on this node\n");
} else {
print_as("\nConnection to cluster failed: %s\n",
pcmk_strerror(rc));
}
}
if (output_format == mon_output_console) {
sleep(2);
}
return clean_up(crm_errno2exit(rc));
}
if (one_shot) {
return clean_up(CRM_EX_OK);
}
mainloop = g_main_loop_new(NULL, FALSE);
mainloop_add_signal(SIGTERM, mon_shutdown);
mainloop_add_signal(SIGINT, mon_shutdown);
#if CURSES_ENABLED
if (output_format == mon_output_console) {
ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize);
if (ncurses_winch_handler == SIG_DFL ||
ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
ncurses_winch_handler = NULL;
g_io_add_watch(g_io_channel_unix_new(STDIN_FILENO), G_IO_IN, detect_user_input, NULL);
}
#endif
refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);
g_main_loop_run(mainloop);
g_main_loop_unref(mainloop);
crm_info("Exiting %s", crm_system_name);
return clean_up(CRM_EX_OK);
}
#define mon_warn(fmt...) do { \
if (!has_warnings) { \
print_as("CLUSTER WARN:"); \
} else { \
print_as(","); \
} \
print_as(fmt); \
has_warnings = TRUE; \
} while(0)
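/* For illustration: mon_warn() builds up a single status line. The first
 * warning prints the "CLUSTER WARN:" prefix and later warnings just append
 * a comma, so e.g.
 *     mon_warn(" No DC");
 *     mon_warn(" offline node: %s", "node2");
 * would emit "CLUSTER WARN: No DC, offline node: node2".
 */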
static int
count_resources(pe_working_set_t * data_set, resource_t * rsc)
{
int count = 0;
GListPtr gIter = NULL;
if (rsc == NULL) {
gIter = data_set->resources;
} else if (rsc->children) {
gIter = rsc->children;
} else {
return is_not_set(rsc->flags, pe_rsc_orphan);
}
for (; gIter != NULL; gIter = gIter->next) {
count += count_resources(data_set, gIter->data);
}
return count;
}
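/* Note: because the recursion bottoms out at primitives, a clone with three
 * instances contributes 3 to the count and a two-member group contributes 2,
 * while orphaned primitives are excluded.
 */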
/*!
* \internal
* \brief Print one-line status suitable for use with monitoring software
*
* \param[in] data_set Working set of CIB state
* \param[in] history List of stonith actions
*
* \note This function's output (and the return code when the program exits)
* should conform to https://www.monitoring-plugins.org/doc/guidelines.html
*/
static void
print_simple_status(pe_working_set_t * data_set,
stonith_history_t *history)
{
GListPtr gIter = NULL;
int nodes_online = 0;
int nodes_standby = 0;
int nodes_maintenance = 0;
if (data_set->dc_node == NULL) {
mon_warn(" No DC");
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
if (node->details->standby && node->details->online) {
nodes_standby++;
} else if (node->details->maintenance && node->details->online) {
nodes_maintenance++;
} else if (node->details->online) {
nodes_online++;
} else {
mon_warn(" offline node: %s", node->details->uname);
}
}
if (!has_warnings) {
int nresources = count_resources(data_set, NULL);
print_as("CLUSTER OK: %d node%s online", nodes_online, s_if_plural(nodes_online));
if (nodes_standby > 0) {
print_as(", %d standby node%s", nodes_standby, s_if_plural(nodes_standby));
}
if (nodes_maintenance > 0) {
print_as(", %d maintenance node%s", nodes_maintenance, s_if_plural(nodes_maintenance));
}
print_as(", %d resource%s configured", nresources, s_if_plural(nresources));
}
print_as("\n");
}
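/* Sample one-line outputs in the monitoring-plugins style, e.g.:
 *   CLUSTER OK: 3 nodes online, 5 resources configured
 *   CLUSTER WARN: No DC, offline node: node3
 */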
/*!
* \internal
* \brief Print a [name]=[value][units] pair, optionally using time string
*
* \param[in] stream File stream to display output to
* \param[in] name Name to display
* \param[in] value Value to display (or NULL to convert time instead)
* \param[in] units Units to display (or NULL for no units)
* \param[in] epoch_time Epoch time to convert if value is NULL
*/
static void
print_nvpair(FILE *stream, const char *name, const char *value,
const char *units, time_t epoch_time)
{
/* print name= */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" %s=", name);
break;
case mon_output_html:
case mon_output_cgi:
case mon_output_xml:
fprintf(stream, " %s=", name);
break;
default:
break;
}
/* If we have a value (and optionally units), print it */
if (value) {
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("%s%s", value, (units? units : ""));
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, "%s%s", value, (units? units : ""));
break;
case mon_output_xml:
fprintf(stream, "\"%s%s\"", value, (units? units : ""));
break;
default:
break;
}
/* Otherwise print user-friendly time string */
} else {
static char empty_str[] = "";
char *c, *date_str = asctime(localtime(&epoch_time));
for (c = (date_str != NULL) ? date_str : empty_str; *c != '\0'; ++c) {
if (*c == '\n') {
*c = '\0';
break;
}
}
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("'%s'", date_str);
break;
case mon_output_html:
case mon_output_cgi:
case mon_output_xml:
fprintf(stream, "\"%s\"", date_str);
break;
default:
break;
}
}
}
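/* Example: print_nvpair(stream, "last-run", NULL, NULL, t) converts the epoch
 * time t, yielding roughly " last-run='Wed Jun  5 10:00:00 2019'" in plain or
 * console mode and " last-run=\"Wed Jun  5 10:00:00 2019\"" in XML mode.
 */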
/*!
* \internal
* \brief Print whatever is needed to start a node section
*
* \param[in] stream File stream to display output to
* \param[in] node Node to print
*/
static void
print_node_start(FILE *stream, node_t *node)
{
char *node_name;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
node_name = get_node_display_name(node);
print_as("* Node %s:\n", node_name);
free(node_name);
break;
case mon_output_html:
case mon_output_cgi:
node_name = get_node_display_name(node);
            fprintf(stream, "   <h3>Node: %s</h3>\n   <ul>\n", node_name);
free(node_name);
break;
case mon_output_xml:
            fprintf(stream, "        <node name=\"%s\">\n", node->details->uname);
break;
default:
break;
}
}
/*!
* \internal
* \brief Print whatever is needed to end a node section
*
* \param[in] stream File stream to display output to
*/
static void
print_node_end(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "   </ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "        </node>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print resources section heading appropriate to options
*
* \param[in] stream File stream to display output to
*/
static void
print_resources_heading(FILE *stream)
{
const char *heading;
if (group_by_node) {
/* Active resources have already been printed by node */
heading = (inactive_resources? "Inactive resources" : NULL);
} else if (inactive_resources) {
heading = "Full list of resources";
} else {
heading = "Active resources";
}
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n%s:\n\n", heading);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>%s</h2>\n", heading);
break;
case mon_output_xml:
            fprintf(stream, "    <resources>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print whatever resource section closing is appropriate
*
* \param[in] stream File stream to display output to
*/
static void
print_resources_closing(FILE *stream, gboolean printed_heading)
{
const char *heading;
/* What type of resources we did or did not display */
if (group_by_node) {
heading = "inactive ";
} else if (inactive_resources) {
heading = "";
} else {
heading = "active ";
}
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (!printed_heading) {
print_as("\nNo %sresources\n\n", heading);
}
break;
case mon_output_html:
case mon_output_cgi:
if (!printed_heading) {
                fprintf(stream, " <hr />\n <h2>No %sresources</h2>\n", heading);
}
break;
case mon_output_xml:
            fprintf(stream, "    %s\n",
                    (printed_heading? "</resources>" : "<resources/>"));
break;
default:
break;
}
}
/*!
* \internal
* \brief Print whatever resource section(s) are appropriate
*
* \param[in] stream File stream to display output to
* \param[in] data_set Cluster state to display
* \param[in] print_opts Bitmask of pe_print_options
*/
static void
print_resources(FILE *stream, pe_working_set_t *data_set, int print_opts)
{
GListPtr rsc_iter;
const char *prefix = NULL;
gboolean printed_heading = FALSE;
gboolean brief_output = print_brief;
/* If we already showed active resources by node, and
* we're not showing inactive resources, we have nothing to do
*/
if (group_by_node && !inactive_resources) {
return;
}
/* XML uses an indent, and ignores brief option for resources */
if (output_format == mon_output_xml) {
prefix = " ";
brief_output = FALSE;
}
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
if (brief_output && !group_by_node) {
print_resources_heading(stream);
printed_heading = TRUE;
print_rscs_brief(data_set->resources, NULL, print_opts, stream,
inactive_resources);
}
/* For each resource, display it if appropriate */
for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
resource_t *rsc = (resource_t *) rsc_iter->data;
/* Complex resources may have some sub-resources active and some inactive */
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
} else if (group_by_node) {
if (is_active) {
continue;
}
/* Skip primitives already counted in a brief summary */
} else if (brief_output && (rsc->variant == pe_native)) {
continue;
/* Skip resources that aren't at least partially active,
* unless we're displaying inactive resources
*/
} else if (!partially_active && !inactive_resources) {
continue;
}
/* Print this resource */
if (printed_heading == FALSE) {
print_resources_heading(stream);
printed_heading = TRUE;
}
rsc->fns->print(rsc, prefix, print_opts, stream);
}
print_resources_closing(stream, printed_heading);
}
/*!
* \internal
* \brief Print heading for resource history
*
* \param[in] stream File stream to display output to
* \param[in] data_set Current state of CIB
* \param[in] node Node that ran this resource
* \param[in] rsc Resource to print
* \param[in] rsc_id ID of resource to print
* \param[in] all Whether to print every resource or just failed ones
*/
static void
print_rsc_history_start(FILE *stream, pe_working_set_t *data_set, node_t *node,
resource_t *rsc, const char *rsc_id, gboolean all)
{
time_t last_failure = 0;
int failcount = rsc?
pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
NULL, data_set)
: 0;
if (!all && !failcount && (last_failure <= 0)) {
return;
}
/* Print resource ID */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" %s:", rsc_id);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "   <li>%s:", rsc_id);
break;
case mon_output_xml:
            fprintf(stream, "            <resource_history id=\"%s\"", rsc_id);
            break;
        default:
            break;
    }
    /* If the resource is an orphan, that's all we can say about it */
    if (rsc == NULL) {
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as(" orphan");
                break;
            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, " orphan");
                break;
            case mon_output_xml:
                fprintf(stream, " orphan=\"true\"");
                break;
            default:
                break;
        }
    /* If the resource is not an orphan, print some details */
    } else if (all || failcount || (last_failure > 0)) {
/* Print migration threshold */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" migration-threshold=%d", rsc->migration_threshold);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " migration-threshold=%d", rsc->migration_threshold);
break;
case mon_output_xml:
fprintf(stream, " orphan=\"false\" migration-threshold=\"%d\"",
rsc->migration_threshold);
break;
default:
break;
}
/* Print fail count if any */
if (failcount > 0) {
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" " CRM_FAIL_COUNT_PREFIX "=%d", failcount);
break;
case mon_output_html:
case mon_output_cgi:
fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=%d", failcount);
break;
case mon_output_xml:
fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=\"%d\"",
failcount);
break;
default:
break;
}
}
/* Print last failure time if any */
if (last_failure > 0) {
print_nvpair(stream, CRM_LAST_FAILURE_PREFIX, NULL, NULL,
last_failure);
}
}
/* End the heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "\n    <ul>\n");
break;
case mon_output_xml:
fprintf(stream, ">\n");
break;
default:
break;
}
}
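/* A plain-format heading produced here looks like, e.g.:
 *   myrsc: migration-threshold=3 fail-count=2 last-failure='Wed Jun  5 10:00:00 2019'
 * (assuming CRM_FAIL_COUNT_PREFIX and CRM_LAST_FAILURE_PREFIX expand to
 * "fail-count" and "last-failure")
 */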
/*!
* \internal
* \brief Print closing for resource history
*
* \param[in] stream File stream to display output to
*/
static void
print_rsc_history_end(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "    </ul>\n   </li>\n");
break;
case mon_output_xml:
            fprintf(stream, "            </resource_history>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print operation history
*
* \param[in] stream File stream to display output to
* \param[in] data_set Current state of CIB
* \param[in] node Node this operation is for
* \param[in] xml_op Root of XML tree describing this operation
* \param[in] task Task parsed from this operation's XML
* \param[in] interval_ms_s Interval parsed from this operation's XML
* \param[in] rc Return code parsed from this operation's XML
*/
static void
print_op_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
xmlNode *xml_op, const char *task, const char *interval_ms_s,
int rc)
{
const char *value = NULL;
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
/* Begin the operation description */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" + (%s) %s:", call, task);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "     <li>(%s) %s:", call, task);
break;
        case mon_output_xml:
            fprintf(stream, "                <operation_history call=\"%s\" task=\"%s\"",
                    call, task);
            break;
        default:
            break;
    }
    /* Add name=value pairs as appropriate */
    if (interval_ms_s && safe_str_neq(interval_ms_s, "0")) {
        print_nvpair(stream, "interval", interval_ms_s, "ms", 0);
    }
    if (print_timing) {
        int int_value;
        const char *attr = XML_RSC_OP_LAST_CHANGE;

        value = crm_element_value(xml_op, attr);
        if (value) {
            int_value = crm_parse_int(value, NULL);
            if (int_value > 0) {
print_nvpair(stream, attr, NULL, NULL, int_value);
}
}
attr = XML_RSC_OP_LAST_RUN;
value = crm_element_value(xml_op, attr);
if (value) {
int_value = crm_parse_int(value, NULL);
if (int_value > 0) {
print_nvpair(stream, attr, NULL, NULL, int_value);
}
}
attr = XML_RSC_OP_T_EXEC;
value = crm_element_value(xml_op, attr);
if (value) {
print_nvpair(stream, attr, value, "ms", 0);
}
attr = XML_RSC_OP_T_QUEUE;
value = crm_element_value(xml_op, attr);
if (value) {
print_nvpair(stream, attr, value, "ms", 0);
}
}
/* End the operation description */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" rc=%d (%s)\n", rc, services_ocf_exitcode_str(rc));
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " rc=%d (%s)</li>\n", rc, services_ocf_exitcode_str(rc));
break;
case mon_output_xml:
fprintf(stream, " rc=\"%d\" rc_text=\"%s\" />\n", rc, services_ocf_exitcode_str(rc));
break;
default:
break;
}
}
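/* A plain-format operation line produced here looks like, e.g.:
 *   + (11) start: last-rc-change='Wed Jun  5 10:00:00 2019' exec-time=25ms rc=0 (ok)
 * (the name=value pairs come from whatever timing attributes the operation
 * XML happens to carry)
 */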
/*!
* \internal
* \brief Print resource operation/failure history
*
* \param[in] stream File stream to display output to
* \param[in] data_set Current state of CIB
* \param[in] node Node that ran this resource
* \param[in] rsc_entry Root of XML tree describing resource status
* \param[in] operations Whether to print operations or just failcounts
*/
static void
print_rsc_history(FILE *stream, pe_working_set_t *data_set, node_t *node,
xmlNode *rsc_entry, gboolean operations)
{
GListPtr gIter = NULL;
GListPtr op_list = NULL;
gboolean printed = FALSE;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
xmlNode *rsc_op = NULL;
/* If we're not showing operations, just print the resource failure summary */
if (operations == FALSE) {
print_rsc_history_start(stream, data_set, node, rsc, rsc_id, FALSE);
print_rsc_history_end(stream);
return;
}
/* Create a list of this resource's operations */
for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) {
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
op_list = g_list_append(op_list, rsc_op);
}
}
op_list = g_list_sort(op_list, sort_op_by_callid);
/* Print each operation */
for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *xml_op = (xmlNode *) gIter->data;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(xml_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
int rc = crm_parse_int(op_rc, "0");
/* Display 0-interval monitors as "probe" */
if (safe_str_eq(task, CRMD_ACTION_STATUS)
&& ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) {
task = "probe";
}
/* Ignore notifies and some probes */
if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || (safe_str_eq(task, "probe") && (rc == 7))) {
continue;
}
/* If this is the first printed operation, print heading for resource */
if (printed == FALSE) {
printed = TRUE;
print_rsc_history_start(stream, data_set, node, rsc, rsc_id, TRUE);
}
/* Print the operation */
print_op_history(stream, data_set, node, xml_op, task, interval_ms_s,
rc);
}
/* Free the list we created (no need to free the individual items) */
g_list_free(op_list);
/* If we printed anything, close the resource */
if (printed) {
print_rsc_history_end(stream);
}
}
/*!
* \internal
* \brief Print node operation/failure history
*
* \param[in] stream File stream to display output to
* \param[in] data_set Current state of CIB
* \param[in] node_state Root of XML tree describing node status
* \param[in] operations Whether to print operations or just failcounts
*/
static void
print_node_history(FILE *stream, pe_working_set_t *data_set,
xmlNode *node_state, gboolean operations)
{
node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
xmlNode *lrm_rsc = NULL;
xmlNode *rsc_entry = NULL;
if (node && node->details && node->details->online) {
print_node_start(stream, node);
lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
/* Print history of each of the node's resources */
for (rsc_entry = __xml_first_child(lrm_rsc); rsc_entry != NULL;
rsc_entry = __xml_next(rsc_entry)) {
if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
print_rsc_history(stream, data_set, node, rsc_entry, operations);
}
}
print_node_end(stream);
}
}
/*!
* \internal
* \brief Print extended information about an attribute if appropriate
*
* \param[in] data_set Working set of CIB state
*
* \return TRUE if extended information was printed, FALSE otherwise
* \note Currently, extended information is only supported for ping/pingd
* resources, for which a message will be printed if connectivity is lost
* or degraded.
*/
static gboolean
print_attr_msg(FILE *stream, node_t * node, GListPtr rsc_list, const char *attrname, const char *attrvalue)
{
GListPtr gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
if (rsc->children != NULL) {
if (print_attr_msg(stream, node, rsc->children, attrname, attrvalue)) {
return TRUE;
}
}
if (safe_str_eq(type, "ping") || safe_str_eq(type, "pingd")) {
const char *name = g_hash_table_lookup(rsc->parameters, "name");
if (name == NULL) {
name = "pingd";
}
            /* Match this ping resource to the attribute by its "name" parameter */
if (safe_str_eq(name, attrname)) {
int host_list_num = 0;
int expected_score = 0;
int value = crm_parse_int(attrvalue, "0");
const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
if(hosts) {
char **host_list = g_strsplit(hosts, " ", 0);
host_list_num = g_strv_length(host_list);
g_strfreev(host_list);
}
                /* Compute the expected score; pingd's default multiplier is 1 */
                expected_score = host_list_num * crm_parse_int(multiplier, "1");
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (value <= 0) {
print_as("\t: Connectivity is lost");
} else if (value < expected_score) {
print_as("\t: Connectivity is degraded (Expected=%d)", expected_score);
}
break;
case mon_output_html:
case mon_output_cgi:
if (value <= 0) {
fprintf(stream, " (connectivity is lost)");
} else if (value < expected_score) {
fprintf(stream, " (connectivity is degraded -- expected %d)",
expected_score);
}
break;
case mon_output_xml:
fprintf(stream, " expected=\"%d\"", expected_score);
break;
default:
break;
}
return TRUE;
}
}
}
return FALSE;
}
static int
compare_attribute(gconstpointer a, gconstpointer b)
{
int rc;
rc = strcmp((const char *)a, (const char *)b);
return rc;
}
static void
create_attr_list(gpointer name, gpointer value, gpointer data)
{
int i;
const char *filt_str[] = FILTER_STR;
CRM_CHECK(name != NULL, return);
/* filtering automatic attributes */
for (i = 0; filt_str[i] != NULL; i++) {
if (g_str_has_prefix(name, filt_str[i])) {
return;
}
}
attr_list = g_list_insert_sorted(attr_list, name, compare_attribute);
}
/* structure for passing multiple user data to g_list_foreach() */
struct mon_attr_data {
FILE *stream;
node_t *node;
};
static void
print_node_attribute(gpointer name, gpointer user_data)
{
const char *value = NULL;
struct mon_attr_data *data = (struct mon_attr_data *) user_data;
value = pe_node_attribute_raw(data->node, name);
/* Print attribute name and value */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as(" + %-32s\t: %-10s", (char *)name, value);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(data->stream, "   <li>%s: %s",
                    (char *)name, value);
break;
        case mon_output_xml:
            fprintf(data->stream,
                    "            <attribute name=\"%s\" value=\"%s\"",
                    (char *)name, value);
            break;
        default:
            break;
    }
    /* Print extended information if appropriate */
    print_attr_msg(data->stream, data->node, data->node->details->running_rsc,
                   name, value);
/* Close out the attribute */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(data->stream, "</li>\n");
break;
case mon_output_xml:
fprintf(data->stream, " />\n");
break;
default:
break;
}
}
static void
print_node_summary(FILE *stream, pe_working_set_t * data_set, gboolean operations)
{
xmlNode *node_state = NULL;
xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
/* Print heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (operations) {
print_as("\nOperations:\n");
} else {
print_as("\nMigration Summary:\n");
}
break;
case mon_output_html:
case mon_output_cgi:
if (operations) {
                fprintf(stream, " <hr />\n <h2>Operations</h2>\n");
            } else {
                fprintf(stream, " <hr />\n <h2>Migration Summary</h2>\n");
}
break;
case mon_output_xml:
            fprintf(stream, "    <node_history>\n");
break;
default:
break;
}
/* Print each node in the CIB status */
for (node_state = __xml_first_child(cib_status); node_state != NULL;
node_state = __xml_next(node_state)) {
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
print_node_history(stream, data_set, node_state, operations);
}
}
/* Close section */
switch (output_format) {
case mon_output_xml:
            fprintf(stream, "    </node_history>\n");
break;
default:
break;
}
}
static void
print_ticket(gpointer name, gpointer value, gpointer data)
{
ticket_t *ticket = (ticket_t *) value;
FILE *stream = (FILE *) data;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("* %s:\t%s%s", ticket->id,
(ticket->granted? "granted" : "revoked"),
(ticket->standby? " [standby]" : ""));
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "  <li>%s: %s%s", ticket->id,
(ticket->granted? "granted" : "revoked"),
(ticket->standby? " [standby]" : ""));
break;
case mon_output_xml:
            fprintf(stream, "        <ticket id=\"%s\" status=\"%s\" standby=\"%s\"",
                    ticket->id, (ticket->granted? "granted" : "revoked"),
                    (ticket->standby? "true" : "false"));
break;
default:
break;
}
if (ticket->last_granted > -1) {
        /* Write to the caller's stream (not stdout) so HTML-to-file output works */
        print_nvpair(stream, "last-granted", NULL, NULL, ticket->last_granted);
}
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "</li>\n");
break;
case mon_output_xml:
fprintf(stream, " />\n");
break;
default:
break;
}
}
static void
print_cluster_tickets(FILE *stream, pe_working_set_t * data_set)
{
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nTickets:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>Tickets</h2>\n <ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <tickets>\n");
break;
default:
break;
}
/* Print each ticket */
g_hash_table_foreach(data_set->tickets, print_ticket, stream);
/* Close section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " </ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    </tickets>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Return human-friendly string representing node name
*
* The returned string will be in the format
* uname[@hostUname] [(nodeID)]
* "@hostUname" will be printed if the node is a guest node.
* "(nodeID)" will be printed if the node ID is different from the node uname,
* and detailed output has been requested.
*
* \param[in] node Node to represent
* \return Newly allocated string with representation of node name
* \note It is the caller's responsibility to free the result with free().
*/
static char *
get_node_display_name(node_t *node)
{
char *node_name;
const char *node_host = NULL;
const char *node_id = NULL;
int name_len;
CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
/* Host is displayed only if this is a guest node */
if (pe__is_guest_node(node)) {
node_t *host_node = pe__current_node(node->details->remote_rsc);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
}
if (node_host == NULL) {
node_host = ""; /* so we at least get "uname@" to indicate guest */
}
}
/* Node ID is displayed if different from uname and detail is requested */
if (print_clone_detail && safe_str_neq(node->details->uname, node->details->id)) {
node_id = node->details->id;
}
/* Determine name length */
name_len = strlen(node->details->uname) + 1;
if (node_host) {
name_len += strlen(node_host) + 1; /* "@node_host" */
}
if (node_id) {
name_len += strlen(node_id) + 3; /* + " (node_id)" */
}
/* Allocate and populate display name */
node_name = malloc(name_len);
CRM_ASSERT(node_name != NULL);
strcpy(node_name, node->details->uname);
if (node_host) {
strcat(node_name, "@");
strcat(node_name, node_host);
}
if (node_id) {
strcat(node_name, " (");
strcat(node_name, node_id);
strcat(node_name, ")");
}
return node_name;
}
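/* Examples of resulting display names: "node1" for a regular cluster node,
 * "guest1@node2" for a guest node, and "node1 (1000001)" when detailed output
 * was requested and the node ID differs from the uname.
 */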
/*!
* \internal
* \brief Print a negative location constraint
*
* \param[in] stream File stream to display output to
* \param[in] node Node affected by constraint
* \param[in] location Constraint to print
*/
static void
print_ban(FILE *stream, pe_node_t *node, pe__location_t *location)
{
char *node_name = NULL;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
node_name = get_node_display_name(node);
print_as(" %s\tprevents %s from running %son %s\n",
location->id, location->rsc_lh->id,
((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
node_name);
break;
case mon_output_html:
case mon_output_cgi:
node_name = get_node_display_name(node);
            fprintf(stream, "  <li>%s prevents %s from running %son %s</li>\n",
location->id, location->rsc_lh->id,
((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""),
node_name);
break;
case mon_output_xml:
            fprintf(stream,
                    "        <ban id=\"%s\" resource=\"%s\" node=\"%s\" weight=\"%d\" master_only=\"%s\" />\n",
                    location->id, location->rsc_lh->id, node->details->uname, node->weight,
                    ((location->role_filter == RSC_ROLE_MASTER)? "true" : "false"));
break;
default:
break;
}
free(node_name);
}
/*!
* \internal
* \brief Print section for negative location constraints
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set corresponding to CIB status to display
*/
static void
print_neg_locations(FILE *stream, pe_working_set_t *data_set)
{
GListPtr gIter, gIter2;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nNegative Location Constraints:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>Negative Location Constraints</h2>\n <ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <bans>\n");
break;
default:
break;
}
/* Print each ban */
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
if (!g_str_has_prefix(location->id, print_neg_location_prefix))
continue;
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
node_t *node = (node_t *) gIter2->data;
if (node->weight < 0) {
print_ban(stream, node, location);
}
}
}
/* Close section */
switch (output_format) {
case mon_output_cgi:
case mon_output_html:
            fprintf(stream, " </ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    </bans>\n");
break;
default:
break;
}
}
static void
crm_mon_get_parameters(resource_t *rsc, pe_working_set_t * data_set)
{
get_rsc_attributes(rsc->parameters, rsc, NULL, data_set);
crm_trace("Beekhof: unpacked params for %s (%d)", rsc->id, g_hash_table_size(rsc->parameters));
if(rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
crm_mon_get_parameters(gIter->data, data_set);
}
}
}
/*!
* \internal
* \brief Print node attributes section
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*/
static void
print_node_attributes(FILE *stream, pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nNode Attributes:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>Node Attributes</h2>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <node_attributes>\n");
break;
default:
break;
}
/* Unpack all resource parameters (it would be more efficient to do this
* only when needed for the first time in print_attr_msg())
*/
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
crm_mon_get_parameters(gIter->data, data_set);
}
/* Display each node's attributes */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
struct mon_attr_data data;
data.stream = stream;
data.node = (node_t *) gIter->data;
if (data.node && data.node->details && data.node->details->online) {
print_node_start(stream, data.node);
g_hash_table_foreach(data.node->details->attrs, create_attr_list, NULL);
g_list_foreach(attr_list, print_node_attribute, &data);
g_list_free(attr_list);
attr_list = NULL;
print_node_end(stream);
}
}
/* Print section footer */
switch (output_format) {
case mon_output_xml:
            fprintf(stream, "    </node_attributes>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Return resource display options corresponding to command-line choices
*
* \return Bitmask of pe_print_options suitable for resource print functions
*/
static int
get_resource_display_options(void)
{
int print_opts;
/* Determine basic output format */
switch (output_format) {
case mon_output_console:
print_opts = pe_print_ncurses;
break;
case mon_output_html:
case mon_output_cgi:
print_opts = pe_print_html;
break;
case mon_output_xml:
print_opts = pe_print_xml;
break;
default:
print_opts = pe_print_printf;
break;
}
/* Add optional display elements */
if (print_pending) {
print_opts |= pe_print_pending;
}
if (print_clone_detail) {
print_opts |= pe_print_clone_details|pe_print_implicit;
}
if (!inactive_resources) {
print_opts |= pe_print_clone_active;
}
if (print_brief) {
print_opts |= pe_print_brief;
}
return print_opts;
}
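/* For example, a plain-text run that requested brief and inactive output
 * would return pe_print_printf|pe_print_brief, leaving pe_print_clone_active
 * unset so that inactive clone instances are still displayed.
 */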
/*!
* \internal
* \brief Print header for cluster summary if needed
*
* \param[in] stream File stream to display output to
*/
static void
print_cluster_summary_header(FILE *stream)
{
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <h2>Cluster Summary</h2>\n <p>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <summary>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print footer for cluster summary if needed
*
* \param[in] stream File stream to display output to
*/
static void
print_cluster_summary_footer(FILE *stream)
{
switch (output_format) {
case mon_output_cgi:
case mon_output_html:
            fprintf(stream, " </p>\n");
break;
case mon_output_xml:
            fprintf(stream, "    </summary>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print times the display was last updated and CIB last changed
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*/
static void
print_cluster_times(FILE *stream, pe_working_set_t *data_set)
{
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
switch (output_format) {
case mon_output_plain:
case mon_output_console: {
const char *now_str = crm_now_string(NULL);
print_as("Last updated: %s", now_str ? now_str : "Could not determine current time");
print_as((user || client || origin)? "\n" : "\t\t");
print_as("Last change: %s", last_written ? last_written : "");
if (user) {
print_as(" by %s", user);
}
if (client) {
print_as(" via %s", client);
}
if (origin) {
print_as(" on %s", origin);
}
print_as("\n");
break;
}
case mon_output_html:
case mon_output_cgi: {
const char *now_str = crm_now_string(NULL);
            fprintf(stream, " <b>Last updated:</b> %s<br/>\n",
                    now_str ? now_str : "Could not determine current time");
            fprintf(stream, " <b>Last change:</b> %s", last_written ? last_written : "");
if (user) {
fprintf(stream, " by %s", user);
}
if (client) {
fprintf(stream, " via %s", client);
}
if (origin) {
fprintf(stream, " on %s", origin);
}
            fprintf(stream, "<br/>\n");
break;
}
case mon_output_xml: {
const char *now_str = crm_now_string(NULL);
            fprintf(stream, "        <last_update time=\"%s\" />\n",
                    now_str ? now_str : "Could not determine current time");
            fprintf(stream, "        <last_change time=\"%s\" user=\"%s\" client=\"%s\" origin=\"%s\" />\n",
                    last_written ? last_written : "", user ? user : "",
                    client ? client : "", origin ? origin : "");
break;
}
default:
break;
}
}
/*!
* \internal
* \brief Print cluster stack
*
* \param[in] stream File stream to display output to
* \param[in] stack_s Stack name
*/
static void
print_cluster_stack(FILE *stream, const char *stack_s)
{
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("Stack: %s\n", stack_s);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <b>Stack:</b> %s<br/>\n", stack_s);
break;
case mon_output_xml:
            fprintf(stream, "        <stack type=\"%s\" />\n", stack_s);
break;
default:
break;
}
}
/*!
* \internal
* \brief Print current DC and its version
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*/
static void
print_cluster_dc(FILE *stream, pe_working_set_t *data_set)
{
node_t *dc = data_set->dc_node;
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = dc? get_node_display_name(dc) : NULL;
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("Current DC: ");
if (dc) {
print_as("%s (version %s) - partition %s quorum\n",
dc_name, (dc_version_s? dc_version_s : "unknown"),
(crm_is_true(quorum) ? "with" : "WITHOUT"));
} else {
print_as("NONE\n");
}
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <b>Current DC:</b> ");
if (dc) {
fprintf(stream, "%s (version %s) - partition %s quorum",
dc_name, (dc_version_s? dc_version_s : "unknown"),
(crm_is_true(quorum)? "with" : "WITHOUT"));
} else {
fprintf(stream, "NONE");
}
            fprintf(stream, "<br/>\n");
break;
case mon_output_xml:
            fprintf(stream, "        <current_dc ");
            if (dc) {
                fprintf(stream,
                        "present=\"true\" version=\"%s\" name=\"%s\" id=\"%s\" with_quorum=\"%s\"",
                        (dc_version_s? dc_version_s : ""),
                        dc->details->uname, dc->details->id,
                        (crm_is_true(quorum) ? "true" : "false"));
} else {
fprintf(stream, "present=\"false\"");
}
fprintf(stream, " />\n");
break;
default:
break;
}
free(dc_name);
}
/*!
* \internal
* \brief Print counts of configured nodes and resources
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
* \param[in] stack_s Stack name
*/
static void
print_cluster_counts(FILE *stream, pe_working_set_t *data_set, const char *stack_s)
{
int nnodes = g_list_length(data_set->nodes);
int nresources = count_resources(data_set, NULL);
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\n%d node%s configured\n", nnodes, s_if_plural(nnodes));
print_as("%d resource%s configured",
nresources, s_if_plural(nresources));
if(data_set->disabled_resources || data_set->blocked_resources) {
print_as(" (");
if (data_set->disabled_resources) {
print_as("%d DISABLED", data_set->disabled_resources);
}
if (data_set->disabled_resources && data_set->blocked_resources) {
print_as(", ");
}
if (data_set->blocked_resources) {
print_as("%d BLOCKED from starting due to failure",
data_set->blocked_resources);
}
print_as(")");
}
print_as("\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " %d node%s configured<br/>\n",
nnodes, s_if_plural(nnodes));
fprintf(stream, " %d resource%s configured",
nresources, s_if_plural(nresources));
if (data_set->disabled_resources || data_set->blocked_resources) {
fprintf(stream, " (");
if (data_set->disabled_resources) {
fprintf(stream, "%d DISABLED",
data_set->disabled_resources);
}
if (data_set->disabled_resources && data_set->blocked_resources) {
fprintf(stream, ", ");
}
if (data_set->blocked_resources) {
fprintf(stream,
"%d BLOCKED from starting due to failure",
data_set->blocked_resources);
}
fprintf(stream, ")");
}
            fprintf(stream, "<br/>\n");
break;
case mon_output_xml:
            fprintf(stream,
                    "        <nodes_configured number=\"%d\" />\n",
                    g_list_length(data_set->nodes));
            fprintf(stream,
                    "        <resources_configured number=\"%d\" disabled=\"%d\" blocked=\"%d\" />\n",
                    count_resources(data_set, NULL),
                    data_set->disabled_resources, data_set->blocked_resources);
break;
default:
break;
}
}
/*!
* \internal
* \brief Print cluster-wide options
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*
* \note Currently this is only implemented for HTML and XML output, and
* prints only a few options. If there is demand, more could be added.
*/
static void
print_cluster_options(FILE *stream, pe_working_set_t *data_set)
{
switch (output_format) {
case mon_output_plain:
case mon_output_console:
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
print_as("\n *** Resource management is DISABLED ***");
print_as("\n The cluster will not attempt to start, stop or recover services");
print_as("\n");
}
break;
case mon_output_html:
            fprintf(stream, " </p>\n <h3>Config Options</h3>\n");
            fprintf(stream, " <table>\n");
            fprintf(stream, "  <tr><th>STONITH of failed nodes</th><td>%s</td></tr>\n",
                    is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
            fprintf(stream, "  <tr><th>Cluster is</th><td>%ssymmetric</td></tr>\n",
                    is_set(data_set->flags, pe_flag_symmetric_cluster)? "" : "a");
            fprintf(stream, "  <tr><th>No Quorum Policy</th><td>");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "Freeze resources");
break;
case no_quorum_stop:
fprintf(stream, "Stop ALL resources");
break;
case no_quorum_ignore:
fprintf(stream, "Ignore");
break;
case no_quorum_suicide:
fprintf(stream, "Suicide");
break;
}
            fprintf(stream, "</td></tr>\n");
            fprintf(stream, "  <tr><th>Resource management</th><td>");
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
                fprintf(stream, "<strong>DISABLED</strong> (the cluster will "
"not attempt to start, stop or recover services)");
} else {
fprintf(stream, "enabled");
}
            fprintf(stream, "</td></tr>\n");
            fprintf(stream, " </table>\n <p>\n");
break;
case mon_output_xml:
            fprintf(stream, "        <cluster_options");
            fprintf(stream, " stonith-enabled=\"%s\"",
                    is_set(data_set->flags, pe_flag_stonith_enabled)?
                    "true" : "false");
fprintf(stream, " symmetric-cluster=\"%s\"",
is_set(data_set->flags, pe_flag_symmetric_cluster)?
"true" : "false");
fprintf(stream, " no-quorum-policy=\"");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
fprintf(stream, "freeze");
break;
case no_quorum_stop:
fprintf(stream, "stop");
break;
case no_quorum_ignore:
fprintf(stream, "ignore");
break;
case no_quorum_suicide:
fprintf(stream, "suicide");
break;
}
fprintf(stream, "\"");
fprintf(stream, " maintenance-mode=\"%s\"",
is_set(data_set->flags, pe_flag_maintenance_mode)?
"true" : "false");
fprintf(stream, " />\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Get the name of the stack in use (or "unknown" if not available)
*
* \param[in] data_set Working set of CIB state
*
* \return String representing stack name
*/
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
data_set->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
/*!
* \internal
* \brief Print a summary of cluster-wide information
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*/
static void
print_cluster_summary(FILE *stream, pe_working_set_t *data_set)
{
const char *stack_s = get_cluster_stack(data_set);
gboolean header_printed = FALSE;
if (show & mon_show_stack) {
if (header_printed == FALSE) {
print_cluster_summary_header(stream);
header_printed = TRUE;
}
print_cluster_stack(stream, stack_s);
}
/* Always print DC if none, even if not requested */
if ((data_set->dc_node == NULL) || (show & mon_show_dc)) {
if (header_printed == FALSE) {
print_cluster_summary_header(stream);
header_printed = TRUE;
}
print_cluster_dc(stream, data_set);
}
if (show & mon_show_times) {
if (header_printed == FALSE) {
print_cluster_summary_header(stream);
header_printed = TRUE;
}
print_cluster_times(stream, data_set);
}
if (is_set(data_set->flags, pe_flag_maintenance_mode)
|| data_set->disabled_resources
|| data_set->blocked_resources
|| is_set(show, mon_show_count)) {
if (header_printed == FALSE) {
print_cluster_summary_header(stream);
header_printed = TRUE;
}
print_cluster_counts(stream, data_set, stack_s);
}
/* There is not a separate option for showing cluster options, so show with
* stack for now; a separate option could be added if there is demand
*/
if (show & mon_show_stack) {
print_cluster_options(stream, data_set);
}
if (header_printed) {
print_cluster_summary_footer(stream);
}
}
/*!
* \internal
* \brief Print a failed action
*
* \param[in] stream File stream to display output to
* \param[in] xml_op Root of XML tree describing failed action
*/
static void
print_failed_action(FILE *stream, xmlNode *xml_op)
{
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
const char *op_key_attr = "op_key";
const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
const char *node = crm_element_value(xml_op, XML_ATTR_UNAME);
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
char *exit_reason_cleaned;
/* If no op_key was given, use id instead */
if (op_key == NULL) {
op_key = ID(xml_op);
op_key_attr = "id";
}
/* If no exit reason was given, use "none" */
if (exit_reason == NULL) {
exit_reason = "none";
}
/* Print common action information */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("* %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
op_key, node, services_ocf_exitcode_str(rc), rc,
call, services_lrm_status_str(status), exit_reason);
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, "  <li>%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
op_key, node, services_ocf_exitcode_str(rc), rc,
call, services_lrm_status_str(status), exit_reason);
break;
case mon_output_xml:
exit_reason_cleaned = crm_xml_escape(exit_reason);
            fprintf(stream, "        <failure %s=\"%s\" node=\"%s\"",
                    op_key_attr, op_key, node);
            fprintf(stream, " exitstatus=\"%s\" exitreason=\"%s\" exitcode=\"%d\"",
                    services_ocf_exitcode_str(rc), exit_reason_cleaned, rc);
            fprintf(stream, " call=\"%s\" status=\"%s\"",
                    call, services_lrm_status_str(status));
            free(exit_reason_cleaned);
            break;
        default:
            break;
    }
    /* If last change was given, print timing information */
    if (last) {
        time_t run_at = crm_parse_int(last, "0");
        char *run_at_s = ctime(&run_at);

        if (run_at_s) {
            run_at_s[24] = 0; /* Overwrite the newline */
        }
        switch (output_format) {
            case mon_output_plain:
            case mon_output_console:
                print_as(", %s=%s, queued=%sms, exec=%sms",
                         XML_RSC_OP_LAST_CHANGE, run_at_s,
                         crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                         crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
                break;
            case mon_output_html:
            case mon_output_cgi:
                fprintf(stream, ", %s=%s, queued=%sms, exec=%sms",
                        XML_RSC_OP_LAST_CHANGE, run_at_s,
                        crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                        crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
                break;
            case mon_output_xml:
                fprintf(stream, " last-rc-change=\"%s\" queued=\"%sms\" exec=\"%sms\"",
                        run_at_s,
                        crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                        crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
                break;
            default:
                break;
        }
    }
    /* End the action listing */
    switch (output_format) {
        case mon_output_plain:
        case mon_output_console:
            print_as("\n");
            break;
        case mon_output_html:
        case mon_output_cgi:
            fprintf(stream, "</li>\n");
            break;
        case mon_output_xml:
fprintf(stream, " />\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print a section for failed actions
*
* \param[in] stream File stream to display output to
* \param[in] data_set Working set of CIB state
*/
static void
print_failed_actions(FILE *stream, pe_working_set_t *data_set)
{
xmlNode *xml_op = NULL;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nFailed Resource Actions:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream,
                    " <hr />\n <h2>Failed Resource Actions</h2>\n <ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <failures>\n");
break;
default:
break;
}
/* Print each failed action */
for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
xml_op = __xml_next(xml_op)) {
print_failed_action(stream, xml_op);
}
/* End section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " </ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    </failures>\n");
break;
default:
break;
}
}
/*!
* \internal
 * \brief Reduce the stonith history
 *        For successful actions, keep only the most recent of each
 *        action-type and target; for failed actions, also record which
 *        delegate failed; for actions in progress, keep full track
*
* \param[in] history List of stonith actions
*
*/
static stonith_history_t *
reduce_stonith_history(stonith_history_t *history)
{
stonith_history_t *new = NULL, *hp, *np, *tmp;
for (hp = history; hp; ) {
for (np = new; np; np = np->next) {
if ((hp->state == st_done) || (hp->state == st_failed)) {
/* action not in progress */
if (safe_str_eq(hp->target, np->target) &&
safe_str_eq(hp->action, np->action) &&
(hp->state == np->state)) {
if ((hp->state == st_done) ||
safe_str_eq(hp->delegate, np->delegate)) {
/* replace or purge */
if (hp->completed < np->completed) {
/* purge older hp */
tmp = hp->next;
hp->next = NULL;
stonith_history_free(hp);
hp = tmp;
break;
}
/* damn single linked list */
free(hp->target);
free(hp->action);
free(np->origin);
np->origin = hp->origin;
free(np->delegate);
np->delegate = hp->delegate;
free(np->client);
np->client = hp->client;
np->completed = hp->completed;
tmp = hp;
hp = hp->next;
free(tmp);
break;
}
}
if (np->next) {
continue;
}
}
np = 0; /* let outer loop progress hp */
break;
}
/* simply move hp from history to new */
if (np == NULL) {
tmp = hp->next;
hp->next = new;
new = hp;
hp = tmp;
}
}
return new;
}
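/* Example of the reduction, assuming three "reboot node1" records: two
 * completed successfully and one still in progress. The two successes are
 * collapsed into the more recent one, and the in-progress record is kept
 * untouched, so the caller sees one done and one pending entry for node1.
 */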
/*!
* \internal
 * \brief Sort the stonith history
 *        Sort by completed time, most recent first; pending actions, which
 *        lack a completed-stamp, are gathered at the top
*
* \param[in] history List of stonith actions
*
*/
static stonith_history_t *
sort_stonith_history(stonith_history_t *history)
{
stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp;
for (hp = history; hp; ) {
tmp = hp->next;
if ((hp->state == st_done) || (hp->state == st_failed)) {
/* sort into new */
if ((!new) || (hp->completed > new->completed)) {
hp->next = new;
new = hp;
} else {
np = new;
do {
if ((!np->next) || (hp->completed > np->next->completed)) {
hp->next = np->next;
np->next = hp;
break;
}
np = np->next;
} while (1);
}
} else {
/* put into pending */
hp->next = pending;
pending = hp;
}
hp = tmp;
}
/* pending actions don't have a completed-stamp so make them go front */
if (pending) {
stonith_history_t *last_pending = pending;
while (last_pending->next) {
last_pending = last_pending->next;
}
last_pending->next = new;
new = pending;
}
return new;
}
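/* The returned list is ordered for display: pending actions (which have no
 * completed-stamp) come first, followed by done/failed actions sorted by
 * their completed time, most recent first.
 */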
/*!
* \internal
* \brief Print a stonith action
*
* \param[in] stream File stream to display output to
* \param[in] event stonith event
*/
static void
print_stonith_action(FILE *stream, stonith_history_t *event)
{
const char *action_s = stonith_action_str(event->action);
char *run_at_s = ctime(&event->completed);
if ((run_at_s) && (run_at_s[0] != 0)) {
run_at_s[strlen(run_at_s)-1] = 0; /* Overwrite the newline */
}
switch(output_format) {
case mon_output_xml:
            fprintf(stream, "        <fence_event target=\"%s\" action=\"%s\"",
                    event->target, event->action);
switch(event->state) {
case st_done:
fprintf(stream, " state=\"success\"");
break;
case st_failed:
fprintf(stream, " state=\"failed\"");
break;
default:
fprintf(stream, " state=\"pending\"");
}
fprintf(stream, " origin=\"%s\" client=\"%s\"",
event->origin, event->client);
if (event->delegate) {
fprintf(stream, " delegate=\"%s\"", event->delegate);
}
switch(event->state) {
case st_done:
case st_failed:
fprintf(stream, " completed=\"%s\"", run_at_s?run_at_s:"");
break;
default:
break;
}
fprintf(stream, " />\n");
break;
case mon_output_plain:
case mon_output_console:
switch(event->state) {
case st_done:
print_as("* %s of %s successful: delegate=%s, client=%s, origin=%s,\n"
" %s='%s'\n",
action_s, event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
fence_full_history?"completed":"last-successful",
run_at_s?run_at_s:"");
break;
case st_failed:
print_as("* %s of %s failed: delegate=%s, client=%s, origin=%s,\n"
" %s='%s'\n",
action_s, event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
fence_full_history?"completed":"last-failed",
run_at_s?run_at_s:"");
break;
default:
print_as("* %s of %s pending: client=%s, origin=%s\n",
action_s, event->target,
event->client, event->origin);
}
break;
case mon_output_html:
case mon_output_cgi:
switch(event->state) {
case st_done:
                    fprintf(stream, "  <li>%s of %s successful: delegate=%s, "
                            "client=%s, origin=%s, %s='%s'</li>\n",
action_s, event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
fence_full_history?"completed":"last-successful",
run_at_s?run_at_s:"");
break;
case st_failed:
                    fprintf(stream, "  <li>%s of %s failed: delegate=%s, "
                            "client=%s, origin=%s, %s='%s'</li>\n",
action_s, event->target,
event->delegate ? event->delegate : "",
event->client, event->origin,
fence_full_history?"completed":"last-failed",
run_at_s?run_at_s:"");
break;
default:
                    fprintf(stream, "  <li>%s of %s pending: client=%s, "
                            "origin=%s</li>\n",
action_s, event->target,
event->client, event->origin);
}
break;
default:
/* no support for fence history for other formats so far */
break;
}
}
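/* Sample plain-format output for a completed fence, e.g.:
 *   * reboot of node3 successful: delegate=node1, client=stonith-api.4321, origin=node2,
 *       last-successful='Wed Jun  5 10:00:00 2019'
 */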
/*!
* \internal
* \brief Print a section for failed stonith actions
*
* \param[in] stream File stream to display output to
* \param[in] history List of stonith actions
*
*/
static void
print_failed_stonith_actions(FILE *stream, stonith_history_t *history)
{
stonith_history_t *hp;
for (hp = history; hp; hp = hp->next) {
if (hp->state == st_failed) {
break;
}
}
if (!hp) {
return;
}
/* Print section heading */
switch (output_format) {
/* no need to take care of xml in here as xml gets full
* history anyway
*/
case mon_output_plain:
case mon_output_console:
print_as("\nFailed Fencing Actions:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>Failed Fencing Actions</h2>\n <ul>\n");
break;
default:
break;
}
/* Print each failed stonith action */
for (hp = history; hp; hp = hp->next) {
if (hp->state == st_failed) {
print_stonith_action(stream, hp);
}
}
/* End section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " </ul>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print pending stonith actions
*
* \param[in] stream File stream to display output to
* \param[in] history List of stonith actions
*
*/
static void
print_stonith_pending(FILE *stream, stonith_history_t *history)
{
/* xml-output always shows the full history
* so we'll never have to show pending-actions
* separately
*/
if (history && (history->state != st_failed) &&
(history->state != st_done)) {
stonith_history_t *hp;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nPending Fencing Actions:\n");
break;
case mon_output_html:
case mon_output_cgi:
                fprintf(stream, " <hr />\n <h2>Pending Fencing Actions</h2>\n <ul>\n");
break;
default:
break;
}
for (hp = history; hp; hp = hp->next) {
if ((hp->state == st_failed) || (hp->state == st_done)) {
break;
}
print_stonith_action(stream, hp);
}
/* End section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
                fprintf(stream, " </ul>\n");
break;
default:
break;
}
}
}
/*!
* \internal
* \brief Print a section for stonith-history
*
* \param[in] stream File stream to display output to
* \param[in] history List of stonith actions
*
*/
static void
print_stonith_history(FILE *stream, stonith_history_t *history)
{
stonith_history_t *hp;
/* Print section heading */
switch (output_format) {
case mon_output_plain:
case mon_output_console:
print_as("\nFencing History:\n");
break;
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " <hr />\n <h2>Fencing History</h2>\n <ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    <fence_history>\n");
break;
default:
break;
}
for (hp = history; hp; hp = hp->next) {
if ((hp->state != st_failed) || (output_format == mon_output_xml)) {
print_stonith_action(stream, hp);
}
}
/* End section */
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
            fprintf(stream, " </ul>\n");
break;
case mon_output_xml:
            fprintf(stream, "    </fence_history>\n");
break;
default:
break;
}
}
/*!
* \internal
* \brief Print cluster status to screen
*
* This uses the global display preferences set by command-line options
* to display cluster status in a human-friendly way.
*
* \param[in] data_set Working set of CIB state
* \param[in] stonith_history List of stonith actions
*/
static void
print_status(pe_working_set_t * data_set,
stonith_history_t *stonith_history)
{
GListPtr gIter = NULL;
int print_opts = get_resource_display_options();
/* space-separated lists of node names */
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_guest_nodes = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
if (output_format == mon_output_console) {
blank_screen();
}
print_cluster_summary(stdout, data_set);
print_as("\n");
/* Gather node information (and print if in bad state or grouping by node) */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
const char *node_mode = NULL;
char *node_name = get_node_display_name(node);
/* Get node mode */
if (node->details->unclean) {
if (node->details->online) {
node_mode = "UNCLEAN (online)";
} else if (node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if (node->details->pending) {
node_mode = "pending";
} else if (node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
if (node->details->running_rsc) {
node_mode = "standby (with active resources)";
} else {
node_mode = "standby";
}
} else {
node_mode = "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
node_mode = "maintenance";
} else {
node_mode = "OFFLINE (maintenance)";
}
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
if (pe__is_guest_node(node)) {
online_guest_nodes = add_list_element(online_guest_nodes, node_name);
} else if (pe__is_remote_node(node)) {
online_remote_nodes = add_list_element(online_remote_nodes, node_name);
} else {
online_nodes = add_list_element(online_nodes, node_name);
}
free(node_name);
continue;
}
} else {
node_mode = "OFFLINE";
if (group_by_node == FALSE) {
if (pe__is_remote_node(node)) {
offline_remote_nodes = add_list_element(offline_remote_nodes, node_name);
} else if (pe__is_guest_node(node)) {
/* ignore offline guest nodes */
} else {
offline_nodes = add_list_element(offline_nodes, node_name);
}
free(node_name);
continue;
}
}
/* If we get here, node is in bad state, or we're grouping by node */
/* Print the node name and status */
if (pe__is_guest_node(node)) {
print_as("Guest");
} else if (pe__is_remote_node(node)) {
print_as("Remote");
}
print_as("Node %s: %s\n", node_name, node_mode);
/* If we're grouping by node, print its resources */
if (group_by_node) {
if (print_brief) {
print_rscs_brief(node->details->running_rsc, "\t", print_opts | pe_print_rsconly,
stdout, FALSE);
} else {
GListPtr gIter2 = NULL;
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
resource_t *rsc = (resource_t *) gIter2->data;
rsc->fns->print(rsc, "\t", print_opts | pe_print_rsconly, stdout);
}
}
}
free(node_name);
}
/* If we're not grouping by node, summarize nodes by status */
if (online_nodes) {
print_as("Online: [%s ]\n", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
print_as("OFFLINE: [%s ]\n", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
print_as("RemoteOnline: [%s ]\n", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
print_as("RemoteOFFLINE: [%s ]\n", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_guest_nodes) {
print_as("GuestOnline: [%s ]\n", online_guest_nodes);
free(online_guest_nodes);
}
/* Print resources section, if needed */
print_resources(stdout, data_set, print_opts);
/* print Node Attributes section if requested */
if (show & mon_show_attributes) {
print_node_attributes(stdout, data_set);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (show & (mon_show_operations | mon_show_failcounts)) {
print_node_summary(stdout, data_set,
((show & mon_show_operations)? TRUE : FALSE));
}
/* If there were any failed actions, print them */
if (xml_has_children(data_set->failed)) {
print_failed_actions(stdout, data_set);
}
/* Print failed stonith actions */
if (fence_history) {
print_failed_stonith_actions(stdout, stonith_history);
}
/* Print tickets if requested */
if (show & mon_show_tickets) {
print_cluster_tickets(stdout, data_set);
}
/* Print negative location constraints if requested */
if (show & mon_show_bans) {
print_neg_locations(stdout, data_set);
}
/* Print stonith history */
if (fence_history) {
if (show & mon_show_fence_history) {
print_stonith_history(stdout, stonith_history);
} else {
print_stonith_pending(stdout, stonith_history);
}
}
#if CURSES_ENABLED
if (output_format == mon_output_console) {
refresh();
}
#endif
}
/*!
* \internal
* \brief Print cluster status in XML format
*
* \param[in] data_set Working set of CIB state
*/
static void
print_xml_status(pe_working_set_t * data_set,
stonith_history_t *stonith_history)
{
FILE *stream = stdout;
GListPtr gIter = NULL;
int print_opts = get_resource_display_options();
    fprintf(stream, "<?xml version=\"1.0\"?>\n");
    fprintf(stream, "<crm_mon version=\"%s\">\n", VERSION);
print_cluster_summary(stream, data_set);
/*** NODES ***/
    fprintf(stream, "    <nodes>\n");
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
const char *node_type = "unknown";
switch (node->details->type) {
case node_member:
node_type = "member";
break;
case node_remote:
node_type = "remote";
break;
case node_ping:
node_type = "ping";
break;
}
        fprintf(stream, "        <node name=\"%s\" ", node->details->uname);
fprintf(stream, "id=\"%s\" ", node->details->id);
fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false");
fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false");
fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false");
fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false");
fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false");
fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false");
fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false");
fprintf(stream, "expected_up=\"%s\" ", node->details->expected_up ? "true" : "false");
fprintf(stream, "is_dc=\"%s\" ", node->details->is_dc ? "true" : "false");
fprintf(stream, "resources_running=\"%d\" ", g_list_length(node->details->running_rsc));
fprintf(stream, "type=\"%s\" ", node_type);
if (pe__is_guest_node(node)) {
fprintf(stream, "id_as_resource=\"%s\" ", node->details->remote_rsc->container->id);
}
if (group_by_node) {
GListPtr lpc2 = NULL;
fprintf(stream, ">\n");
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
resource_t *rsc = (resource_t *) lpc2->data;
rsc->fns->print(rsc, " ", print_opts | pe_print_rsconly, stream);
}
fprintf(stream, "        </node>\n");
} else {
fprintf(stream, "/>\n");
}
}
fprintf(stream, "    </nodes>\n");
/* Print resources section, if needed */
print_resources(stream, data_set, print_opts);
/* print Node Attributes section if requested */
if (show & mon_show_attributes) {
print_node_attributes(stream, data_set);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (show & (mon_show_operations | mon_show_failcounts)) {
print_node_summary(stream, data_set,
((show & mon_show_operations)? TRUE : FALSE));
}
/* If there were any failed actions, print them */
if (xml_has_children(data_set->failed)) {
print_failed_actions(stream, data_set);
}
/* Print stonith history */
if (fence_history) {
print_stonith_history(stream, stonith_history);
}
/* Print tickets if requested */
if (show & mon_show_tickets) {
print_cluster_tickets(stream, data_set);
}
/* Print negative location constraints if requested */
if (show & mon_show_bans) {
print_neg_locations(stream, data_set);
}
fprintf(stream, "</crm_mon>\n");
fflush(stream);
fclose(stream);
}
/*!
 * \internal
 * \brief Print cluster status in HTML format (with HTTP headers if CGI)
 *
 * \param[in] data_set         Working set of CIB state
 * \param[in] filename         Name of file to write HTML to (ignored if CGI)
 * \param[in] stonith_history  List of stonith actions
 *
 * \return 0 on success, -1 on error
 */
static int
print_html_status(pe_working_set_t * data_set,
const char *filename,
stonith_history_t *stonith_history)
{
FILE *stream;
GListPtr gIter = NULL;
char *filename_tmp = NULL;
int print_opts = get_resource_display_options();
if (output_format == mon_output_cgi) {
stream = stdout;
fprintf(stream, "Content-Type: text/html\n\n");
} else {
filename_tmp = crm_concat(filename, "tmp", '.');
stream = fopen(filename_tmp, "w");
if (stream == NULL) {
crm_perror(LOG_ERR, "Cannot open %s for writing", filename_tmp);
free(filename_tmp);
return -1;
}
}
fprintf(stream, "<html>\n");
fprintf(stream, " <head>\n");
fprintf(stream, "  <title>Cluster status</title>\n");
fprintf(stream, "  <meta http-equiv=\"refresh\" content=\"%d\">\n", reconnect_msec / 1000);
fprintf(stream, " </head>\n");
fprintf(stream, "<body>\n");
print_cluster_summary(stream, data_set);
/*** NODE LIST ***/
fprintf(stream, "<hr />\n<h2>Node List</h2>\n");
fprintf(stream, "<ul>\n");
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
char *node_name = get_node_display_name(node);
fprintf(stream, "<li>Node: %s: ", node_name);
if (node->details->standby_onfail && node->details->online) {
fprintf(stream, "<font color=\"orange\">standby (on-fail)</font>\n");
} else if (node->details->standby && node->details->online) {
fprintf(stream, "<font color=\"orange\">standby%s</font>\n",
node->details->running_rsc?" (with active resources)":"");
} else if (node->details->standby) {
fprintf(stream, "<font color=\"red\">OFFLINE (standby)</font>\n");
} else if (node->details->maintenance && node->details->online) {
fprintf(stream, "<font color=\"blue\">maintenance</font>\n");
} else if (node->details->maintenance) {
fprintf(stream, "<font color=\"red\">OFFLINE (maintenance)</font>\n");
} else if (node->details->online) {
fprintf(stream, "<font color=\"green\">online</font>\n");
} else {
fprintf(stream, "<font color=\"red\">OFFLINE</font>\n");
}
if (print_brief && group_by_node) {
fprintf(stream, "<ul>\n");
print_rscs_brief(node->details->running_rsc, NULL, print_opts | pe_print_rsconly,
stream, FALSE);
fprintf(stream, "</ul>\n");
} else if (group_by_node) {
GListPtr lpc2 = NULL;
fprintf(stream, "<ul>\n");
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
resource_t *rsc = (resource_t *) lpc2->data;
fprintf(stream, "<li>");
rsc->fns->print(rsc, NULL, print_opts | pe_print_rsconly, stream);
fprintf(stream, "</li>\n");
}
fprintf(stream, "</ul>\n");
}
fprintf(stream, "</li>\n");
free(node_name);
}
fprintf(stream, "</ul>\n");
/* Print resources section, if needed */
print_resources(stream, data_set, print_opts);
/* print Node Attributes section if requested */
if (show & mon_show_attributes) {
print_node_attributes(stream, data_set);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (show & (mon_show_operations | mon_show_failcounts)) {
print_node_summary(stream, data_set,
((show & mon_show_operations)? TRUE : FALSE));
}
/* If there were any failed actions, print them */
if (xml_has_children(data_set->failed)) {
print_failed_actions(stream, data_set);
}
/* Print failed stonith actions */
if (fence_history) {
print_failed_stonith_actions(stream, stonith_history);
}
/* Print stonith history */
if (fence_history) {
if (show & mon_show_fence_history) {
print_stonith_history(stream, stonith_history);
} else {
print_stonith_pending(stream, stonith_history);
}
}
/* Print tickets if requested */
if (show & mon_show_tickets) {
print_cluster_tickets(stream, data_set);
}
/* Print negative location constraints if requested */
if (show & mon_show_bans) {
print_neg_locations(stream, data_set);
}
fprintf(stream, "</body>\n");
fprintf(stream, "</html>\n");
fflush(stream);
fclose(stream);
if (output_format != mon_output_cgi) {
if (rename(filename_tmp, filename) != 0) {
crm_perror(LOG_ERR, "Unable to rename %s->%s", filename_tmp, filename);
}
free(filename_tmp);
}
return 0;
}
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
int status, const char *desc)
{
pid_t pid;
/* setenv() needs strings; these are ints */
char *rc_s = crm_itoa(rc);
char *status_s = crm_itoa(status);
char *target_rc_s = crm_itoa(target_rc);
crm_debug("Sending external notification to '%s' via '%s'", external_recipient, external_agent);
if(rsc) {
setenv("CRM_notify_rsc", rsc, 1);
}
if (external_recipient) {
setenv("CRM_notify_recipient", external_recipient, 1);
}
setenv("CRM_notify_node", node, 1);
setenv("CRM_notify_task", task, 1);
setenv("CRM_notify_desc", desc, 1);
setenv("CRM_notify_rc", rc_s, 1);
setenv("CRM_notify_target_rc", target_rc_s, 1);
setenv("CRM_notify_status", status_s, 1);
pid = fork();
if (pid == -1) {
crm_perror(LOG_ERR, "notification fork() failed.");
}
if (pid == 0) {
/* crm_debug("notification: I am the child. Executing the notification program."); */
execl(external_agent, external_agent, NULL);
exit(CRM_EX_ERROR);
}
crm_trace("Spawned custom notification program '%s'.", external_agent);
free(target_rc_s);
free(status_s);
free(rc_s);
return 0;
}
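/* For context, a hedged illustration (not part of the upstream code): because
 * execl() above is invoked without arguments, an external agent receives all
 * event details through the CRM_notify_* environment variables exported by
 * send_custom_trap(). A minimal standalone agent might look like the sketch
 * below; only the variable names set above are assumed.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    /* Read the event details exported by send_custom_trap() */
    const char *node = getenv("CRM_notify_node");
    const char *task = getenv("CRM_notify_task");
    const char *rsc  = getenv("CRM_notify_rsc");    /* unset for fencing events */
    const char *rc   = getenv("CRM_notify_rc");
    const char *desc = getenv("CRM_notify_desc");

    fprintf(stderr, "event: %s of %s on %s: %s (rc=%s)\n",
            (task? task : "?"), (rsc? rsc : "-"),
            (node? node : "?"), (desc? desc : ""), (rc? rc : "?"));
    return 0;
}
#endif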
static void
handle_rsc_op(xmlNode * xml, const char *node_id)
{
int rc = -1;
int status = -1;
int target_rc = -1;
gboolean notify = TRUE;
char *rsc = NULL;
char *task = NULL;
const char *desc = NULL;
const char *magic = NULL;
const char *id = NULL;
const char *node = NULL;
xmlNode *n = xml;
xmlNode * rsc_op = xml;
if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) {
xmlNode *cIter;
for(cIter = xml->children; cIter; cIter = cIter->next) {
handle_rsc_op(cIter, node_id);
}
return;
}
id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY);
if (id == NULL) {
/* Compatibility with <= 1.1.5 */
id = ID(rsc_op);
}
magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
if (magic == NULL) {
/* non-change */
return;
}
if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc,
&target_rc)) {
crm_err("Invalid event %s detected for %s", magic, id);
return;
}
if (parse_op_key(id, &rsc, &task, NULL) == FALSE) {
crm_err("Invalid event detected for %s", id);
goto bail;
}
node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);
while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) {
n = n->parent;
}
if(node == NULL && n) {
node = crm_element_value(n, XML_ATTR_UNAME);
}
if (node == NULL && n) {
node = ID(n);
}
if (node == NULL) {
node = node_id;
}
if (node == NULL) {
crm_err("No node detected for event %s (%s)", magic, id);
goto bail;
}
/* look up where we expected it to be? */
desc = pcmk_strerror(pcmk_ok);
if (status == PCMK_LRM_OP_DONE && target_rc == rc) {
crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
if (rc == PCMK_OCF_NOT_RUNNING) {
notify = FALSE;
}
} else if (status == PCMK_LRM_OP_DONE) {
desc = services_ocf_exitcode_str(rc);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
} else {
desc = services_lrm_status_str(status);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
}
if (notify && external_agent) {
send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
}
bail:
free(rsc);
free(task);
}
static gboolean
mon_trigger_refresh(gpointer user_data)
{
mainloop_set_trigger(refresh_trigger);
return FALSE;
}
#define NODE_PATT "/lrm[@id="
static char *
get_node_from_xpath(const char *xpath)
{
char *nodeid = NULL;
char *tmp = strstr(xpath, NODE_PATT);
if(tmp) {
tmp += strlen(NODE_PATT);
tmp += 1;
nodeid = strdup(tmp);
tmp = strstr(nodeid, "\'");
CRM_ASSERT(tmp);
tmp[0] = 0;
}
return nodeid;
}
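/* Illustration (a hedged sketch, not part of the upstream code): given a v2
 * diff path such as /cib/status/node_state[@id='3']/lrm[@id='3']/..., the
 * function above returns the quoted lrm id ("3"), which handle_rsc_op() then
 * uses as the node id. The same parsing, inlined as a standalone demo:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int
main(void)
{
    const char *xpath =
        "/cib/status/node_state[@id='3']/lrm[@id='3']/lrm_resources";
    const char *patt = "/lrm[@id=";
    char *nodeid = NULL;
    char *tmp = strstr(xpath, patt);

    if (tmp) {
        tmp += strlen(patt) + 1;      /* skip the pattern and the opening quote */
        nodeid = strdup(tmp);
        *strchr(nodeid, '\'') = '\0'; /* truncate at the closing quote */
    }
    printf("node id: %s\n", nodeid? nodeid : "(none)");
    free(nodeid);
    return 0;
}
#endif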
static void
crm_diff_update_v2(const char *event, xmlNode * msg)
{
xmlNode *change = NULL;
xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) {
const char *name = NULL;
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
xmlNode *match = NULL;
const char *node = NULL;
if(op == NULL) {
continue;
} else if(strcmp(op, "create") == 0) {
match = change->children;
} else if(strcmp(op, "move") == 0) {
continue;
} else if(strcmp(op, "delete") == 0) {
continue;
} else if(strcmp(op, "modify") == 0) {
match = first_named_child(change, XML_DIFF_RESULT);
if(match) {
match = match->children;
}
}
if(match) {
name = (const char *)match->name;
}
crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
if(xpath == NULL) {
/* Version field, ignore */
} else if(name == NULL) {
crm_debug("No result for %s operation to %s", op, xpath);
CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0);
} else if(strcmp(name, XML_TAG_CIB) == 0) {
xmlNode *state = NULL;
xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS);
for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) {
node = crm_element_value(state, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(state);
}
handle_rsc_op(state, node);
}
} else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
xmlNode *state = NULL;
for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) {
node = crm_element_value(state, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(state);
}
handle_rsc_op(state, node);
}
} else if(strcmp(name, XML_CIB_TAG_STATE) == 0) {
node = crm_element_value(match, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(match);
}
handle_rsc_op(match, node);
} else if(strcmp(name, XML_CIB_TAG_LRM) == 0) {
node = ID(match);
handle_rsc_op(match, node);
} else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else {
crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
}
}
}
static void
crm_diff_update_v1(const char *event, xmlNode * msg)
{
/* Process operation updates */
xmlXPathObject *xpathObj = xpath_search(msg,
"//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
"//" XML_LRM_TAG_RSC_OP);
int lpc = 0, max = numXpathResults(xpathObj);
for (lpc = 0; lpc < max; lpc++) {
xmlNode *rsc_op = getXpathResult(xpathObj, lpc);
handle_rsc_op(rsc_op, NULL);
}
freeXpathObject(xpathObj);
}
static void
crm_diff_update(const char *event, xmlNode * msg)
{
int rc = -1;
static bool stale = FALSE;
gboolean cib_updated = FALSE;
xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
print_dot();
if (current_cib != NULL) {
rc = xml_apply_patchset(current_cib, diff, TRUE);
switch (rc) {
case -pcmk_err_diff_resync:
case -pcmk_err_diff_failed:
crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(current_cib); current_cib = NULL;
break;
case pcmk_ok:
cib_updated = TRUE;
break;
default:
crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(current_cib); current_cib = NULL;
}
}
if (current_cib == NULL) {
crm_trace("Re-requesting the full cib");
cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
}
if (external_agent) {
int format = 0;
crm_element_value_int(diff, "format", &format);
switch(format) {
case 1:
crm_diff_update_v1(event, msg);
break;
case 2:
crm_diff_update_v2(event, msg);
break;
default:
crm_err("Unknown patch format: %d", format);
}
}
if (current_cib == NULL) {
if(!stale) {
print_as("--- Stale data ---");
}
stale = TRUE;
return;
}
stale = FALSE;
kick_refresh(cib_updated);
}
static gboolean
mon_refresh_display(gpointer user_data)
{
xmlNode *cib_copy = copy_xml(current_cib);
stonith_history_t *stonith_history = NULL;
last_refresh = time(NULL);
if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
if (cib) {
cib->cmds->signoff(cib);
}
print_as("Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation));
if (output_format == mon_output_console) {
sleep(2);
}
clean_up(CRM_EX_CONFIG);
return FALSE;
}
/* get the stonith-history if there is evidence we need it
*/
while (fence_history) {
if (st != NULL) {
if (st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120)) {
fprintf(stderr, "Critical: Unable to get stonith-history\n");
mon_cib_connection_destroy(NULL);
} else {
if ((!fence_full_history) && (output_format != mon_output_xml)) {
stonith_history = reduce_stonith_history(stonith_history);
}
stonith_history = sort_stonith_history(stonith_history);
break; /* success; every other path falls through to the error handling below */
}
} else {
fprintf(stderr, "Critical: No stonith-API\n");
}
free_xml(cib_copy);
print_as("Reading stonith-history failed");
if (output_format == mon_output_console) {
sleep(2);
}
return FALSE;
}
if (mon_data_set == NULL) {
mon_data_set = pe_new_working_set();
CRM_ASSERT(mon_data_set != NULL);
}
mon_data_set->input = cib_copy;
cluster_status(mon_data_set);
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
* and bans need negative location constraints) */
if (show & (mon_show_bans | mon_show_tickets)) {
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
mon_data_set->input);
unpack_constraints(cib_constraints, mon_data_set);
}
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
if (print_html_status(mon_data_set, output_filename, stonith_history) != 0) {
fprintf(stderr, "Critical: Unable to output html file\n");
clean_up(CRM_EX_CANTCREAT);
return FALSE;
}
break;
case mon_output_xml:
print_xml_status(mon_data_set, stonith_history);
break;
case mon_output_monitor:
print_simple_status(mon_data_set, stonith_history);
if (has_warnings) {
clean_up(MON_STATUS_WARN);
return FALSE;
}
break;
case mon_output_plain:
case mon_output_console:
print_status(mon_data_set, stonith_history);
break;
case mon_output_none:
break;
}
stonith_history_free(stonith_history);
stonith_history = NULL;
pe_reset_working_set(mon_data_set);
return TRUE;
}
static void
mon_st_callback_event(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy(NULL);
} else if (external_agent) {
char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)",
e->operation, e->origin, e->target, pcmk_strerror(e->result),
e->id);
send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
free(desc);
}
}
static void
kick_refresh(gboolean data_updated)
{
static int updates = 0;
long now = time(NULL);
if (data_updated) {
updates++;
}
if(refresh_timer == NULL) {
refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL);
}
/* Refresh
 * - immediately, if the last update was longer ago than the reconnect interval
 * - after every 10 CIB updates
 * - otherwise, at most 2s after the last update (the refresh_timer period)
 */
if ((now - last_refresh) > (reconnect_msec / 1000)) {
mainloop_set_trigger(refresh_trigger);
mainloop_timer_stop(refresh_timer);
updates = 0;
} else if(updates >= 10) {
mainloop_set_trigger(refresh_trigger);
mainloop_timer_stop(refresh_timer);
updates = 0;
} else {
mainloop_timer_start(refresh_timer);
}
}
static void
mon_st_callback_display(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy(NULL);
} else {
print_dot();
kick_refresh(TRUE);
}
}
static void
clean_up_connections(void)
{
if (cib != NULL) {
cib->cmds->signoff(cib);
cib_delete(cib);
cib = NULL;
}
if (st != NULL) {
if (st->state != stonith_disconnected) {
st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
st->cmds->disconnect(st);
}
stonith_api_delete(st);
st = NULL;
}
}
/*
* De-init ncurses, disconnect from the CIB manager, disconnect fencing,
 * deallocate memory, and show the usage message if requested.
*
* We don't actually return, but nominally returning crm_exit_t allows a usage
* like "return clean_up(exit_code);" which helps static analysis understand the
* code flow.
*/
static crm_exit_t
clean_up(crm_exit_t exit_code)
{
#if CURSES_ENABLED
if (curses_console_initialized) {
output_format = mon_output_plain;
echo();
nocbreak();
endwin();
curses_console_initialized = FALSE;
}
#endif
clean_up_connections();
free(output_filename);
free(pid_file);
pe_free_working_set(mon_data_set);
mon_data_set = NULL;
if (exit_code == CRM_EX_USAGE) {
if (output_format == mon_output_cgi) {
fprintf(stdout, "Content-Type: text/plain\n"
"Status: 500\n\n");
} else {
crm_help('?', CRM_EX_USAGE);
}
}
crm_exit(exit_code);
}
diff --git a/tools/stonith_admin.c b/tools/stonith_admin.c
index 3e29073bec..b2c7ebbca9 100644
--- a/tools/stonith_admin.c
+++ b/tools/stonith_admin.c
@@ -1,840 +1,841 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/ipc.h>
#include <crm/common/util.h>
#include <crm/common/xml.h>
#include <crm/common/iso8601.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/output.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/cib.h>
#include <crm/pengine/status.h>
char action = 0;
struct {
gboolean as_nodeid;
gboolean broadcast;
gboolean cleanup;
gboolean installed;
gboolean metadata;
gboolean registered;
gboolean validate_cfg;
stonith_key_value_t *devices;
stonith_key_value_t *params;
int fence_level;
int timeout;
int tolerance;
int verbose;
char *agent;
char *confirm_host;
char *fence_host;
char *history;
char *last_fenced;
char *query;
char *reboot_host;
char *register_dev;
char *register_level;
char *targets;
char *terminate;
char *unfence_host;
char *unregister_dev;
char *unregister_level;
} options = {
.timeout = 120
};
gboolean add_env_params(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean add_stonith_device(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean add_stonith_params(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean add_tolerance(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_tag(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
#define INDENT " "
/* *INDENT-OFF* */
static GOptionEntry defn_entries[] = {
{ "register", 'R', 0, G_OPTION_ARG_STRING, &options.register_dev,
"Register the named stonith device. Requires: --agent.\n"
INDENT "Optional: --option, --env-option.",
"DEVICE" },
{ "deregister", 'D', 0, G_OPTION_ARG_STRING, &options.unregister_dev,
"De-register the named stonith device.",
"DEVICE" },
{ "register-level", 'r', 0, G_OPTION_ARG_STRING, &options.register_level,
"Register a stonith level for the named target,\n"
INDENT "specified as one of NAME, @PATTERN, or ATTR=VALUE.\n"
INDENT "Requires: --index and one or more --device entries.",
"TARGET" },
{ "deregister-level", 'd', 0, G_OPTION_ARG_STRING, &options.unregister_level,
"Unregister a stonith level for the named target,\n"
INDENT "specified as for --register-level. Requires: --index",
"TARGET" },
{ NULL }
};
static GOptionEntry query_entries[] = {
{ "list", 'l', 0, G_OPTION_ARG_STRING, &options.terminate,
"List devices that can terminate the specified host.\n"
INDENT "Optional: --timeout",
"HOST" },
{ "list-registered", 'L', 0, G_OPTION_ARG_NONE, &options.registered,
"List all registered devices. Optional: --timeout.",
NULL },
{ "list-installed", 'I', 0, G_OPTION_ARG_NONE, &options.installed,
"List all installed devices. Optional: --timeout.",
NULL },
{ "list-targets", 's', 0, G_OPTION_ARG_STRING, &options.targets,
"List the targets that can be fenced by the\n"
INDENT "named device. Optional: --timeout.",
"DEVICE" },
{ "metadata", 'M', 0, G_OPTION_ARG_NONE, &options.metadata,
"Show agent metadata. Requires: --agent.\n"
INDENT "Optional: --timeout.",
NULL },
{ "query", 'Q', 0, G_OPTION_ARG_STRING, &options.query,
"Check the named device's status. Optional: --timeout.",
"DEVICE" },
{ "history", 'H', 0, G_OPTION_ARG_STRING, &options.history,
"Show last successful fencing operation for named node\n"
INDENT "(or '*' for all nodes). Optional: --timeout, --cleanup,\n"
INDENT "--quiet (show only the operation's epoch timestamp),\n"
INDENT "--verbose (show all recorded and pending operations),\n"
INDENT "--broadcast (update history from all nodes available).",
"NODE" },
{ "last", 'h', 0, G_OPTION_ARG_STRING, &options.last_fenced,
"Indicate when the named node was last fenced.\n"
INDENT "Optional: --as-node-id.",
"NODE" },
{ "validate", 'K', 0, G_OPTION_ARG_NONE, &options.validate_cfg,
"Validate a fence device configuration.\n"
INDENT "Requires: --agent. Optional: --option, --env-option,\n"
INDENT "--quiet (print no output, only return status).",
NULL },
{ NULL }
};
static GOptionEntry fence_entries[] = {
{ "fence", 'F', 0, G_OPTION_ARG_STRING, &options.fence_host,
"Fence named host. Optional: --timeout, --tolerance.",
"HOST" },
{ "unfence", 'U', 0, G_OPTION_ARG_STRING, &options.unfence_host,
"Unfence named host. Optional: --timeout, --tolerance.",
"HOST" },
{ "reboot", 'B', 0, G_OPTION_ARG_STRING, &options.reboot_host,
"Reboot named host. Optional: --timeout, --tolerance.",
"HOST" },
{ "confirm", 'C', 0, G_OPTION_ARG_STRING, &options.confirm_host,
- "Tell clusted that named host is now safely down.",
+ "Tell cluster that named host is now safely down.",
"HOST", },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "cleanup", 'c', 0, G_OPTION_ARG_NONE, &options.cleanup,
"Cleanup wherever appropriate. Requires --history.",
NULL },
{ "broadcast", 'b', 0, G_OPTION_ARG_NONE, &options.broadcast,
"Broadcast wherever appropriate.",
NULL },
{ "agent", 'a', 0, G_OPTION_ARG_STRING, &options.agent,
"The agent to use (for example, fence_xvm;\n"
INDENT "with --register, --metadata, --validate).",
"AGENT" },
{ "option", 'o', 0, G_OPTION_ARG_CALLBACK, add_stonith_params,
"Specify a device configuration parameter as NAME=VALUE\n"
INDENT "(may be specified multiple times; with --register,\n"
INDENT "--validate).",
"PARAM" },
{ "env-option", 'e', 0, G_OPTION_ARG_CALLBACK, add_env_params,
"Specify a device configuration parameter with the\n"
INDENT "specified name, using the value of the\n"
INDENT "environment variable of the same name prefixed with\n"
INDENT "OCF_RESKEY_ (may be specified multiple times;\n"
INDENT "with --register, --validate).",
"PARAM" },
{ "tag", 'T', 0, G_OPTION_ARG_CALLBACK, set_tag,
"Identify fencing operations in logs with the specified\n"
INDENT "tag; useful when multiple entities might invoke\n"
INDENT "stonith_admin (used with most commands).",
"TAG" },
{ "device", 'v', 0, G_OPTION_ARG_CALLBACK, add_stonith_device,
"Device ID (with --register-level, device to associate with\n"
INDENT "a given host and level; may be specified multiple times)"
#if SUPPORT_CIBSECRETS
"\n" INDENT "(with --validate, name to use to load CIB secrets)"
#endif
".",
"DEVICE" },
{ "index", 'i', 0, G_OPTION_ARG_INT, &options.fence_level,
"The stonith level (1-9) (with --register-level,\n"
INDENT "--deregister-level).",
"LEVEL" },
{ "timeout", 't', 0, G_OPTION_ARG_INT, &options.timeout,
"Operation timeout in seconds (default 120;\n"
INDENT "used with most commands).",
"SECONDS" },
{ "as-node-id", 'n', 0, G_OPTION_ARG_NONE, &options.as_nodeid,
"(Advanced) The supplied node is the corosync node ID\n"
INDENT "(with --last).",
NULL },
{ "tolerance", 0, 0, G_OPTION_ARG_CALLBACK, add_tolerance,
"(Advanced) Do nothing if an equivalent --fence request\n"
INDENT "succeeded less than this many seconds earlier\n"
INDENT "(with --fence, --unfence, --reboot).",
"SECONDS" },
{ NULL }
};
/* *INDENT-ON* */
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_HTML,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static int st_opts = st_opt_sync_call | st_opt_allow_suicide;
static GMainLoop *mainloop = NULL;
struct {
stonith_t *st;
const char *target;
const char *action;
char *name;
int timeout;
int tolerance;
int rc;
} async_fence_data;
gboolean
add_env_params(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
char *key = crm_concat("OCF_RESKEY", optarg, '_');
const char *env = getenv(key);
gboolean retval = TRUE;
if (env == NULL) {
crm_err("Invalid option: -e %s", optarg);
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Invalid option: -e %s", optarg);
retval = FALSE;
} else {
crm_info("Got: '%s'='%s'", optarg, env);
options.params = stonith_key_value_add(options.params, optarg, env);
}
free(key);
return retval;
}
gboolean
add_stonith_device(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.devices = stonith_key_value_add(options.devices, NULL, optarg);
return TRUE;
}
gboolean
add_tolerance(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.tolerance = crm_get_msec(optarg) / 1000;
return TRUE;
}
gboolean
add_stonith_params(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
char *name = NULL;
char *value = NULL;
int rc = 0;
gboolean retval = TRUE;
crm_info("Scanning: -o %s", optarg);
rc = pcmk_scan_nvpair(optarg, &name, &value);
if (rc != 2) {
crm_err("Invalid option: -o %s: %s", optarg, pcmk_strerror(rc));
g_set_error(error, G_OPTION_ERROR, rc, "Invalid option: -o %s: %s", optarg, pcmk_strerror(rc));
retval = FALSE;
} else {
crm_info("Got: '%s'='%s'", name, value);
options.params = stonith_key_value_add(options.params, name, value);
}
free(name);
free(value);
return retval;
}
gboolean
set_tag(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
free(async_fence_data.name);
async_fence_data.name = crm_strdup_printf("%s.%s", crm_system_name, optarg);
return TRUE;
}
static void
notify_callback(stonith_t * st, stonith_event_t * e)
{
if (e->result != pcmk_ok) {
return;
}
if (safe_str_eq(async_fence_data.target, e->target) &&
safe_str_eq(async_fence_data.action, e->action)) {
async_fence_data.rc = e->result;
g_main_loop_quit(mainloop);
}
}
static void
fence_callback(stonith_t * stonith, stonith_callback_data_t * data)
{
async_fence_data.rc = data->rc;
g_main_loop_quit(mainloop);
}
static gboolean
async_fence_helper(gpointer user_data)
{
stonith_t *st = async_fence_data.st;
int call_id = 0;
int rc = stonith_api_connect_retry(st, async_fence_data.name, 10);
if (rc != pcmk_ok) {
fprintf(stderr, "Could not connect to fencer: %s\n", pcmk_strerror(rc));
g_main_loop_quit(mainloop);
return TRUE;
}
st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, notify_callback);
call_id = st->cmds->fence(st,
st_opt_allow_suicide,
async_fence_data.target,
async_fence_data.action,
async_fence_data.timeout, async_fence_data.tolerance);
if (call_id < 0) {
g_main_loop_quit(mainloop);
return TRUE;
}
st->cmds->register_callback(st,
call_id,
async_fence_data.timeout,
st_opt_timeout_updates, NULL, "callback", fence_callback);
return TRUE;
}
static int
mainloop_fencing(stonith_t * st, const char *target, const char *action, int timeout, int tolerance)
{
crm_trigger_t *trig;
async_fence_data.st = st;
async_fence_data.target = target;
async_fence_data.action = action;
async_fence_data.timeout = timeout;
async_fence_data.tolerance = tolerance;
async_fence_data.rc = -1;
trig = mainloop_add_trigger(G_PRIORITY_HIGH, async_fence_helper, NULL);
mainloop_set_trigger(trig);
mainloop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(mainloop);
return async_fence_data.rc;
}
static int
handle_level(stonith_t *st, char *target, int fence_level,
stonith_key_value_t *devices, bool added)
{
char *node = NULL;
char *pattern = NULL;
char *name = NULL;
char *value = strchr(target, '=');
/* Determine if targeting by attribute, node name pattern or node name */
if (value != NULL) {
name = target;
*value++ = '\0';
} else if (*target == '@') {
pattern = target + 1;
} else {
node = target;
}
/* Register or unregister level as appropriate */
if (added) {
return st->cmds->register_level_full(st, st_opts, node, pattern,
name, value, fence_level,
devices);
}
return st->cmds->remove_level_full(st, st_opts, node, pattern,
name, value, fence_level);
}
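/* To make the target classification above concrete, here is a self-contained
 * sketch (illustrative values, not part of the upstream code) showing how the
 * three accepted forms NAME, @PATTERN, and ATTR=VALUE are split:
 */
#if 0
#include <stdio.h>
#include <string.h>

/* Standalone copy of the target classification used by handle_level() */
static void
classify(char *target)
{
    char *node = NULL, *pattern = NULL, *name = NULL;
    char *value = strchr(target, '=');

    if (value != NULL) {
        name = target;
        *value++ = '\0';          /* split ATTR=VALUE in place */
    } else if (*target == '@') {
        pattern = target + 1;     /* @PATTERN selects by node-name pattern */
    } else {
        node = target;            /* plain node name */
    }
    printf("node=%s pattern=%s name=%s value=%s\n",
           node? node : "-", pattern? pattern : "-",
           name? name : "-", value? value : "-");
}

int
main(void)
{
    char a[] = "node1";
    char b[] = "@node-\\d+";
    char c[] = "rack=1";

    classify(a);   /* node=node1 */
    classify(b);   /* pattern=node-\d+ */
    classify(c);   /* name=rack value=1 */
    return 0;
}
#endif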
static int
handle_history(stonith_t *st, const char *target, int timeout, int quiet,
int verbose, int cleanup, int broadcast, pcmk__output_t *out)
{
stonith_history_t *history = NULL, *hp, *latest = NULL;
int rc = 0;
if (!quiet) {
if (cleanup) {
out->info(out, "cleaning up fencing-history%s%s",
target ? " for node " : "", target ? target : "");
}
if (broadcast) {
out->info(out, "gathering fencing-history from all nodes");
}
}
rc = st->cmds->history(st, st_opts | (cleanup?st_opt_cleanup:0) |
(broadcast?st_opt_broadcast:0),
(safe_str_eq(target, "*")? NULL : target),
&history, timeout);
out->begin_list(out, "Fencing history", "event", "events");
for (hp = history; hp; hp = hp->next) {
if (hp->state == st_done) {
latest = hp;
}
if (quiet || !verbose) {
continue;
}
out->message(out, "stonith-event", hp, 1);
}
if (latest) {
if (quiet && out->supports_quiet) {
out->info(out, "%lld", (long long) latest->completed);
} else if (!verbose) { // already printed if verbose
out->message(out, "stonith-event", latest, 0);
}
}
out->end_list(out);
stonith_history_free(history);
return rc;
}
static int
validate(stonith_t *st, const char *agent, const char *id,
stonith_key_value_t *params, int timeout, int quiet,
pcmk__output_t *out)
{
int rc = 1;
char *output = NULL;
char *error_output = NULL;
rc = st->cmds->validate(st, st_opt_sync_call, id, NULL, agent, params,
timeout, &output, &error_output);
if (quiet) {
return rc;
}
out->message(out, "validate", agent, id, output, error_output, rc);
return rc;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args) {
GOptionContext *context = NULL;
GOptionGroup *defn_group, *query_group, *fence_group, *addl_group;
GOptionGroup *main_group;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ NULL }
};
context = pcmk__build_arg_context(args, "text (default), html, xml");
/* Add the -q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
main_group = g_option_context_get_main_group(context);
g_option_group_add_entries(main_group, extra_prog_entries);
defn_group = g_option_group_new("definition", "Device Definition Commands:",
"Show device definition help", NULL, NULL);
g_option_group_add_entries(defn_group, defn_entries);
g_option_context_add_group(context, defn_group);
query_group = g_option_group_new("queries", "Queries:", "Show query help", NULL, NULL);
g_option_group_add_entries(query_group, query_entries);
g_option_context_add_group(context, query_group);
fence_group = g_option_group_new("fence", "Fencing Commands:", "Show fence help", NULL, NULL);
g_option_group_add_entries(fence_group, fence_entries);
g_option_context_add_group(context, fence_group);
addl_group = g_option_group_new("additional", "Additional Options:", "Show additional options", NULL, NULL);
g_option_group_add_entries(addl_group, addl_entries);
g_option_context_add_group(context, addl_group);
return context;
}
int
main(int argc, char **argv)
{
int rc = 0;
bool no_connect = false;
bool required_agent = false;
char *target = NULL;
char *lists = NULL;
const char *device = NULL;
crm_exit_t exit_code = CRM_EX_OK;
stonith_t *st = NULL;
stonith_key_value_t *dIter = NULL;
pcmk__output_t *out = NULL;
pcmk__common_args_t *args = calloc(1, sizeof(pcmk__common_args_t));
GError *error = NULL;
GOptionContext *context = NULL;
gchar **processed_args = NULL;
if (args == NULL) {
crm_exit(crm_errno2exit(-ENOMEM));
}
args->summary = strdup("stonith_admin - Access the Pacemaker fencing API");
context = build_arg_context(args);
pcmk__register_formats(context, formats);
crm_log_cli_init("stonith_admin");
async_fence_data.name = strdup(crm_system_name);
processed_args = pcmk__cmdline_preproc(argc, argv, "adehilorstvBCDFHQRTU");
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
}
for (int i = 0; i < options.verbose; i++) {
crm_bump_log_level(argc, argv);
}
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != 0) {
fprintf(stderr, "Error creating output format %s: %s\n", args->output_ty, pcmk_strerror(rc));
exit_code = CRM_EX_ERROR;
goto done;
}
stonith__register_messages(out);
if (args->version) {
out->version(out, false);
goto done;
}
if (options.validate_cfg) {
required_agent = true;
no_connect = true;
action = 'K';
}
if (options.installed) {
no_connect = true;
action = 'I';
}
if (options.registered) {
action = 'L';
}
if (options.register_dev != NULL) {
required_agent = true;
action = 'R';
device = options.register_dev;
}
if (options.query != NULL) {
action = 'Q';
device = options.query;
}
if (options.unregister_dev != NULL) {
action = 'D';
device = options.unregister_dev;
}
if (options.targets != NULL) {
action = 's';
device = options.targets;
}
if (options.terminate != NULL) {
action = 'L';
target = options.terminate;
}
if (options.metadata) {
no_connect = true;
required_agent = true;
action = 'M';
}
if (options.reboot_host != NULL) {
no_connect = true;
action = 'B';
target = options.reboot_host;
crm_log_args(argc, argv);
}
if (options.fence_host != NULL) {
no_connect = true;
action = 'F';
target = options.fence_host;
crm_log_args(argc, argv);
}
if (options.unfence_host != NULL) {
no_connect = true;
action = 'U';
target = options.unfence_host;
crm_log_args(argc, argv);
}
if (options.confirm_host != NULL) {
action = 'C';
target = options.confirm_host;
crm_log_args(argc, argv);
}
if (options.last_fenced != NULL) {
action = 'h';
target = options.last_fenced;
}
if (options.history != NULL) {
action = 'H';
target = options.history;
}
if (options.register_level != NULL) {
action = 'r';
target = options.register_level;
}
if (options.unregister_level != NULL) {
action = 'd';
target = options.unregister_level;
}
if (optind > argc || action == 0) {
out->err(out, "%s", g_option_context_get_help(context, TRUE, NULL));
exit_code = CRM_EX_USAGE;
goto done;
}
if (required_agent && options.agent == NULL) {
out->err(out, "Please specify an agent to query using -a,--agent [value]");
out->err(out, "%s", g_option_context_get_help(context, TRUE, NULL));
exit_code = CRM_EX_USAGE;
goto done;
}
st = stonith_api_new();
-
- if (!no_connect) {
+ if (st == NULL) {
+ rc = -ENOMEM;
+ } else if (!no_connect) {
rc = st->cmds->connect(st, async_fence_data.name, NULL);
- if (rc < 0) {
- out->err(out, "Could not connect to fencer: %s", pcmk_strerror(rc));
- exit_code = CRM_EX_DISCONNECT;
- goto done;
- }
+ }
+ if (rc < 0) {
+ out->err(out, "Could not connect to fencer: %s", pcmk_strerror(rc));
+ exit_code = CRM_EX_DISCONNECT;
+ goto done;
}
switch (action) {
case 'I':
rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &options.devices, options.timeout);
if (rc < 0) {
out->err(out, "Failed to list installed devices: %s", pcmk_strerror(rc));
break;
}
out->begin_list(out, "Installed fence devices", "fence device", "fence devices");
for (dIter = options.devices; dIter; dIter = dIter->next) {
out->list_item(out, "device", dIter->value);
}
out->end_list(out);
rc = 0;
stonith_key_value_freeall(options.devices, 1, 1);
break;
case 'L':
rc = st->cmds->query(st, st_opts, target, &options.devices, options.timeout);
if (rc < 0) {
out->err(out, "Failed to list registered devices: %s", pcmk_strerror(rc));
break;
}
out->begin_list(out, "Registered fence devices", "fence device", "fence devices");
for (dIter = options.devices; dIter; dIter = dIter->next) {
out->list_item(out, "device", dIter->value);
}
out->end_list(out);
rc = 0;
stonith_key_value_freeall(options.devices, 1, 1);
break;
case 'Q':
rc = st->cmds->monitor(st, st_opts, device, options.timeout);
if (rc < 0) {
rc = st->cmds->list(st, st_opts, device, NULL, options.timeout);
}
break;
case 's':
rc = st->cmds->list(st, st_opts, device, &lists, options.timeout);
if (rc == 0) {
GList *targets = stonith__parse_targets(lists);
out->begin_list(out, "Fence targets", "fence target", "fence targets");
while (targets != NULL) {
out->list_item(out, NULL, (const char *) targets->data);
targets = targets->next;
}
out->end_list(out);
free(lists);
} else {
out->err(out, "List command returned error: %s (%d)", pcmk_strerror(rc), rc);
}
break;
case 'R':
rc = st->cmds->register_device(st, st_opts, device, NULL, options.agent,
options.params);
break;
case 'D':
rc = st->cmds->remove_device(st, st_opts, device);
break;
case 'd':
case 'r':
rc = handle_level(st, target, options.fence_level, options.devices, action == 'r');
break;
case 'M':
{
char *buffer = NULL;
rc = st->cmds->metadata(st, st_opt_sync_call, options.agent, NULL,
&buffer, options.timeout);
if (rc == pcmk_ok) {
out->output_xml(out, "metadata", buffer);
}
free(buffer);
}
break;
case 'C':
rc = st->cmds->confirm(st, st_opts, target);
break;
case 'B':
rc = mainloop_fencing(st, target, "reboot", options.timeout, options.tolerance);
break;
case 'F':
rc = mainloop_fencing(st, target, "off", options.timeout, options.tolerance);
break;
case 'U':
rc = mainloop_fencing(st, target, "on", options.timeout, options.tolerance);
break;
case 'h':
{
time_t when = 0;
if(options.as_nodeid) {
uint32_t nodeid = atol(target);
when = stonith_api_time(nodeid, NULL, FALSE);
} else {
when = stonith_api_time(0, target, FALSE);
}
out->message(out, "last-fenced", target, when);
}
break;
case 'H':
rc = handle_history(st, target, options.timeout, args->quiet,
options.verbose, options.cleanup,
options.broadcast, out);
break;
case 'K':
device = (options.devices ? options.devices->key : NULL);
rc = validate(st, options.agent, device, options.params,
options.timeout, args->quiet, out);
break;
}
crm_info("Command returned: %s (%d)", pcmk_strerror(rc), rc);
exit_code = crm_errno2exit(rc);
done:
g_strfreev(processed_args);
g_option_context_free(context);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
free(async_fence_data.name);
stonith_key_value_freeall(options.params, 1, 1);
if (st != NULL) {
st->cmds->disconnect(st);
stonith_api_delete(st);
}
return exit_code;
}