diff --git a/cts/patterns.py b/cts/patterns.py
index 1b86ee7c31..8de67b1f94 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,391 +1,391 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2008-2019 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
]
self.BadNews = []
self.components = {}
self.commands = {
"StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"SetCheckInterval" : "cibadmin --modify -c --xml-text ''",
"ClearCheckInterval" : "cibadmin --delete --xpath \"//nvpair[@name='cluster-recheck-interval']\"",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : "(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* of %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"schedulerd.*: Recover %s",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* for host '%s' with device .* returned: 0",
}
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
"Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* with device 'FencingFail' returned:",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"\[[0-9]+\] terminated with signal [0-9]+ \(",
r"schedulerd:.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Executing .* fencing operation \(.*\) on ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
# "Resource .*stonith::.* is active on 2 nodes attempting recovery",
# "Transition .* ERRORs found during PE processing",
]
self.components["corosync-ignore"] = [
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling Node .* for STONITH",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning failed child process: pacemaker-attrd",
r"pacemakerd.* Respawning failed child process: pacemaker-based",
r"pacemakerd.* Respawning failed child process: pacemaker-controld",
r"pacemakerd.* Respawning failed child process: pacemaker-fenced",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-execd.* terminated with signal 9",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.*Respawning failed child process: pacemaker-execd",
r"pacemakerd.*Respawning failed child process: pacemaker-controld",
]
self.components["pacemaker-execd-ignore"] = []
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"Respawning failed child process: pacemaker-controld",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
"Connection to pengine failed",
"Connection to pengine.* closed",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = []
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
- r"pacemaker-controld.*:\s*warn.*:\s*Callback already present",
+ r"pacemaker-controld.*Fencer successfully connected",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Fencer connection failed \(will retry\)",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class, run inside Docker containers
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
# python cts/CTSpatt.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
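
A minimal usage sketch (editorial, not part of the patch): PatternSelector resolves command and log-pattern templates by variant name and key, and callers fill in node names via %-formatting. Assuming this module is importable as cts.patterns, and with hypothetical node names:

    from cts.patterns import PatternSelector

    selector = PatternSelector("crm-corosync")      # registers the variant
    start_cmd = selector["StartCmd"]                # command template lookup
    dc_idle = selector.get_template("crm-corosync", "Pat:DC_IDLE")
    # Multi-node patterns take their node names from the caller:
    they_stopped = selector["Pat:They_stopped"] % ("node1", "node2")
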
diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c
index 3ce7470442..48225ac58a 100644
--- a/daemons/controld/controld_callbacks.c
+++ b/daemons/controld/controld_callbacks.c
@@ -1,340 +1,340 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* From join_dc... */
extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
void
crmd_ha_msg_filter(xmlNode * msg)
{
if (AM_I_DC) {
const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM);
if (safe_str_eq(sys_from, CRM_SYSTEM_DC)) {
const char *from = crm_element_value(msg, F_ORIG);
if (safe_str_neq(from, fsa_our_uname)) {
int level = LOG_INFO;
const char *op = crm_element_value(msg, F_CRM_TASK);
/* make sure the election happens NOW */
if (fsa_state != S_ELECTION) {
ha_msg_input_t new_input;
level = LOG_WARNING;
new_input.msg = msg;
register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION, NULL, &new_input,
__FUNCTION__);
}
do_crm_log(level, "Another DC detected: %s (op=%s)", from, op);
goto done;
}
}
} else {
const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO);
if (safe_str_eq(sys_to, CRM_SYSTEM_DC)) {
return;
}
}
/* crm_log_xml_trace("HA[inbound]", msg); */
route_message(C_HA_MESSAGE, msg);
done:
trigger_fsa(fsa_source);
}
/*!
* \internal
* \brief Check whether a node is online
*
* \param[in] node Node to check
*
* \retval -1 if completely dead
* \retval 0 if partially alive
* \retval 1 if completely alive
*/
static int
node_alive(const crm_node_t *node)
{
if (is_set(node->flags, crm_remote_node)) {
// Pacemaker Remote nodes can't be partially alive
return safe_str_eq(node->state, CRM_NODE_MEMBER)? 1: -1;
} else if (crm_is_peer_active(node)) {
// Completely up cluster node: both cluster member and peer
return 1;
} else if (is_not_set(node->processes, crm_get_cluster_proc())
&& safe_str_neq(node->state, CRM_NODE_MEMBER)) {
// Completely down cluster node: neither cluster member nor peer
return -1;
}
// Partially up cluster node: only cluster member or only peer
return 0;
}
#define state_text(state) ((state)? (const char *)(state) : "in unknown state")
void
peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
{
uint32_t old = 0;
bool appeared = FALSE;
bool is_remote = is_set(node->flags, crm_remote_node);
/* The controller waits to receive some information from the membership
* layer before declaring itself operational. If this is being called for a
* cluster node, indicate that we have it.
*/
if (!is_remote) {
set_bit(fsa_input_register, R_PEER_DATA);
}
if (node->uname == NULL) {
return;
}
switch (type) {
case crm_status_uname:
/* If we've never seen the node, then it also won't be in the status section */
crm_info("%s node %s is now %s",
(is_remote? "Remote" : "Cluster"),
node->uname, state_text(node->state));
return;
case crm_status_nstate:
/* This callback should not be called unless the state actually
* changed, but here's a failsafe just in case.
*/
CRM_CHECK(safe_str_neq(data, node->state), return);
crm_info("%s node %s is now %s (was %s)",
(is_remote? "Remote" : "Cluster"),
node->uname, state_text(node->state), state_text(data));
if (safe_str_eq(CRM_NODE_MEMBER, node->state)) {
appeared = TRUE;
if (!is_remote) {
remove_stonith_cleanup(node->uname);
}
} else {
controld_remove_voter(node->uname);
}
crmd_alert_node_event(node);
break;
case crm_status_processes:
CRM_CHECK(data != NULL, return);
old = *(const uint32_t *)data;
appeared = is_set(node->processes, crm_get_cluster_proc());
crm_info("Node %s is %s a peer " CRM_XS " DC=%s old=0x%07x new=0x%07x",
node->uname, (appeared? "now" : "no longer"),
(AM_I_DC? "true" : (fsa_our_dc? fsa_our_dc : "")),
old, node->processes);
if (is_not_set((node->processes ^ old), crm_get_cluster_proc())) {
/* Peer status did not change. This should not be possible,
* since we don't track process flags other than peer status.
*/
crm_trace("Process flag 0x%7x did not change from 0x%7x to 0x%7x",
crm_get_cluster_proc(), old, node->processes);
return;
}
if (!appeared) {
controld_remove_voter(node->uname);
}
if (is_not_set(fsa_input_register, R_CIB_CONNECTED)) {
crm_trace("Ignoring peer status change because not connected to CIB");
return;
} else if (fsa_state == S_STOPPING) {
crm_trace("Ignoring peer status change because stopping");
return;
}
if (safe_str_eq(node->uname, fsa_our_uname) && !appeared) {
/* Did we get evicted? */
crm_notice("Our peer connection failed");
register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ERROR, NULL);
} else if (safe_str_eq(node->uname, fsa_our_dc) && crm_is_peer_active(node) == FALSE) {
/* Did the DC leave us? */
crm_notice("Our peer on the DC (%s) is dead", fsa_our_dc);
register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ELECTION, NULL);
/* @COMPAT DC < 1.1.13: If a DC shuts down normally, we don't
* want to fence it. Newer DCs will send their shutdown request
* to all peers, who will update the DC's expected state to
* down, thus avoiding fencing. We can safely erase the DC's
* transient attributes when it leaves in that case. However,
* the only way to avoid fencing older DCs is to leave the
* transient attributes intact until it rejoins.
*/
if (compare_version(fsa_our_dc_version, "3.0.9") > 0) {
erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
}
} else if(AM_I_DC) {
if (appeared) {
- te_trigger_stonith_history_sync();
+ te_trigger_stonith_history_sync(FALSE);
} else {
erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
}
}
break;
}
if (AM_I_DC) {
xmlNode *update = NULL;
int flags = node_update_peer;
int alive = node_alive(node);
crm_action_t *down = match_down_event(node->uuid);
crm_trace("Alive=%d, appeared=%d, down=%d",
alive, appeared, (down? down->id : -1));
if (appeared && (alive > 0) && !is_remote) {
register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL);
}
if (down) {
const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK);
if (safe_str_eq(task, CRM_OP_FENCE)) {
/* tengine_stonith_callback() confirms fence actions */
crm_trace("Updating CIB %s fencer reported fencing of %s complete",
(down->confirmed? "after" : "before"), node->uname);
} else if (!appeared && safe_str_eq(task, CRM_OP_SHUTDOWN)) {
// Shutdown actions are immediately confirmed (i.e. no_wait)
if (!is_remote) {
flags |= node_update_join | node_update_expected;
crmd_peer_down(node, FALSE);
check_join_state(fsa_state, __FUNCTION__);
}
if (alive >= 0) {
crm_info("%s of peer %s is in progress " CRM_XS " action=%d",
task, node->uname, down->id);
} else {
crm_notice("%s of peer %s is complete " CRM_XS " action=%d",
task, node->uname, down->id);
update_graph(transition_graph, down);
trigger_graph();
}
} else {
crm_trace("Node %s is %s, was expected to %s (op %d)",
node->uname,
((alive > 0)? "alive" :
((alive < 0)? "dead" : "partially alive")),
task, down->id);
}
} else if (appeared == FALSE) {
crm_warn("Stonith/shutdown of node %s was not expected",
node->uname);
if (!is_remote) {
crm_update_peer_join(__FUNCTION__, node, crm_join_none);
check_join_state(fsa_state, __FUNCTION__);
}
abort_transition(INFINITY, tg_restart, "Node failure", NULL);
fail_incompletable_actions(transition_graph, node->uuid);
} else {
crm_trace("Node %s came up, was not expected to be down",
node->uname);
}
if (is_remote) {
/* A pacemaker_remote node won't have its cluster status updated
* in the CIB by membership-layer callbacks, so do it here.
*/
flags |= node_update_cluster;
/* Trigger resource placement on newly integrated nodes */
if (appeared) {
abort_transition(INFINITY, tg_restart,
"pacemaker_remote node integrated", NULL);
}
}
/* Update the CIB node state */
update = create_node_state_update(node, flags, NULL, __FUNCTION__);
if (update == NULL) {
crm_debug("Node state update not yet possible for %s", node->uname);
} else {
fsa_cib_anon_update(XML_CIB_TAG_STATUS, update);
}
free_xml(update);
}
trigger_fsa(fsa_source);
}
void
crmd_cib_connection_destroy(gpointer user_data)
{
CRM_CHECK(user_data == fsa_cib_conn,;);
crm_trace("Invoked");
trigger_fsa(fsa_source);
fsa_cib_conn->state = cib_disconnected;
if (is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) {
crm_info("Connection to the CIB manager terminated");
return;
}
// @TODO This should trigger a reconnect, not a shutdown
crm_crit("Lost connection to the CIB manager, shutting down");
register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
clear_bit(fsa_input_register, R_CIB_CONNECTED);
return;
}
gboolean
crm_fsa_trigger(gpointer user_data)
{
crm_trace("Invoked (queue len: %d)", g_list_length(fsa_message_queue));
s_crmd_fsa(C_FSA_INTERNAL);
crm_trace("Exited (queue len: %d)", g_list_length(fsa_message_queue));
return TRUE;
}
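
Aside (editorial, not part of the patch): the three-way contract documented on node_alive() reduces to a small classification, sketched here with stand-in booleans in place of the real crm_node_t membership and process-flag checks:

    /* Illustrative only: node_alive()'s return contract for cluster nodes,
     * with the process-flag test collapsed into is_member for brevity.
     */
    static int
    classify_cluster_node(int is_member, int is_active_peer)
    {
        if (is_active_peer) {
            return 1;       /* completely alive: member and active peer */
        }
        if (!is_member) {
            return -1;      /* completely dead: neither member nor peer */
        }
        return 0;           /* partially alive: one but not the other */
    }
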
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index e99d605988..f3bb20f7db 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -1,878 +1,880 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
qb_ipcs_service_t *ipcs = NULL;
#if SUPPORT_COROSYNC
extern gboolean crm_connect_corosync(crm_cluster_t * cluster);
#endif
void crm_shutdown(int nsig);
gboolean crm_read_options(gpointer user_data);
gboolean fsa_has_quorum = FALSE;
crm_trigger_t *fsa_source = NULL;
crm_trigger_t *config_read = NULL;
bool no_quorum_suicide_escalation = FALSE;
/* A_HA_CONNECT */
void
do_ha_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
gboolean registered = FALSE;
static crm_cluster_t *cluster = NULL;
if (cluster == NULL) {
cluster = calloc(1, sizeof(crm_cluster_t));
}
if (action & A_HA_DISCONNECT) {
crm_cluster_disconnect(cluster);
crm_info("Disconnected from the cluster");
set_bit(fsa_input_register, R_HA_DISCONNECTED);
}
if (action & A_HA_CONNECT) {
crm_set_status_callback(&peer_update_callback);
crm_set_autoreap(FALSE);
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
registered = crm_connect_corosync(cluster);
#endif
}
if (registered == TRUE) {
controld_election_init(cluster->uname);
fsa_our_uname = cluster->uname;
fsa_our_uuid = cluster->uuid;
if(cluster->uuid == NULL) {
crm_err("Could not obtain local uuid");
registered = FALSE;
}
}
if (registered == FALSE) {
set_bit(fsa_input_register, R_HA_DISCONNECTED);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
populate_cib_nodes(node_update_none, __FUNCTION__);
clear_bit(fsa_input_register, R_HA_DISCONNECTED);
crm_info("Connected to the cluster");
}
if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__);
}
}
/* A_SHUTDOWN */
void
do_shutdown(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* just in case */
set_bit(fsa_input_register, R_SHUTDOWN);
controld_disconnect_fencer(FALSE);
}
/* A_SHUTDOWN_REQ */
void
do_shutdown_req(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *msg = NULL;
set_bit(fsa_input_register, R_SHUTDOWN);
crm_info("Sending shutdown request to all peers (DC is %s)",
(fsa_our_dc? fsa_our_dc : "not set"));
msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
/* set_bit(fsa_input_register, R_STAYDOWN); */
if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
free_xml(msg);
}
extern char *max_generation_from;
extern xmlNode *max_generation_xml;
extern GHashTable *resource_history;
extern GHashTable *voted;
void
crmd_fast_exit(crm_exit_t exit_code)
{
if (is_set(fsa_input_register, R_STAYDOWN)) {
crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d",
exit_code, CRM_EX_FATAL);
exit_code = CRM_EX_FATAL;
} else if ((exit_code == CRM_EX_OK)
&& is_set(fsa_input_register, R_IN_RECOVERY)) {
crm_err("Could not recover from internal error");
exit_code = CRM_EX_ERROR;
}
crm_exit(exit_code);
}
crm_exit_t
crmd_exit(crm_exit_t exit_code)
{
GListPtr gIter = NULL;
GMainLoop *mloop = crmd_mainloop;
static bool in_progress = FALSE;
if (in_progress && (exit_code == CRM_EX_OK)) {
crm_debug("Exit is already in progress");
return exit_code;
} else if(in_progress) {
crm_notice("Error during shutdown process, exiting now with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
in_progress = TRUE;
crm_trace("Preparing to exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
/* Suppress secondary errors resulting from us disconnecting everything */
set_bit(fsa_input_register, R_HA_DISCONNECTED);
/* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */
if(ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
controld_close_attrd_ipc();
pe_subsystem_free();
controld_disconnect_fencer(TRUE);
if ((exit_code == CRM_EX_OK) && (crmd_mainloop == NULL)) {
crm_debug("No mainloop detected");
exit_code = CRM_EX_ERROR;
}
/* On an error, just get out.
*
* Otherwise, make the effort to have mainloop exit gracefully so
* that it (mostly) cleans up after itself and valgrind has less
* to report on - allowing real errors to stand out
*/
if (exit_code != CRM_EX_OK) {
crm_notice("Forcing immediate exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
/* Clean up as much memory as possible for valgrind */
for (gIter = fsa_message_queue; gIter != NULL; gIter = gIter->next) {
fsa_data_t *fsa_data = gIter->data;
crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]",
fsa_input2string(fsa_data->fsa_input),
fsa_state2string(fsa_state),
fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
delete_fsa_input(fsa_data);
}
clear_bit(fsa_input_register, R_MEMBERSHIP);
g_list_free(fsa_message_queue); fsa_message_queue = NULL;
metadata_cache_fini();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
* be used when we drain the mainloop later.
*/
cib_free_callbacks(fsa_cib_conn);
fsa_cib_conn->cmds->signoff(fsa_cib_conn);
verify_stopped(fsa_state, LOG_WARNING);
clear_bit(fsa_input_register, R_LRM_CONNECTED);
lrm_state_destroy_all();
/* This basically will not work, since mainloop has a reference to it */
mainloop_destroy_trigger(fsa_source); fsa_source = NULL;
mainloop_destroy_trigger(config_read); config_read = NULL;
mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL;
crm_client_cleanup();
crm_peer_destroy();
crm_timer_stop(transition_timer);
crm_timer_stop(integration_timer);
crm_timer_stop(finalization_timer);
crm_timer_stop(election_trigger);
crm_timer_stop(shutdown_escalation_timer);
crm_timer_stop(wait_timer);
crm_timer_stop(recheck_timer);
+ te_cleanup_stonith_history_sync(NULL, TRUE);
+
free(transition_timer); transition_timer = NULL;
free(integration_timer); integration_timer = NULL;
free(finalization_timer); finalization_timer = NULL;
free(election_trigger); election_trigger = NULL;
free(shutdown_escalation_timer); shutdown_escalation_timer = NULL;
free(wait_timer); wait_timer = NULL;
free(recheck_timer); recheck_timer = NULL;
free(fsa_our_dc_version); fsa_our_dc_version = NULL;
free(fsa_our_uname); fsa_our_uname = NULL;
free(fsa_our_uuid); fsa_our_uuid = NULL;
free(fsa_our_dc); fsa_our_dc = NULL;
free(fsa_cluster_name); fsa_cluster_name = NULL;
free(te_uuid); te_uuid = NULL;
free(fsa_pe_ref); fsa_pe_ref = NULL;
free(failed_stop_offset); failed_stop_offset = NULL;
free(failed_start_offset); failed_start_offset = NULL;
free(max_generation_from); max_generation_from = NULL;
free_xml(max_generation_xml); max_generation_xml = NULL;
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGTRAP);
/* leave SIGCHLD engaged as we might still want to drain some service-actions */
if (mloop) {
GMainContext *ctx = g_main_loop_get_context(crmd_mainloop);
/* Don't re-enter this block */
crmd_mainloop = NULL;
/* no signals on final draining anymore */
mainloop_destroy_signal(SIGCHLD);
crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
{
int lpc = 0;
while((g_main_context_pending(ctx) && lpc < 10)) {
lpc++;
crm_trace("Iteration %d", lpc);
g_main_context_dispatch(ctx);
}
}
crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
g_main_loop_quit(mloop);
/* Won't do anything yet, since we're inside it now */
g_main_loop_unref(mloop);
} else {
mainloop_destroy_signal(SIGCHLD);
}
cib_delete(fsa_cib_conn);
fsa_cib_conn = NULL;
throttle_fini();
/* Graceful */
crm_trace("Done preparing for exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
return exit_code;
}
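/* Editorial sketch, not part of the patch: the bounded drain-then-quit idiom
 * used by crmd_exit() above, isolated as a minimal GLib example. The function
 * name is hypothetical; the GLib calls are the same ones used above.
 */
static void
drain_and_quit(GMainLoop *loop, int max_iterations)
{
    GMainContext *ctx = g_main_loop_get_context(loop);
    int lpc = 0;

    /* Dispatch ready sources without blocking, a bounded number of times,
     * so pending destroy notifications run before the loop is quit.
     */
    while (g_main_context_pending(ctx) && (lpc < max_iterations)) {
        lpc++;
        g_main_context_dispatch(ctx);
    }
    g_main_loop_quit(loop);
}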
/* A_EXIT_0, A_EXIT_1 */
void
do_exit(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
int log_level = LOG_INFO;
const char *exit_type = "gracefully";
if (action & A_EXIT_1) {
log_level = LOG_ERR;
exit_type = "forcefully";
exit_code = CRM_EX_ERROR;
}
verify_stopped(cur_state, LOG_ERR);
do_crm_log(log_level, "Performing %s - %s exiting the controller",
fsa_action2string(action), exit_type);
crm_info("[%s] stopped (%d)", crm_system_name, exit_code);
crmd_exit(exit_code);
}
static void sigpipe_ignore(int nsig) { return; }
/* A_STARTUP */
void
do_startup(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
int was_error = 0;
crm_debug("Registering Signal Handlers");
mainloop_add_signal(SIGTERM, crm_shutdown);
mainloop_add_signal(SIGPIPE, sigpipe_ignore);
fsa_source = mainloop_add_trigger(G_PRIORITY_HIGH, crm_fsa_trigger, NULL);
config_read = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL);
transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL);
crm_debug("Creating CIB manager and executor objects");
fsa_cib_conn = cib_new();
lrm_state_init_local();
/* set up the timers */
transition_timer = calloc(1, sizeof(fsa_timer_t));
integration_timer = calloc(1, sizeof(fsa_timer_t));
finalization_timer = calloc(1, sizeof(fsa_timer_t));
election_trigger = calloc(1, sizeof(fsa_timer_t));
shutdown_escalation_timer = calloc(1, sizeof(fsa_timer_t));
wait_timer = calloc(1, sizeof(fsa_timer_t));
recheck_timer = calloc(1, sizeof(fsa_timer_t));
if (election_trigger != NULL) {
election_trigger->source_id = 0;
election_trigger->period_ms = -1;
election_trigger->fsa_input = I_DC_TIMEOUT;
election_trigger->callback = crm_timer_popped;
election_trigger->repeat = FALSE;
} else {
was_error = TRUE;
}
if (transition_timer != NULL) {
transition_timer->source_id = 0;
transition_timer->period_ms = -1;
transition_timer->fsa_input = I_PE_CALC;
transition_timer->callback = crm_timer_popped;
transition_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (integration_timer != NULL) {
integration_timer->source_id = 0;
integration_timer->period_ms = -1;
integration_timer->fsa_input = I_INTEGRATED;
integration_timer->callback = crm_timer_popped;
integration_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (finalization_timer != NULL) {
finalization_timer->source_id = 0;
finalization_timer->period_ms = -1;
finalization_timer->fsa_input = I_FINALIZED;
finalization_timer->callback = crm_timer_popped;
finalization_timer->repeat = FALSE;
/* for possible enabling... a bug in the join protocol left
* a slave in S_PENDING while we think it's in S_NOT_DC
*
* raising I_FINALIZED puts us into a transition loop that is
* never resolved.
* In this loop we continually send probes, which the node
* NACKs because it's in S_PENDING
*
* if we have nodes where the cluster layer is active but the
* CRM is not... then this will be handled in the
* integration phase
*/
finalization_timer->fsa_input = I_ELECTION;
} else {
was_error = TRUE;
}
if (shutdown_escalation_timer != NULL) {
shutdown_escalation_timer->source_id = 0;
shutdown_escalation_timer->period_ms = -1;
shutdown_escalation_timer->fsa_input = I_STOP;
shutdown_escalation_timer->callback = crm_timer_popped;
shutdown_escalation_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (wait_timer != NULL) {
wait_timer->source_id = 0;
wait_timer->period_ms = 2000;
wait_timer->fsa_input = I_NULL;
wait_timer->callback = crm_timer_popped;
wait_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (recheck_timer != NULL) {
recheck_timer->source_id = 0;
recheck_timer->period_ms = -1;
recheck_timer->fsa_input = I_PE_CALC;
recheck_timer->callback = crm_timer_popped;
recheck_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (was_error) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
static int32_t
crmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
crmd_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
static int32_t
crmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *msg = crm_ipcs_recv(client, data, size, &id, &flags);
crm_trace("Invoked: %s", crm_client_name(client));
crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg == NULL) {
return 0;
}
#if ENABLE_ACL
CRM_ASSERT(client->user != NULL);
crm_acl_get_set_user(msg, F_CRM_USER, client->user);
#endif
crm_trace("Processing msg from %s", crm_client_name(client));
crm_log_xml_trace(msg, "controller[inbound]");
crm_xml_add(msg, F_CRM_SYS_FROM, client->id);
if (crmd_authorize_message(msg, client, NULL)) {
route_message(C_IPC_MESSAGE, msg);
}
trigger_fsa(fsa_source);
free_xml(msg);
return 0;
}
static int32_t
crmd_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client) {
crm_trace("Disconnecting %sregistered client %s (%p/%p)",
(client->userdata? "" : "un"), crm_client_name(client),
c, client);
free(client->userdata);
crm_client_destroy(client);
trigger_fsa(fsa_source);
}
return 0;
}
static void
crmd_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
crmd_ipc_closed(c);
}
/* A_STOP */
void
do_stop(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs); ipcs = NULL;
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* A_STARTED */
void
do_started(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static struct qb_ipcs_service_handlers crmd_callbacks = {
.connection_accept = crmd_ipc_accept,
.connection_created = crmd_ipc_created,
.msg_process = crmd_ipc_dispatch,
.connection_closed = crmd_ipc_closed,
.connection_destroyed = crmd_ipc_destroy
};
if (cur_state != S_STARTING) {
crm_err("Start cancelled... %s", fsa_state2string(cur_state));
return;
} else if (is_set(fsa_input_register, R_MEMBERSHIP) == FALSE) {
crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_LRM_CONNECTED) == FALSE) {
crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) {
crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_READ_CONFIG) == FALSE) {
crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_PEER_DATA) == FALSE) {
crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA);
crmd_fsa_stall(TRUE);
return;
}
crm_debug("Init server comms");
ipcs = crmd_ipc_server_init(&crmd_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
controld_trigger_fencer_connect();
crm_notice("Pacemaker controller successfully started and accepting connections");
clear_bit(fsa_input_register, R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
}
/* A_RECOVER */
void
do_recover(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
set_bit(fsa_input_register, R_IN_RECOVERY);
crm_warn("Fast-tracking shutdown in response to errors");
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* *INDENT-OFF* */
static pe_cluster_option crmd_opts[] = {
/* name, old-name, validate, values, default, short description, long description */
{ "dc-version", NULL, "string", NULL, "none", NULL,
"Version of Pacemaker on the cluster's DC.",
"Includes the hash which identifies the exact changeset it was built from. Used for diagnostic purposes."
},
{ "cluster-infrastructure", NULL, "string", NULL, "corosync", NULL,
"The messaging stack on which Pacemaker is currently running.",
"Used for informational and diagnostic purposes." },
{ XML_CONFIG_ATTR_DC_DEADTIME, NULL, "time", NULL, "20s", &check_time,
"How long to wait for a response from other nodes during startup.",
"The \"correct\" value will depend on the speed/load of your network and the type of switches used."
},
{ XML_CONFIG_ATTR_RECHECK, NULL, "time",
"Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified. eg. 5min)",
"15min", &check_timer,
"Polling interval for time based changes to options, resource parameters and constraints.",
"The Cluster is primarily event driven, however the configuration can have elements that change based on time."
" To ensure these changes take effect, we can optionally poll the cluster's status for changes."
},
{ "load-threshold", NULL, "percentage", NULL, "80%", &check_utilization,
"The maximum amount of system resources that should be used by nodes in the cluster",
"The cluster will slow down its recovery process when the amount of system resources used"
" (currently CPU) approaches this limit",
},
{ "node-action-limit", NULL, "integer", NULL, "0", &check_number,
"The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"},
{ XML_CONFIG_ATTR_ELECTION_FAIL, NULL, "time", NULL, "2min", &check_timer,
"*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug."
},
{ XML_CONFIG_ATTR_FORCE_QUIT, NULL, "time", NULL, "20min", &check_timer,
"*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug."
},
{
"join-integration-timeout", "crmd-integration-timeout",
"time", NULL, "3min", &check_timer,
"*** Advanced Use Only ***",
"If need to adjust this value, it probably indicates the presence of a bug"
},
{
"join-finalization-timeout", "crmd-finalization-timeout",
"time", NULL, "30min", &check_timer,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates the presence of a bug"
},
{
"transition-delay", "crmd-transition-delay",
"time", NULL, "0s", &check_timer,
"*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions",
"Delay cluster recovery for the configured interval to allow for additional/related events to occur.\n"
"Useful if your configuration is sensitive to the order in which ping updates arrive."
},
{ "stonith-watchdog-timeout", NULL, "time", NULL, NULL, &check_sbd_timeout,
"How long to wait before we can assume nodes are safely down", NULL
},
{ "stonith-max-attempts",NULL,"integer",NULL,"10",&check_positive_number,
"How many times stonith can fail before it will no longer be attempted on a target"
},
{ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL },
};
/* *INDENT-ON* */
void
crmd_metadata(void)
{
config_metadata("pacemaker-controld", "1.0",
"controller properties",
"Cluster properties used by Pacemaker's controller,"
" formerly known as crmd",
crmd_opts, DIMOF(crmd_opts));
}
static void
verify_crmd_options(GHashTable * options)
{
verify_all_options(options, crmd_opts, DIMOF(crmd_opts));
}
static const char *
crmd_pref(GHashTable * options, const char *name)
{
return get_cluster_pref(options, crmd_opts, DIMOF(crmd_opts), name);
}
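/* Editorial note, not part of the patch: crm_shutdown() below calls
 * crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT), which relies on
 * get_cluster_pref() falling back to the option's built-in default when no
 * config table is available. Sketch of the expected result, assuming the
 * "20min" default from crmd_opts[] above:
 *
 *     const char *v = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT);
 *     int msec = crm_get_msec(v);    // "20min" -> 1200000ms
 */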
static void
config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
const char *value = NULL;
GHashTable *config_hash = NULL;
crm_time_t *now = crm_time_new(NULL);
xmlNode *crmconfig = NULL;
xmlNode *alerts = NULL;
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
crm_err("The cluster is mis-configured - shutting down and staying down");
set_bit(fsa_input_register, R_STAYDOWN);
}
goto bail;
}
crmconfig = output;
if ((crmconfig) &&
(crm_element_name(crmconfig)) &&
(strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) {
crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG);
}
if (!crmconfig) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
goto bail;
}
crm_debug("Call %d : Parsing CIB options", call_id);
config_hash = crm_str_table_new();
unpack_instance_attributes(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL, config_hash,
CIB_OPTIONS_FIRST, FALSE, now);
verify_crmd_options(config_hash);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME);
election_trigger->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "node-action-limit"); /* Also checks migration-limit */
throttle_update_job_max(value);
value = crmd_pref(config_hash, "load-threshold");
if(value) {
throttle_set_load_target(strtof(value, NULL) / 100.0);
}
value = crmd_pref(config_hash, "no-quorum-policy");
if (safe_str_eq(value, "suicide") && pcmk_locate_sbd()) {
no_quorum_suicide_escalation = TRUE;
}
value = crmd_pref(config_hash,"stonith-max-attempts");
update_stonith_max_attempts(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_FORCE_QUIT);
shutdown_escalation_timer->period_ms = crm_get_msec(value);
crm_debug("Shutdown escalation occurs after: %dms", shutdown_escalation_timer->period_ms);
/* How long to declare an election over - even if not everyone voted */
value = crmd_pref(config_hash, XML_CONFIG_ATTR_ELECTION_FAIL);
controld_set_election_period(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_RECHECK);
recheck_timer->period_ms = crm_get_msec(value);
crm_debug("Checking for expired actions every %dms", recheck_timer->period_ms);
value = crmd_pref(config_hash, "transition-delay");
transition_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-integration-timeout");
integration_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-finalization-timeout");
finalization_timer->period_ms = crm_get_msec(value);
free(fsa_cluster_name);
fsa_cluster_name = NULL;
value = g_hash_table_lookup(config_hash, "cluster-name");
if (value) {
fsa_cluster_name = strdup(value);
}
alerts = first_named_child(output, XML_CIB_TAG_ALERTS);
crmd_unpack_alerts(alerts);
set_bit(fsa_input_register, R_READ_CONFIG);
crm_trace("Triggering FSA: %s", __FUNCTION__);
mainloop_set_trigger(fsa_source);
g_hash_table_destroy(config_hash);
bail:
crm_time_free(now);
}
gboolean
crm_read_options(gpointer user_data)
{
int call_id =
fsa_cib_conn->cmds->query(fsa_cib_conn,
"//" XML_CIB_TAG_CRMCONFIG " | //" XML_CIB_TAG_ALERTS,
NULL, cib_xpath | cib_scope_local);
fsa_register_cib_callback(call_id, FALSE, NULL, config_query_callback);
crm_trace("Querying the CIB... call %d", call_id);
return TRUE;
}
/* A_READCONFIG */
void
do_read_config(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
throttle_init();
mainloop_set_trigger(config_read);
}
void
crm_shutdown(int nsig)
{
if (crmd_mainloop != NULL && g_main_loop_is_running(crmd_mainloop)) {
if (is_set(fsa_input_register, R_SHUTDOWN)) {
crm_err("Escalating the shutdown");
register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL);
} else {
set_bit(fsa_input_register, R_SHUTDOWN);
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
if (shutdown_escalation_timer->period_ms < 1) {
const char *value = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT);
int msec = crm_get_msec(value);
crm_debug("Using default shutdown escalation: %dms", msec);
shutdown_escalation_timer->period_ms = msec;
}
/* can't rely on this... */
crm_notice("Shutting down cluster resource manager " CRM_XS
" limit=%dms", shutdown_escalation_timer->period_ms);
crm_timer_start(shutdown_escalation_timer);
}
} else {
crm_info("exit from shutdown");
crmd_exit(CRM_EX_OK);
}
}
diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c
index 92336e970c..242816895f 100644
--- a/daemons/controld/controld_fencing.c
+++ b/daemons/controld/controld_fencing.c
@@ -1,879 +1,949 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef HAVE_SYS_REBOOT_H
# include <unistd.h>
# include <sys/reboot.h>
#endif
+static void
+tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event);
+
/*
* stonith failure counting
*
* We don't want to get stuck in a permanent fencing loop. Keep track of the
* number of fencing failures for each target node, and the most we'll restart a
* transition for.
*/
struct st_fail_rec {
int count;
};
static unsigned long int stonith_max_attempts = 10;
static GHashTable *stonith_failures = NULL;
void
update_stonith_max_attempts(const char *value)
{
if (safe_str_eq(value, CRM_INFINITY_S)) {
stonith_max_attempts = CRM_SCORE_INFINITY;
} else {
stonith_max_attempts = crm_int_helper(value, NULL);
}
}
static gboolean
too_many_st_failures(const char *target)
{
GHashTableIter iter;
const char *key = NULL;
struct st_fail_rec *value = NULL;
if (stonith_failures == NULL) {
return FALSE;
}
if (target == NULL) {
g_hash_table_iter_init(&iter, stonith_failures);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &value)) {
if (value->count >= stonith_max_attempts) {
target = (const char*)key;
goto too_many;
}
}
} else {
value = g_hash_table_lookup(stonith_failures, target);
if ((value != NULL) && (value->count >= stonith_max_attempts)) {
goto too_many;
}
}
return FALSE;
too_many:
crm_warn("Too many failures (%d) to fence %s, giving up",
value->count, target);
return TRUE;
}
/*!
* \internal
* \brief Reset a stonith fail count
*
* \param[in] target Name of node to reset, or NULL for all
*/
void
st_fail_count_reset(const char *target)
{
if (stonith_failures == NULL) {
return;
}
if (target) {
struct st_fail_rec *rec = NULL;
rec = g_hash_table_lookup(stonith_failures, target);
if (rec) {
rec->count = 0;
}
} else {
GHashTableIter iter;
const char *key = NULL;
struct st_fail_rec *rec = NULL;
g_hash_table_iter_init(&iter, stonith_failures);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &rec)) {
rec->count = 0;
}
}
}
static void
st_fail_count_increment(const char *target)
{
struct st_fail_rec *rec = NULL;
if (stonith_failures == NULL) {
stonith_failures = crm_str_table_new();
}
rec = g_hash_table_lookup(stonith_failures, target);
if (rec) {
rec->count++;
} else {
rec = malloc(sizeof(struct st_fail_rec));
if(rec == NULL) {
return;
}
rec->count = 1;
g_hash_table_insert(stonith_failures, strdup(target), rec);
}
}
/* end stonith fail count functions */
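/* Editorial sketch, not part of the patch: the intended flow of the
 * fail-count helpers above, as a hypothetical driver. Success resets the
 * per-target budget; failure grows it until too_many_st_failures() says to
 * stop retrying.
 */
static void
handle_fence_result_sketch(const char *target, int rc)
{
    if (rc == pcmk_ok) {
        st_fail_count_reset(target);        /* success clears the count */
    } else {
        st_fail_count_increment(target);
        if (too_many_st_failures(target)) {
            /* A real caller would abort the transition with tg_stop
             * instead of scheduling another fencing attempt.
             */
        }
    }
}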
static void
cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output,
void *user_data)
{
if (rc < pcmk_ok) {
crm_err("Fencing update %d for %s: failed - %s (%d)",
call_id, (char *)user_data, pcmk_strerror(rc), rc);
crm_log_xml_warn(msg, "Failed update");
abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL);
} else {
crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data);
}
}
static void
send_stonith_update(crm_action_t *action, const char *target, const char *uuid)
{
int rc = pcmk_ok;
crm_node_t *peer = NULL;
/* We (usually) rely on the membership layer to do node_update_cluster,
* and the peer status callback to do node_update_peer, because the node
* might have already rejoined before we get the stonith result here.
*/
int flags = node_update_join | node_update_expected;
/* zero out the node-status & remove all LRM status info */
xmlNode *node_state = NULL;
CRM_CHECK(target != NULL, return);
CRM_CHECK(uuid != NULL, return);
/* Make sure the membership and join caches are accurate */
peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY);
CRM_CHECK(peer != NULL, return);
if (peer->state == NULL) {
/* Usually, we rely on the membership layer to update the cluster state
* in the CIB. However, if the node has never been seen, do it here, so
* the node is not considered unclean.
*/
flags |= node_update_cluster;
}
if (peer->uuid == NULL) {
crm_info("Recording uuid '%s' for node '%s'", uuid, target);
peer->uuid = strdup(uuid);
}
crmd_peer_down(peer, TRUE);
/* Generate a node state update for the CIB */
node_state = create_node_state_update(peer, flags, NULL, __FUNCTION__);
/* we have to mark whether or not remote nodes have already been fenced */
if (peer->flags & crm_remote_node) {
time_t now = time(NULL);
char *now_s = crm_itoa(now);
crm_xml_add(node_state, XML_NODE_IS_FENCED, now_s);
free(now_s);
}
/* Force our known ID */
crm_xml_add(node_state, XML_ATTR_UUID, uuid);
rc = fsa_cib_conn->cmds->update(fsa_cib_conn, XML_CIB_TAG_STATUS, node_state,
cib_quorum_override | cib_scope_local | cib_can_create);
/* Delay processing the trigger until the update completes */
crm_debug("Sending fencing update %d for %s", rc, target);
fsa_register_cib_callback(rc, FALSE, strdup(target), cib_fencing_updated);
/* Make sure it sticks */
/* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local); */
erase_status_tag(peer->uname, XML_CIB_TAG_LRM, cib_scope_local);
erase_status_tag(peer->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
free_xml(node_state);
return;
}
/*!
* \internal
* \brief Abort transition due to stonith failure
*
* \param[in] abort_action Whether to restart or stop transition
* \param[in] target Don't restart if this (NULL for any) has too many failures
* \param[in] reason Log this stonith action XML as abort reason (or NULL)
*/
static void
abort_for_stonith_failure(enum transition_action abort_action,
const char *target, xmlNode *reason)
{
/* If stonith repeatedly fails, we eventually give up on starting a new
* transition for that reason.
*/
if ((abort_action != tg_stop) && too_many_st_failures(target)) {
abort_action = tg_stop;
}
abort_transition(INFINITY, abort_action, "Stonith failed", reason);
}
/*
* stonith cleanup list
*
* If the DC is shot, proper notifications might not go out.
* The stonith cleanup list allows the cluster to (re-)send
* notifications once a new DC is elected.
*/
static GListPtr stonith_cleanup_list = NULL;
/*!
* \internal
* \brief Add a node to the stonith cleanup list
*
* \param[in] target Name of node to add
*/
void
add_stonith_cleanup(const char *target) {
stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target));
}
/*!
* \internal
* \brief Remove a node from the stonith cleanup list
*
* \param[in] target Name of node to remove
*/
void
remove_stonith_cleanup(const char *target)
{
GListPtr iter = stonith_cleanup_list;
while (iter != NULL) {
GListPtr tmp = iter;
char *iter_name = tmp->data;
iter = iter->next;
if (safe_str_eq(target, iter_name)) {
crm_trace("Removing %s from the cleanup list", iter_name);
stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp);
free(iter_name);
}
}
}
/*!
* \internal
* \brief Purge all entries from the stonith cleanup list
*/
void
purge_stonith_cleanup()
{
if (stonith_cleanup_list) {
GListPtr iter = NULL;
for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
char *target = iter->data;
crm_info("Purging %s from stonith cleanup list", target);
free(target);
}
g_list_free(stonith_cleanup_list);
stonith_cleanup_list = NULL;
}
}
/*!
* \internal
* \brief Send stonith updates for all entries in cleanup list, then purge it
*/
void
execute_stonith_cleanup()
{
GListPtr iter;
for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
char *target = iter->data;
crm_node_t *target_node = crm_get_peer(0, target);
const char *uuid = crm_peer_uuid(target_node);
crm_notice("Marking %s, target of a previous stonith action, as clean", target);
send_stonith_update(NULL, target, uuid);
free(target);
}
g_list_free(stonith_cleanup_list);
stonith_cleanup_list = NULL;
}
/* end stonith cleanup list functions */
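/* Editorial sketch, not part of the patch: the lifecycle the cleanup list
 * supports, per the comment introducing it. The wrapper names are
 * hypothetical; only the add/remove/execute calls come from this file.
 */
static void
on_fencing_succeeded_sketch(const char *target)
{
    add_stonith_cleanup(target);    /* remember, in case the DC was shot */
}

static void
on_target_rejoined_sketch(const char *target)
{
    remove_stonith_cleanup(target); /* the notification clearly got through */
}

static void
on_election_won_sketch(void)
{
    execute_stonith_cleanup();      /* new DC (re-)sends updates, then purges */
}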
/* stonith API client
*
* Functions that need to interact directly with the fencer via its API
*/
static stonith_t *stonith_api = NULL;
static crm_trigger_t *stonith_reconnect = NULL;
static char *te_client_id = NULL;
static gboolean
fail_incompletable_stonith(crm_graph_t *graph)
{
GListPtr lpc = NULL;
const char *task = NULL;
xmlNode *last_action = NULL;
if (graph == NULL) {
return FALSE;
}
for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
GListPtr lpc2 = NULL;
synapse_t *synapse = (synapse_t *) lpc->data;
if (synapse->confirmed) {
continue;
}
for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) {
crm_action_t *action = (crm_action_t *) lpc2->data;
if (action->type != action_type_crm || action->confirmed) {
continue;
}
task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
if (task && safe_str_eq(task, CRM_OP_FENCE)) {
action->failed = TRUE;
last_action = action->xml;
update_graph(graph, action);
crm_notice("Failing action %d (%s): fencer terminated",
action->id, ID(action->xml));
}
}
}
if (last_action != NULL) {
crm_warn("Fencer failure resulted in unrunnable actions");
abort_for_stonith_failure(tg_restart, NULL, last_action);
return TRUE;
}
return FALSE;
}
static void
tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e)
{
+ te_cleanup_stonith_history_sync(st, FALSE);
+
if (is_set(fsa_input_register, R_ST_REQUIRED)) {
crm_crit("Fencing daemon connection failed");
mainloop_set_trigger(stonith_reconnect);
} else {
crm_info("Fencing daemon disconnected");
}
if (stonith_api) {
- stonith_api->state = stonith_disconnected;
+ /* The client API won't properly reconnect notifications if they
+ * are still in the table, so remove them
+ */
+ if (stonith_api->state != stonith_disconnected) {
+ stonith_api->cmds->disconnect(st);
+ }
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
}
if (AM_I_DC) {
fail_incompletable_stonith(transition_graph);
trigger_graph();
}
}
static void
tengine_stonith_notify(stonith_t *st, stonith_event_t *st_event)
{
if (te_client_id == NULL) {
te_client_id = crm_strdup_printf("%s.%lu", crm_system_name,
(unsigned long) getpid());
}
if (st_event == NULL) {
crm_err("Notify data not found");
return;
}
crmd_alert_fencing_op(st_event);
if ((st_event->result == pcmk_ok) && safe_str_eq("on", st_event->action)) {
crm_notice("%s was successfully unfenced by %s (at the request of %s)",
st_event->target,
st_event->executioner? st_event->executioner : "<anonymous>",
st_event->origin);
/* TODO: Hook up st_event->device */
return;
} else if (safe_str_eq("on", st_event->action)) {
crm_err("Unfencing of %s by %s failed: %s (%d)",
st_event->target,
st_event->executioner? st_event->executioner : "<anonymous>",
pcmk_strerror(st_event->result), st_event->result);
return;
} else if ((st_event->result == pcmk_ok)
&& crm_str_eq(st_event->target, fsa_our_uname, TRUE)) {
crm_crit("We were allegedly just fenced by %s for %s!",
st_event->executioner? st_event->executioner : "<anonymous>",
st_event->origin); /* Dumps blackbox if enabled */
qb_log_fini(); /* Try to get the above log message to disk - somehow */
/* Get out ASAP and do not come back up.
*
* Triggering a reboot is not the worst idea either, since the
* rest of the cluster thinks we're safely down
*/
#ifdef RB_HALT_SYSTEM
reboot(RB_HALT_SYSTEM);
#endif
/*
* If reboot() fails or is not supported, coming back up will
* probably lead to a situation where the other nodes set our
* status to 'lost' because of the fencing callback and will
* discard subsequent election votes with:
*
* Election 87 (current: 5171, owner: 103): Processed vote from east-03 (Peer is not part of our cluster)
*
* So just stay dead, something is seriously messed up anyway.
*
*/
exit(CRM_EX_FATAL); // None of our wrappers since we already called qb_log_fini()
return;
}
/* Update the count of stonith failures for this target, in case we become
* DC later. The current DC has already updated its fail count in
* tengine_stonith_callback().
*/
if (!AM_I_DC && safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) {
if (st_event->result == pcmk_ok) {
st_fail_count_reset(st_event->target);
} else {
st_fail_count_increment(st_event->target);
}
}
crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s: %s "
CRM_XS " initiator=%s ref=%s",
st_event->target, st_event->result == pcmk_ok ? "" : " not",
st_event->action,
st_event->executioner ? st_event->executioner : "<anonymous>",
(st_event->client_origin? st_event->client_origin : "<unknown>"),
pcmk_strerror(st_event->result),
st_event->origin, st_event->id);
if (st_event->result == pcmk_ok) {
crm_node_t *peer = crm_find_known_peer_full(0, st_event->target, CRM_GET_PEER_ANY);
const char *uuid = NULL;
gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname);
if (peer == NULL) {
return;
}
uuid = crm_peer_uuid(peer);
crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc);
if(AM_I_DC) {
/* The DC always sends updates */
send_stonith_update(NULL, st_event->target, uuid);
/* @TODO Ideally, at this point, we'd check whether the fenced node
* hosted any guest nodes, and call remote_node_down() for them.
* Unfortunately, the controller doesn't have a simple, reliable way
* to map hosts to guests. It might be possible to track this in the
* peer cache via crm_remote_peer_cache_refresh(). For now, we rely
* on the PE creating fence pseudo-events for the guests.
*/
if (st_event->client_origin
&& safe_str_neq(st_event->client_origin, te_client_id)) {
/* Abort the current transition graph if it wasn't us
* that invoked stonith to fence someone
*/
crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target);
abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL);
}
/* Assume it was our leader if we don't currently have one */
} else if (((fsa_our_dc == NULL) || safe_str_eq(fsa_our_dc, st_event->target))
&& is_not_set(peer->flags, crm_remote_node)) {
crm_notice("Target %s our leader %s (recorded: %s)",
fsa_our_dc ? "was" : "may have been", st_event->target,
fsa_our_dc ? fsa_our_dc : "<unset>");
/* Given the CIB resyncing that occurs around elections,
* have one node update the CIB now and, if the new DC is different,
* have them do so too after the election
*/
if (we_are_executioner) {
send_stonith_update(NULL, st_event->target, uuid);
}
add_stonith_cleanup(st_event->target);
}
/* If the target is a remote node, and we host its connection,
* immediately fail all monitors so it can be recovered quickly.
* The connection won't necessarily drop when a remote node is fenced,
* so the failure might not otherwise be detected until the next poke.
*/
if (is_set(peer->flags, crm_remote_node)) {
remote_ra_fail(st_event->target);
}
crmd_peer_down(peer, TRUE);
}
}
/*!
* \brief Connect to fencer
*
* \param[in] user_data If NULL, retry failures now, otherwise retry in main loop
*
* \return TRUE
* \note If user_data is NULL, this will wait 2s between attempts, for up to
* 30 attempts, meaning the controller could be blocked as long as 58s.
*/
static gboolean
te_connect_stonith(gpointer user_data)
{
int rc = pcmk_ok;
if (stonith_api == NULL) {
stonith_api = stonith_api_new();
}
if (stonith_api->state != stonith_disconnected) {
crm_trace("Already connected to fencer, no need to retry");
return TRUE;
}
if (user_data == NULL) {
// Blocking (retry failures now until successful)
rc = stonith_api_connect_retry(stonith_api, crm_system_name, 30);
if (rc != pcmk_ok) {
crm_err("Could not connect to fencer in 30 attempts: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
}
} else {
// Non-blocking (retry failures later in main loop)
rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL);
if (rc != pcmk_ok) {
if (is_set(fsa_input_register, R_ST_REQUIRED)) {
crm_err("Fencer connection failed (will retry): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
mainloop_set_trigger(stonith_reconnect);
} else {
crm_info("Fencer connection failed (ignoring because no longer required): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
}
return TRUE;
}
}
if (rc == pcmk_ok) {
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_DISCONNECT,
tengine_stonith_connection_destroy);
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_FENCE,
tengine_stonith_notify);
+ stonith_api->cmds->register_notification(stonith_api,
+ T_STONITH_NOTIFY_HISTORY_SYNCED,
+ tengine_stonith_history_synced);
+ te_trigger_stonith_history_sync(TRUE);
+ crm_notice("Fencer successfully connected");
}
+
return TRUE;
}
/*!
\internal
\brief Schedule fencer connection attempt in main loop
*/
void
controld_trigger_fencer_connect()
{
if (stonith_reconnect == NULL) {
stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW,
te_connect_stonith,
GINT_TO_POINTER(TRUE));
}
set_bit(fsa_input_register, R_ST_REQUIRED);
mainloop_set_trigger(stonith_reconnect);
}
void
controld_disconnect_fencer(bool destroy)
{
if (stonith_api) {
// Prevent fencer connection from coming up again
clear_bit(fsa_input_register, R_ST_REQUIRED);
- stonith_api->cmds->disconnect(stonith_api);
+ if (stonith_api->state != stonith_disconnected) {
+ stonith_api->cmds->disconnect(stonith_api);
+ }
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE);
+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED);
}
if (destroy) {
if (stonith_api) {
stonith_api->cmds->free(stonith_api);
stonith_api = NULL;
}
if (stonith_reconnect) {
mainloop_destroy_trigger(stonith_reconnect);
stonith_reconnect = NULL;
}
if (te_client_id) {
free(te_client_id);
te_client_id = NULL;
}
}
}
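/*!
* \internal
* \brief Trigger dispatcher: ask the local fencer to broadcast its
*        fence history cluster-wide
*
* \return TRUE if a sync was requested, FALSE if the fencer is
*         disconnected
*/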
static gboolean
do_stonith_history_sync(gpointer user_data)
{
if (stonith_api && (stonith_api->state != stonith_disconnected)) {
stonith_history_t *history = NULL;
+ te_cleanup_stonith_history_sync(stonith_api, FALSE);
stonith_api->cmds->history(stonith_api,
st_opt_sync_call | st_opt_broadcast,
NULL, &history, 5);
stonith_history_free(history);
return TRUE;
} else {
crm_info("Skip triggering stonith history-sync as stonith is disconnected");
return FALSE;
}
}
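/*!
* \internal
* \brief Callback for the result of a fencing request we initiated
*
* Every node logs the result; only the DC additionally updates the
* transition graph, fail counts, and (for unfencing) node attributes.
*/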
static void
tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data)
{
char *uuid = NULL;
int stonith_id = -1;
int transition_id = -1;
crm_action_t *action = NULL;
int call_id = data->call_id;
int rc = data->rc;
char *userdata = data->userdata;
CRM_CHECK(userdata != NULL, return);
crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata,
pcmk_strerror(rc), rc);
if (AM_I_DC == FALSE) {
return;
}
/* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */
/* op->call_id, op->optype, op->node_name, op->op_result, */
/* (char *)op->node_list, op->private_data); */
/* filter out old STONITH actions */
CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, NULL),
goto bail);
if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid)
|| transition_graph->id != transition_id) {
crm_info("Ignoring STONITH action initiated outside of the current transition");
goto bail;
}
action = get_action(stonith_id, FALSE);
if (action == NULL) {
crm_err("Stonith action not matched");
goto bail;
}
stop_te_timer(action->timer);
if (rc == pcmk_ok) {
const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
const char *op = crm_meta_value(action->params, "stonith_action");
crm_info("Stonith operation %d for %s passed", call_id, target);
if (action->confirmed == FALSE) {
te_action_confirmed(action);
if (safe_str_eq("on", op)) {
const char *value = NULL;
char *now = crm_itoa(time(NULL));
update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE);
free(now);
value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL);
update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE);
value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE);
update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE);
} else if (action->sent_update == FALSE) {
send_stonith_update(action, target, uuid);
action->sent_update = TRUE;
}
}
st_fail_count_reset(target);
} else {
const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
enum transition_action abort_action = tg_restart;
action->failed = TRUE;
crm_notice("Stonith operation %d for %s failed (%s): aborting transition.",
call_id, target, pcmk_strerror(rc));
/* If no fence devices were available, there's no use in immediately
* checking again, so don't start a new transition in that case.
*/
if (rc == -ENODEV) {
crm_warn("No devices found in cluster to fence %s, giving up",
target);
abort_action = tg_stop;
}
/* Increment the fail count now, so abort_for_stonith_failure() can
* check it. Non-DC nodes will increment it in tengine_stonith_notify().
*/
st_fail_count_increment(target);
abort_for_stonith_failure(abort_action, target, NULL);
}
update_graph(transition_graph, action);
trigger_graph();
bail:
free(userdata);
free(uuid);
return;
}
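/*!
* \brief Execute a fencing action from a transition graph
*
* \param[in] graph  Transition graph being executed
* \param[in] action Fencing action to execute
*
* \return TRUE if the fencing request was initiated, FALSE if the
*         action was invalid
*/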
gboolean
te_fence_node(crm_graph_t *graph, crm_action_t *action)
{
int rc = 0;
const char *id = NULL;
const char *uuid = NULL;
const char *target = NULL;
const char *type = NULL;
gboolean invalid_action = FALSE;
enum stonith_call_options options = st_opt_none;
id = ID(action->xml);
target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
type = crm_meta_value(action->params, "stonith_action");
CRM_CHECK(id != NULL, invalid_action = TRUE);
CRM_CHECK(uuid != NULL, invalid_action = TRUE);
CRM_CHECK(type != NULL, invalid_action = TRUE);
CRM_CHECK(target != NULL, invalid_action = TRUE);
if (invalid_action) {
crm_log_xml_warn(action->xml, "BadAction");
return FALSE;
}
crm_notice("Requesting fencing (%s) of node %s "
CRM_XS " action=%s timeout=%d",
type, target, id, transition_graph->stonith_timeout);
/* Passing NULL means block until we can connect... */
te_connect_stonith(NULL);
if (crmd_join_phase_count(crm_join_confirmed) == 1) {
options |= st_opt_allow_suicide;
}
rc = stonith_api->cmds->fence(stonith_api, options, target, type,
transition_graph->stonith_timeout / 1000, 0);
stonith_api->cmds->register_callback(stonith_api, rc, transition_graph->stonith_timeout / 1000,
st_opt_timeout_updates,
generate_transition_key(transition_graph->id, action->id,
0, te_uuid),
"tengine_stonith_callback", tengine_stonith_callback);
return TRUE;
}
/* end stonith API client functions */
/*
* stonith history synchronization
*
* Each node's fencer keeps track of a cluster-wide fencing history. When a node
* joins or leaves, we need to synchronize the history across all nodes.
*/
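/* Rough flow of a sync: after a successful fencer connection, the
* controller arms a long (30s) fallback timer, expecting the DC to
* trigger a sync sooner (5s timer). Either timer fires
* do_stonith_history_sync(), which asks the local fencer to broadcast
* its history; the fencers merge what they receive and broadcast any
* diff they have on top, and a T_STONITH_NOTIFY_HISTORY_SYNCED
* notification stops the timers again.
*/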
static crm_trigger_t *stonith_history_sync_trigger = NULL;
-static mainloop_timer_t *stonith_history_sync_timer = NULL;
+static mainloop_timer_t *stonith_history_sync_timer_short = NULL;
+static mainloop_timer_t *stonith_history_sync_timer_long = NULL;
+
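+/*!
+ * \internal
+ * \brief Stop (or free) the history-sync timers and drop the sync
+ *        notification from a fencer connection
+ *
+ * \param[in] st          Fencer connection to drop the notification
+ *                        from (may be NULL)
+ * \param[in] free_timers Whether to free the timers rather than just
+ *                        stop them
+ */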
+void
+te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers)
+{
+ if (free_timers) {
+ mainloop_timer_del(stonith_history_sync_timer_short);
+ stonith_history_sync_timer_short = NULL;
+ mainloop_timer_del(stonith_history_sync_timer_long);
+ stonith_history_sync_timer_long = NULL;
+ } else {
+ mainloop_timer_stop(stonith_history_sync_timer_short);
+ mainloop_timer_stop(stonith_history_sync_timer_long);
+ }
+
+ if (st) {
+ st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED);
+ }
+}
+
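+/*!
+ * \internal
+ * \brief Notification callback: fence history is in sync, so stop the
+ *        fallback timers
+ */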
+static void
+tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event)
+{
+ te_cleanup_stonith_history_sync(st, FALSE);
+ crm_debug("Fence-history synced - cancel all timers");
+}
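// Timer callback: kick the history-sync trigger in the main loop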
static gboolean
stonith_history_sync_set_trigger(gpointer user_data)
{
mainloop_set_trigger(stonith_history_sync_trigger);
return FALSE;
}
void
-te_trigger_stonith_history_sync(void)
+te_trigger_stonith_history_sync(bool long_timeout)
{
/* Trigger a sync in 5s, giving more nodes a chance to show up so
* that we don't create unnecessary stonith-history-sync traffic.
+ *
+ * The long timeout of 30s is a fallback: after a successful
+ * connection to the fencer, we wait up to 30s for the DC to trigger
+ * a history-sync. If that doesn't happen (e.g. the fencer
+ * segfaulted and was restarted by pacemakerd), we trigger a sync
+ * locally.
*/
/* Since do_stonith_history_sync() checks the fencer connection
* itself, we are fine leaving stonith_history_sync_trigger and the
* timers around.
*/
if (stonith_history_sync_trigger == NULL) {
stonith_history_sync_trigger =
mainloop_add_trigger(G_PRIORITY_LOW,
do_stonith_history_sync, NULL);
}
- if(stonith_history_sync_timer == NULL) {
- stonith_history_sync_timer =
- mainloop_timer_add("history_sync", 5000,
- FALSE, stonith_history_sync_set_trigger,
- NULL);
+ if (long_timeout) {
+ if(stonith_history_sync_timer_long == NULL) {
+ stonith_history_sync_timer_long =
+ mainloop_timer_add("history_sync_long", 30000,
+ FALSE, stonith_history_sync_set_trigger,
+ NULL);
+ }
+ crm_info("Fence history will be synchronized cluster-wide within 30 seconds");
+ mainloop_timer_start(stonith_history_sync_timer_long);
+ } else {
+ if(stonith_history_sync_timer_short == NULL) {
+ stonith_history_sync_timer_short =
+ mainloop_timer_add("history_sync_short", 5000,
+ FALSE, stonith_history_sync_set_trigger,
+ NULL);
+ }
+ crm_info("Fence history will be synchronized cluster-wide within 5 seconds");
+ mainloop_timer_start(stonith_history_sync_timer_short);
}
- crm_info("Fence history will be synchronized cluster-wide within 5 seconds");
- mainloop_timer_start(stonith_history_sync_timer);
+
}
/* end stonith history synchronization functions */
diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h
index 8f7f19b40c..2fe6d88d4a 100644
--- a/daemons/controld/controld_fencing.h
+++ b/daemons/controld/controld_fencing.h
@@ -1,34 +1,35 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CONTROLD_FENCING__H
# define CONTROLD_FENCING__H
#include <stdbool.h> // bool
#include <crm/transition.h> // crm_graph_t, crm_action_t
// stonith fail counts
void st_fail_count_reset(const char * target);
void update_stonith_max_attempts(const char* value);
// stonith API client
void controld_trigger_fencer_connect(void);
void controld_disconnect_fencer(bool destroy);
gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action);
// stonith cleanup list
void add_stonith_cleanup(const char *target);
void remove_stonith_cleanup(const char *target);
void purge_stonith_cleanup(void);
void execute_stonith_cleanup(void);
// stonith history synchronization
-void te_trigger_stonith_history_sync(void);
+void te_trigger_stonith_history_sync(bool long_timeout);
+void te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers);
#endif
diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c
index 7c129cca32..cd08d7460d 100644
--- a/daemons/fenced/fenced_history.c
+++ b/daemons/fenced/fenced_history.c
@@ -1,467 +1,487 @@
/*
* Copyright 2009-2018 Andrew Beekhof
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
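// Once the fence history exceeds this many entries, trim it to half this size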
#define MAX_STONITH_HISTORY 500
/*!
* \internal
* \brief Send a broadcast to all nodes to trigger cleanup or
*        history synchronization
*
* \param[in] history  Optional history to be attached
* \param[in] callopts Call options; cleanup is requested via a flag here
* \param[in] target   If set, limit cleanup to this fence target
*/
static void
stonith_send_broadcast_history(xmlNode *history,
int callopts,
const char *target)
{
xmlNode *bcast = create_xml_node(NULL, "stonith_command");
xmlNode *data = create_xml_node(NULL, __FUNCTION__);
if (target) {
crm_xml_add(data, F_STONITH_TARGET, target);
}
crm_xml_add(bcast, F_TYPE, T_STONITH_NG);
crm_xml_add(bcast, F_SUBTYPE, "broadcast");
crm_xml_add(bcast, F_STONITH_OPERATION, STONITH_OP_FENCE_HISTORY);
crm_xml_add_int(bcast, F_STONITH_CALLOPTS, callopts);
if (history) {
add_node_copy(data, history);
}
add_message_xml(bcast, F_STONITH_CALLDATA, data);
send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
free_xml(data);
free_xml(bcast);
}
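/*!
* \internal
* \brief g_hash_table_foreach_remove() callback: flag completed entries
*        (optionally only those for a given target) for removal, while
*        always keeping pending operations
*/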
static gboolean
stonith_remove_history_entry (gpointer key,
gpointer value,
gpointer user_data)
{
remote_fencing_op_t *op = value;
const char *target = (const char *) user_data;
if ((op->state == st_failed) || (op->state == st_done)) {
if ((target) && (strcmp(op->target, target) != 0)) {
return FALSE;
}
return TRUE;
}
return FALSE; /* don't clean pending operations */
}
/*!
* \internal
* \brief Send out a cleanup broadcast or do a local history-cleanup
*
* \param[in] target    If set, limit cleanup to this fence target
* \param[in] broadcast Whether to send out a cleanup broadcast (rather
*                      than cleaning only the local history)
*/
static void
stonith_fence_history_cleanup(const char *target,
gboolean broadcast)
{
if (broadcast) {
stonith_send_broadcast_history(NULL,
st_opt_cleanup | st_opt_discard_reply,
target);
/* we'll do the local clean when we receive back our own broadcast */
} else if (stonith_remote_op_list) {
g_hash_table_foreach_remove(stonith_remote_op_list,
stonith_remove_history_entry,
(gpointer) target);
do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
}
}
/* keeping the length of fence-history within bounds
* =================================================
*
* If things are really running wild, a lot of fencing attempts can
* fill up the hash table, eventually using a lot of memory and
* creating huge history-sync messages. Before the history was synced
* across nodes, at least the reboot of a cluster node kept its
* history within bounds, though not in a reliable manner.
*
* stonith_remote_op_list isn't sorted by time stamp, so it would be
* rather expensive to delete, say, just the oldest entry whenever the
* list grows past MAX_STONITH_HISTORY entries. It is more efficient
* to purge MAX_STONITH_HISTORY/2 entries whenever the list grows
* beyond MAX_STONITH_HISTORY (sort by age, then purge the oldest
* half). Doing that on a per-node basis might raise the probability
* of large syncs occurring; alternatives such as a broadcast to purge
* MAX_STONITH_HISTORY/2 entries, or not syncing above a certain
* threshold, come to mind. The simplest approach, purging the full
* history throughout the cluster once MAX_STONITH_HISTORY is reached,
* would discard the history in situations where it would probably be
* handy to have it.
*/
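/*!
* \internal
* \brief qsort() comparator: pending operations sort first, then
*        completed operations from most recent to oldest
*
* With this ordering, the trim below keeps roughly the first
* MAX_STONITH_HISTORY/2 entries: e.g. with a limit of 500, all pending
* operations plus the most recent completed ones, up to 250 in total
* (pending operations are never purged).
*/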
static int
op_time_sort(const void *a_voidp, const void *b_voidp)
{
const remote_fencing_op_t **a = (const remote_fencing_op_t **) a_voidp;
const remote_fencing_op_t **b = (const remote_fencing_op_t **) b_voidp;
gboolean a_pending = ((*a)->state != st_failed) && ((*a)->state != st_done);
gboolean b_pending = ((*b)->state != st_failed) && ((*b)->state != st_done);
if (a_pending && b_pending) {
return 0;
} else if (a_pending) {
return -1;
} else if (b_pending) {
return 1;
} else if ((*b)->completed == (*a)->completed) {
return 0;
} else if ((*b)->completed > (*a)->completed) {
return 1;
}
return -1;
}
/*!
* \internal
* \brief Trim the local history to MAX_STONITH_HISTORY / 2 entries
*        once it exceeds MAX_STONITH_HISTORY
*/
void
stonith_fence_history_trim(void)
{
guint num_ops;
if (!stonith_remote_op_list) {
return;
}
num_ops = g_hash_table_size(stonith_remote_op_list);
if (num_ops > MAX_STONITH_HISTORY) {
remote_fencing_op_t *ops[num_ops];
remote_fencing_op_t *op = NULL;
GHashTableIter iter;
int i;
crm_trace("Fencing History growing beyond limit of %d so purge "
"half of failed/successful attempts", MAX_STONITH_HISTORY);
/* write all ops into an array */
i = 0;
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
ops[i++] = op;
}
/* run quicksort over the array so that we get pending ops
* first and then sorted most recent to oldest
*/
qsort(ops, num_ops, sizeof(remote_fencing_op_t *), op_time_sort);
/* purge the oldest half of the history entries */
for (i = MAX_STONITH_HISTORY / 2; i < num_ops; i++) {
/* keep pending ops even if they shouldn't fill more than
* half of our buffer
*/
if ((ops[i]->state == st_failed) || (ops[i]->state == st_done)) {
g_hash_table_remove(stonith_remote_op_list, ops[i]->id);
}
}
/* we've just purged valid data from the list so there is no need
* to create a notification - if displayed it can stay
*/
}
}
/*!
* \internal
* \brief Convert xml fence-history to a hash-table like stonith_remote_op_list
*
* \param[in] history Fence-history in xml
*
* \return Fence-history as hash-table
*/
static GHashTable *
stonith_xml_history_to_list(xmlNode *history)
{
xmlNode *xml_op = NULL;
GHashTable *rv = NULL;
init_stonith_remote_op_hash_table(&rv);
CRM_LOG_ASSERT(rv != NULL);
for (xml_op = __xml_first_child(history); xml_op != NULL;
xml_op = __xml_next(xml_op)) {
remote_fencing_op_t *op = NULL;
char *id = crm_element_value_copy(xml_op, F_STONITH_REMOTE_OP_ID);
int completed, state;
if (!id) {
crm_warn("History to convert to hashtable has no id in entry");
continue;
}
crm_trace("Attaching op %s to hashtable", id);
op = calloc(1, sizeof(remote_fencing_op_t));
op->id = id;
op->target = crm_element_value_copy(xml_op, F_STONITH_TARGET);
op->action = crm_element_value_copy(xml_op, F_STONITH_ACTION);
op->originator = crm_element_value_copy(xml_op, F_STONITH_ORIGIN);
op->delegate = crm_element_value_copy(xml_op, F_STONITH_DELEGATE);
op->client_name = crm_element_value_copy(xml_op, F_STONITH_CLIENTNAME);
crm_element_value_int(xml_op, F_STONITH_DATE, &completed);
op->completed = (time_t) completed;
crm_element_value_int(xml_op, F_STONITH_STATE, &state);
op->state = (enum op_state) state;
g_hash_table_replace(rv, id, op);
CRM_LOG_ASSERT(g_hash_table_lookup(rv, id) != NULL);
}
return rv;
}
/*!
* \internal
* \brief Craft the XML difference between the local fence-history and a
*        history coming from a remote node
*
* \param[in] remote_history Remote fence-history as hash-table (may be NULL)
* \param[in] add_id         Whether to include entry IDs (not needed when
*                           crafting the answer to an API history-request)
* \param[in] target         Optionally limit to a certain fence-target
*
* \return The fence-history as xml
*/
static xmlNode *
stonith_local_history_diff(GHashTable *remote_history,
gboolean add_id,
const char *target)
{
xmlNode *history = NULL;
int cnt = 0;
if (stonith_remote_op_list) {
GHashTableIter iter;
remote_fencing_op_t *op = NULL;
history = create_xml_node(NULL, F_STONITH_HISTORY_LIST);
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
xmlNode *entry = NULL;
if (remote_history &&
g_hash_table_lookup(remote_history, op->id)) {
continue; /* skip entries broadcasted already */
}
if (target && strcmp(op->target, target) != 0) {
continue;
}
cnt++;
crm_trace("Attaching op %s", op->id);
entry = create_xml_node(history, STONITH_OP_EXEC);
if (add_id) {
crm_xml_add(entry, F_STONITH_REMOTE_OP_ID, op->id);
}
crm_xml_add(entry, F_STONITH_TARGET, op->target);
crm_xml_add(entry, F_STONITH_ACTION, op->action);
crm_xml_add(entry, F_STONITH_ORIGIN, op->originator);
crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate);
crm_xml_add(entry, F_STONITH_CLIENTNAME, op->client_name);
crm_xml_add_int(entry, F_STONITH_DATE, op->completed);
crm_xml_add_int(entry, F_STONITH_STATE, op->state);
}
}
if (cnt == 0) {
free_xml(history);
return NULL;
} else {
return history;
}
}
/*!
* \internal
* \brief Merge fence-history coming from remote into local history
*
* \param[in] history Hash-table holding remote history to be merged in
*/
static void
stonith_merge_in_history_list(GHashTable *history)
{
GHashTableIter iter;
remote_fencing_op_t *op = NULL;
gboolean updated = FALSE;
if (!history) {
return;
}
init_stonith_remote_op_hash_table(&stonith_remote_op_list);
g_hash_table_iter_init(&iter, history);
while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
remote_fencing_op_t *stored_op =
g_hash_table_lookup(stonith_remote_op_list, op->id);
if (stored_op) {
continue; // Skip existing entries (@TODO state-merging might be desirable)
}
updated = TRUE;
g_hash_table_iter_steal(&iter);
+
+ if ((op->state != st_failed) &&
+ (op->state != st_done) &&
+ safe_str_eq(op->originator, stonith_our_uname)) {
+ crm_warn("Received a pending action we are supposed to own "
+ "but that is not in our records -> failing it");
+ op->state = st_failed;
+ op->completed = time(NULL);
+ /* Use -EHOSTUNREACH so as not to introduce a new return code that
+ * might trigger unexpected results elsewhere, and to prevent
+ * remote_op_done() from setting the delegate if not present
+ */
+ stonith_bcast_result_to_peers(op, -EHOSTUNREACH);
+ }
+
g_hash_table_insert(stonith_remote_op_list, op->id, op);
/* We could trim the history here, but if we trimmed and then bailed
* out we might miss more recent entries of those still in the
* incoming list. Not bailing out and trimming just once at the end
* is more efficient anyway, and the memory overhead is minimal as
* we are just moving pointers from one hash table to another.
*/
}
stonith_fence_history_trim();
if (updated) {
do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
}
g_hash_table_destroy(history); /* remove what is left */
}
/*!
* \internal
* \brief Handle fence-history messages (either from the API or coming in
*        as broadcasts)
*
* \param[in] msg         Request message
* \param[in] output      Where to store the reply crafted for an API request
* \param[in] remote_peer Name of the peer that sent the message, if any
* \param[in] options     Call options from the request
*
* \return Always success, as there is actually nothing that can go wrong
*/
int
stonith_fence_history(xmlNode *msg, xmlNode **output,
const char *remote_peer, int options)
{
int rc = 0;
const char *target = NULL;
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_TRACE);
xmlNode *out_history = NULL;
if (dev) {
target = crm_element_value(dev, F_STONITH_TARGET);
if (target && (options & st_opt_cs_nodeid)) {
int nodeid = crm_atoi(target, NULL);
crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
if (node) {
target = node->uname;
}
}
}
if (options & st_opt_cleanup) {
crm_trace("Cleaning up operations on %s in %p", target,
stonith_remote_op_list);
stonith_fence_history_cleanup(target,
crm_element_value(msg, F_STONITH_CALLID) != NULL);
} else if (options & st_opt_broadcast) {
+ /* There is currently no clear sign of when a history sync
+ * is complete, so send a notification for anything that
+ * looks like a history sync
+ */
+ do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY_SYNCED, 0, NULL);
if (crm_element_value(msg, F_STONITH_CALLID)) {
/* this is coming from the stonith-API
*
* craft a broadcast with node's history
* so that every node can merge and broadcast
* what it has on top
*/
out_history = stonith_local_history_diff(NULL, TRUE, NULL);
crm_trace("Broadcasting history to peers");
stonith_send_broadcast_history(out_history,
st_opt_broadcast | st_opt_discard_reply,
NULL);
} else if (remote_peer &&
!safe_str_eq(remote_peer, stonith_our_uname)) {
xmlNode *history =
get_xpath_object("//" F_STONITH_HISTORY_LIST, msg, LOG_TRACE);
GHashTable *received_history =
history?stonith_xml_history_to_list(history):NULL;
/* This is either a broadcast created directly upon a stonith-API
* request, or a diff sent in response to such a broadcast;
* in both cases it may or may not carry a history.
*
* If we have differential data, merge in what we've received and
* stop; otherwise broadcast what we have on top, marking it as
* differential, and merge in afterwards.
*/
if (!history ||
!crm_is_true(crm_element_value(history,
F_STONITH_DIFFERENTIAL))) {
out_history =
stonith_local_history_diff(received_history, TRUE, NULL);
if (out_history) {
crm_trace("Broadcasting history-diff to peers");
crm_xml_add(out_history, F_STONITH_DIFFERENTIAL,
XML_BOOLEAN_TRUE);
stonith_send_broadcast_history(out_history,
st_opt_broadcast | st_opt_discard_reply,
NULL);
} else {
crm_trace("History-diff is empty - skip broadcast");
}
}
stonith_merge_in_history_list(received_history);
} else {
crm_trace("Skipping history-query-broadcast (%s%s)"
" we sent ourselves",
remote_peer?"remote-peer=":"local-ipc",
remote_peer?remote_peer:"");
}
} else {
/* plain history request */
crm_trace("Looking for operations on %s in %p", target,
stonith_remote_op_list);
*output = stonith_local_history_diff(NULL, FALSE, target);
}
free_xml(out_history);
return rc;
}
diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index 7d612499e9..5b86f0f78a 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -1,2059 +1,2059 @@
/*
* Copyright 2009-2018 Andrew Beekhof
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define TIMEOUT_MULTIPLY_FACTOR 1.2
/* When one fencer queries its peers for devices able to handle a fencing
* request, each peer will reply with a list of such devices available to it.
* Each reply will be parsed into a st_query_result_t, with each device's
* information kept in a device_properties_t.
*/
typedef struct device_properties_s {
/* Whether access to this device has been verified */
gboolean verified;
/* The remaining members are indexed by the operation's "phase" */
/* Whether this device has been executed in each phase */
gboolean executed[st_phase_max];
/* Whether this device is disallowed from executing in each phase */
gboolean disallowed[st_phase_max];
/* Action-specific timeout for each phase */
int custom_action_timeout[st_phase_max];
/* Action-specific maximum random delay for each phase */
int delay_max[st_phase_max];
/* Action-specific base delay for each phase */
int delay_base[st_phase_max];
} device_properties_t;
typedef struct st_query_result_s {
/* Name of peer that sent this result */
char *host;
/* Only try peers for non-topology based operations once */
gboolean tried;
/* Number of entries in the devices table */
int ndevices;
/* Devices available to this host that are capable of fencing the target */
GHashTable *devices;
} st_query_result_t;
GHashTable *stonith_remote_op_list = NULL;
void call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer);
static void remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup);
extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
int call_options);
static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
static int get_op_total_timeout(const remote_fencing_op_t *op,
const st_query_result_t *chosen_peer);
static gint
sort_strings(gconstpointer a, gconstpointer b)
{
return strcmp(a, b);
}
static void
free_remote_query(gpointer data)
{
if (data) {
st_query_result_t *query = data;
crm_trace("Free'ing query result from %s", query->host);
g_hash_table_destroy(query->devices);
free(query->host);
free(query);
}
}
void
free_stonith_remote_op_list()
{
if (stonith_remote_op_list != NULL) {
g_hash_table_destroy(stonith_remote_op_list);
stonith_remote_op_list = NULL;
}
}
struct peer_count_data {
const remote_fencing_op_t *op;
gboolean verified_only;
int count;
};
/*!
* \internal
* \brief Increment a counter if a device has not been executed yet
*
* \param[in] key Device ID (ignored)
* \param[in] value Device properties
* \param[in] user_data Peer count data
*/
static void
count_peer_device(gpointer key, gpointer value, gpointer user_data)
{
device_properties_t *props = (device_properties_t*)value;
struct peer_count_data *data = user_data;
if (!props->executed[data->op->phase]
&& (!data->verified_only || props->verified)) {
++(data->count);
}
}
/*!
* \internal
* \brief Check the number of available devices in a peer's query results
*
* \param[in] op Operation that results are for
* \param[in] peer Peer to count
* \param[in] verified_only Whether to count only verified devices
*
* \return Number of devices available to peer that were not already executed
*/
static int
count_peer_devices(const remote_fencing_op_t *op, const st_query_result_t *peer,
gboolean verified_only)
{
struct peer_count_data data;
data.op = op;
data.verified_only = verified_only;
data.count = 0;
if (peer) {
g_hash_table_foreach(peer->devices, count_peer_device, &data);
}
return data.count;
}
/*!
* \internal
* \brief Search for a device in a query result
*
* \param[in] op Operation that result is for
* \param[in] peer Query result for a peer
* \param[in] device Device ID to search for
*
* \return Device properties if found, NULL otherwise
*/
static device_properties_t *
find_peer_device(const remote_fencing_op_t *op, const st_query_result_t *peer,
const char *device)
{
device_properties_t *props = g_hash_table_lookup(peer->devices, device);
return (props && !props->executed[op->phase]
&& !props->disallowed[op->phase])? props : NULL;
}
/*!
* \internal
* \brief Find a device in a peer's device list and mark it as executed
*
* \param[in] op Operation that peer result is for
* \param[in,out] peer Peer with results to search
* \param[in] device ID of device to mark as done
* \param[in] verified_devices_only Only consider verified devices
*
* \return TRUE if device was found and marked, FALSE otherwise
*/
static gboolean
grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer,
const char *device, gboolean verified_devices_only)
{
device_properties_t *props = find_peer_device(op, peer, device);
if ((props == NULL) || (verified_devices_only && !props->verified)) {
return FALSE;
}
crm_trace("Removing %s from %s (%d remaining)",
device, peer->host, count_peer_devices(op, peer, FALSE));
props->executed[op->phase] = TRUE;
return TRUE;
}
static void
clear_remote_op_timers(remote_fencing_op_t * op)
{
if (op->query_timer) {
g_source_remove(op->query_timer);
op->query_timer = 0;
}
if (op->op_timer_total) {
g_source_remove(op->op_timer_total);
op->op_timer_total = 0;
}
if (op->op_timer_one) {
g_source_remove(op->op_timer_one);
op->op_timer_one = 0;
}
}
static void
free_remote_op(gpointer data)
{
remote_fencing_op_t *op = data;
crm_trace("Free'ing op %s for %s", op->id, op->target);
crm_log_xml_debug(op->request, "Destroying");
clear_remote_op_timers(op);
free(op->id);
free(op->action);
free(op->delegate);
free(op->target);
free(op->client_id);
free(op->client_name);
free(op->originator);
if (op->query_results) {
g_list_free_full(op->query_results, free_remote_query);
}
if (op->request) {
free_xml(op->request);
op->request = NULL;
}
if (op->devices_list) {
g_list_free_full(op->devices_list, free);
op->devices_list = NULL;
}
g_list_free_full(op->automatic_list, free);
g_list_free(op->duplicates);
free(op);
}
void
init_stonith_remote_op_hash_table(GHashTable **table)
{
if (*table == NULL) {
*table = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_remote_op);
}
}
/*!
* \internal
* \brief Return an operation's originally requested action (before any remap)
*
* \param[in] op Operation to check
*
* \return Operation's original action
*/
static const char *
op_requested_action(const remote_fencing_op_t *op)
{
return ((op->phase > st_phase_requested)? "reboot" : op->action);
}
/*!
* \internal
* \brief Remap a "reboot" operation to the "off" phase
*
* \param[in,out] op Operation to remap
*/
static void
op_phase_off(remote_fencing_op_t *op)
{
crm_info("Remapping multiple-device reboot of %s (%s) to off",
op->target, op->id);
op->phase = st_phase_off;
/* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
* memory allocation at each phase.
*/
strcpy(op->action, "off");
}
/*!
* \internal
* \brief Advance a remapped reboot operation to the "on" phase
*
* \param[in,out] op Operation to remap
*/
static void
op_phase_on(remote_fencing_op_t *op)
{
GListPtr iter = NULL;
crm_info("Remapped off of %s complete, remapping to on for %s.%.8s",
op->target, op->client_name, op->id);
op->phase = st_phase_on;
strcpy(op->action, "on");
/* Skip devices with automatic unfencing, because the cluster will handle it
* when the node rejoins.
*/
for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
GListPtr match = g_list_find_custom(op->devices_list, iter->data,
sort_strings);
if (match) {
op->devices_list = g_list_remove(op->devices_list, match->data);
}
}
g_list_free_full(op->automatic_list, free);
op->automatic_list = NULL;
/* Rewind device list pointer */
op->devices = op->devices_list;
}
/*!
* \internal
* \brief Reset a remapped reboot operation
*
* \param[in,out] op Operation to reset
*/
static void
undo_op_remap(remote_fencing_op_t *op)
{
if (op->phase > 0) {
crm_info("Undoing remap of reboot of %s for %s.%.8s",
op->target, op->client_name, op->id);
op->phase = st_phase_requested;
strcpy(op->action, "reboot");
}
}
static xmlNode *
create_op_done_notify(remote_fencing_op_t * op, int rc)
{
xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
crm_xml_add_int(notify_data, "state", op->state);
crm_xml_add_int(notify_data, F_STONITH_RC, rc);
crm_xml_add(notify_data, F_STONITH_TARGET, op->target);
crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id);
crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id);
crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name);
return notify_data;
}
-static void
-bcast_result_to_peers(remote_fencing_op_t * op, int rc)
+void
+stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc)
{
static int count = 0;
xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
xmlNode *notify_data = create_op_done_notify(op, rc);
count++;
crm_trace("Broadcasting result to peers");
crm_xml_add(bcast, F_TYPE, T_STONITH_NOTIFY);
crm_xml_add(bcast, F_SUBTYPE, "broadcast");
crm_xml_add(bcast, F_STONITH_OPERATION, T_STONITH_NOTIFY);
crm_xml_add_int(bcast, "count", count);
add_message_xml(bcast, F_STONITH_CALLDATA, notify_data);
send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
free_xml(notify_data);
free_xml(bcast);
return;
}
static void
handle_local_reply_and_notify(remote_fencing_op_t * op, xmlNode * data, int rc)
{
xmlNode *notify_data = NULL;
xmlNode *reply = NULL;
if (op->notify_sent == TRUE) {
/* nothing to do */
return;
}
/* Do notification with a clean data object */
notify_data = create_op_done_notify(op, rc);
crm_xml_add_int(data, "state", op->state);
crm_xml_add(data, F_STONITH_TARGET, op->target);
crm_xml_add(data, F_STONITH_OPERATION, op->action);
reply = stonith_construct_reply(op->request, NULL, data, rc);
crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);
/* Send fencing OP reply to local client that initiated fencing */
do_local_reply(reply, op->client_id, op->call_options & st_opt_sync_call, FALSE);
/* bcast to all local clients that the fencing operation happened */
do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
/* mark this op as having notify's already sent */
op->notify_sent = TRUE;
free_xml(reply);
free_xml(notify_data);
}
static void
handle_duplicates(remote_fencing_op_t * op, xmlNode * data, int rc)
{
GListPtr iter = NULL;
for (iter = op->duplicates; iter != NULL; iter = iter->next) {
remote_fencing_op_t *other = iter->data;
if (other->state == st_duplicate) {
other->state = op->state;
crm_debug("Performing duplicate notification for %s@%s.%.8s = %s",
other->client_name, other->originator, other->id,
pcmk_strerror(rc));
remote_op_done(other, data, rc, TRUE);
} else {
// Possible if (for example) it timed out already
crm_err("Skipping duplicate notification for %s@%s - %d", other->client_name,
other->originator, other->state);
}
}
}
/*!
* \internal
* \brief Finalize a remote operation.
*
* \description This function has two code paths.
*
* Path 1. This node is the owner of the operation and needs
* to notify the cpg group via a broadcast as to the operation's
* results.
*
* Path 2. The cpg broadcast is received. All nodes notify their local
*         stonith clients of the operation's results.
*
* So, the owner of the operation first notifies the cluster of the result,
* and once that cpg notification is received back, it notifies all the
* local clients.
*
* Nodes that are passive watchers of the operation will receive the
* broadcast and only need to notify their local clients that the operation
* finished.
*
* \param[in] op   The fencing operation to finalize
* \param[in] data The XML message reply (if present) of the last delegated
*                 fencing operation
* \param[in] rc   Result of the operation
* \param[in] dup  Whether this operation is a duplicate (if so, treat it a
*                 little differently, making sure the broadcast is not sent
*                 out again)
*/
static void
remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup)
{
int level = LOG_ERR;
const char *subt = NULL;
xmlNode *local_data = NULL;
op->completed = time(NULL);
clear_remote_op_timers(op);
undo_op_remap(op);
if (op->notify_sent == TRUE) {
crm_err("Already sent notifications for '%s of %s by %s' (for=%s@%s.%.8s, state=%d): %s",
op->action, op->target, op->delegate ? op->delegate : "",
op->client_name, op->originator, op->id, op->state, pcmk_strerror(rc));
goto remote_op_done_cleanup;
}
if (!op->delegate && data && rc != -ENODEV && rc != -EHOSTUNREACH) {
xmlNode *ndata = get_xpath_object("//@" F_STONITH_DELEGATE, data, LOG_TRACE);
if(ndata) {
op->delegate = crm_element_value_copy(ndata, F_STONITH_DELEGATE);
} else {
op->delegate = crm_element_value_copy(data, F_ORIG);
}
}
if (data == NULL) {
data = create_xml_node(NULL, "remote-op");
local_data = data;
}
/* Tell everyone the operation is done; we will continue
* with the local notifications once we receive
* the broadcast back. */
subt = crm_element_value(data, F_SUBTYPE);
if (dup == FALSE && safe_str_neq(subt, "broadcast")) {
/* Defer notification until the bcast message arrives */
- bcast_result_to_peers(op, rc);
+ stonith_bcast_result_to_peers(op, rc);
goto remote_op_done_cleanup;
}
if (rc == pcmk_ok || dup) {
level = LOG_NOTICE;
} else if (safe_str_neq(op->originator, stonith_our_uname)) {
level = LOG_NOTICE;
}
do_crm_log(level,
"Operation %s of %s by %s for %s@%s.%.8s: %s",
op->action, op->target, op->delegate ? op->delegate : "",
op->client_name, op->originator, op->id, pcmk_strerror(rc));
handle_local_reply_and_notify(op, data, rc);
if (dup == FALSE) {
handle_duplicates(op, data, rc);
}
/* Free non-essential parts of the record
* Keep the record around so we can query the history
*/
if (op->query_results) {
g_list_free_full(op->query_results, free_remote_query);
op->query_results = NULL;
}
if (op->request) {
free_xml(op->request);
op->request = NULL;
}
remote_op_done_cleanup:
free_xml(local_data);
}
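// Timer callback: watchdog fencing is assumed complete once its timeout expires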
static gboolean
remote_op_watchdog_done(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_one = 0;
crm_notice("Self-fencing (%s) by %s for %s.%8s assumed complete",
op->action, op->target, op->client_name, op->id);
op->state = st_done;
remote_op_done(op, NULL, pcmk_ok, FALSE);
return FALSE;
}
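// Timer callback: a single peer's fencing attempt timed out, so try the next one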
static gboolean
remote_op_timeout_one(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_one = 0;
crm_notice("Peer's fencing (%s) of %s for %s timed out" CRM_XS "id=%s",
op->action, op->target, op->client_name, op->id);
call_remote_stonith(op, NULL);
return FALSE;
}
static gboolean
remote_op_timeout(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_total = 0;
if (op->state == st_done) {
crm_debug("Action %s (%s) for %s (%s) already completed",
op->action, op->id, op->target, op->client_name);
return FALSE;
}
crm_debug("Action %s (%s) for %s (%s) timed out",
op->action, op->id, op->target, op->client_name);
if (op->phase == st_phase_on) {
/* A remapped reboot operation timed out in the "on" phase, but the
* "off" phase completed successfully, so quit trying any further
* devices, and return success.
*/
remote_op_done(op, NULL, pcmk_ok, FALSE);
return FALSE;
}
op->state = st_failed;
remote_op_done(op, NULL, -ETIME, FALSE);
return FALSE;
}
static gboolean
remote_op_query_timeout(gpointer data)
{
remote_fencing_op_t *op = data;
op->query_timer = 0;
if (op->state == st_done) {
crm_debug("Operation %s for %s already completed", op->id, op->target);
} else if (op->state == st_exec) {
crm_debug("Operation %s for %s already in progress", op->id, op->target);
} else if (op->query_results) {
crm_debug("Query %s for %s complete: %d", op->id, op->target, op->state);
call_remote_stonith(op, NULL);
} else {
crm_debug("Query %s for %s timed out: %d", op->id, op->target, op->state);
if (op->op_timer_total) {
g_source_remove(op->op_timer_total);
op->op_timer_total = 0;
}
remote_op_timeout(op);
}
return FALSE;
}
static gboolean
topology_is_empty(stonith_topology_t *tp)
{
int i;
if (tp == NULL) {
return TRUE;
}
for (i = 0; i < ST_LEVEL_MAX; i++) {
if (tp->levels[i] != NULL) {
return FALSE;
}
}
return TRUE;
}
/*!
* \internal
* \brief Add a device to an operation's automatic unfencing list
*
* \param[in,out] op Operation to modify
* \param[in] device Device ID to add
*/
static void
add_required_device(remote_fencing_op_t *op, const char *device)
{
GListPtr match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (!match) {
op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
}
}
/*!
* \internal
* \brief Remove a device from the automatic unfencing list
*
* \param[in,out] op Operation to modify
* \param[in] device Device ID to remove
*/
static void
remove_required_device(remote_fencing_op_t *op, const char *device)
{
GListPtr match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (match) {
op->automatic_list = g_list_remove(op->automatic_list, match->data);
}
}
/* deep copy the device list */
static void
set_op_device_list(remote_fencing_op_t * op, GListPtr devices)
{
GListPtr lpc = NULL;
if (op->devices_list) {
g_list_free_full(op->devices_list, free);
op->devices_list = NULL;
}
for (lpc = devices; lpc != NULL; lpc = lpc->next) {
op->devices_list = g_list_append(op->devices_list, strdup(lpc->data));
}
op->devices = op->devices_list;
}
/*!
* \internal
* \brief Check whether a node matches a topology target
*
* \param[in] tp Topology table entry to check
* \param[in] node Name of node to check
*
* \return TRUE if node matches topology target
*/
static gboolean
topology_matches(const stonith_topology_t *tp, const char *node)
{
regex_t r_patt;
CRM_CHECK(node && tp && tp->target, return FALSE);
switch(tp->kind) {
case 2:
/* This level targets by attribute, so tp->target is a NAME=VALUE pair
* of a permanent attribute applied to targeted nodes. The test below
* relies on the locally cached copy of the CIB, so if fencing needs to
* be done before the initial CIB is received or after a malformed CIB
* is received, then the topology will not be usable.
*/
if (node_has_attr(node, tp->target_attribute, tp->target_value)) {
crm_notice("Matched %s with %s by attribute", node, tp->target);
return TRUE;
}
break;
case 1:
/* This level targets by name, so tp->target is a regular expression
* matching names of nodes to be targeted.
*/
if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) {
crm_info("Bad regex '%s' for fencing level", tp->target);
} else {
int status = regexec(&r_patt, node, 0, NULL, 0);
regfree(&r_patt);
if (status == 0) {
crm_notice("Matched %s with %s by name", node, tp->target);
return TRUE;
}
}
break;
case 0:
crm_trace("Testing %s against %s", node, tp->target);
return safe_str_eq(tp->target, node);
}
crm_trace("No match for %s with %s", node, tp->target);
return FALSE;
}
stonith_topology_t *
find_topology_for_host(const char *host)
{
GHashTableIter tIter;
stonith_topology_t *tp = g_hash_table_lookup(topology, host);
if(tp != NULL) {
crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
return tp;
}
g_hash_table_iter_init(&tIter, topology);
while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) {
if (topology_matches(tp, host)) {
crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
return tp;
}
}
crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology));
return NULL;
}
/*!
* \internal
* \brief Set fencing operation's device list to target's next topology level
*
* \param[in,out] op Remote fencing operation to modify
*
* \return pcmk_ok if successful, if no target was specified (i.e. a query),
*         or if the target has no topology; -EINVAL if there are no more
*         topology levels to try
*/
static int
stonith_topology_next(remote_fencing_op_t * op)
{
stonith_topology_t *tp = NULL;
if (op->target) {
/* Queries don't have a target set */
tp = find_topology_for_host(op->target);
}
if (topology_is_empty(tp)) {
return pcmk_ok;
}
set_bit(op->call_options, st_opt_topology);
/* This is a new level, so undo any remapping left over from previous */
undo_op_remap(op);
do {
op->level++;
} while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
if (op->level < ST_LEVEL_MAX) {
crm_trace("Attempting fencing level %d for %s (%d devices) - %s@%s.%.8s",
op->level, op->target, g_list_length(tp->levels[op->level]),
op->client_name, op->originator, op->id);
set_op_device_list(op, tp->levels[op->level]);
if (g_list_next(op->devices_list) && safe_str_eq(op->action, "reboot")) {
/* A reboot has been requested for a topology level with multiple
* devices. Instead of rebooting the devices sequentially, we will
* turn them all off, then turn them all on again. (Think about
* switched power outlets for redundant power supplies.)
*/
op_phase_off(op);
}
return pcmk_ok;
}
crm_notice("All fencing options to fence %s for %s@%s.%.8s failed",
op->target, op->client_name, op->originator, op->id);
return -EINVAL;
}
/*!
* \brief Check whether this operation is a duplicate of another in-flight
* operation; if so, merge this operation into that one, and mark it as a
* duplicate.
*/
static void
merge_duplicates(remote_fencing_op_t * op)
{
GHashTableIter iter;
remote_fencing_op_t *other = NULL;
time_t now = time(NULL);
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
crm_node_t *peer = NULL;
const char *other_action = op_requested_action(other);
if (other->state > st_exec) {
/* Must be in-progress */
continue;
} else if (safe_str_neq(op->target, other->target)) {
/* Must be for the same node */
continue;
} else if (safe_str_neq(op->action, other_action)) {
crm_trace("Must be for the same action: %s vs. %s",
op->action, other_action);
continue;
} else if (safe_str_eq(op->client_name, other->client_name)) {
crm_trace("Must be for different clients: %s", op->client_name);
continue;
} else if (safe_str_eq(other->target, other->originator)) {
crm_trace("Can't be a suicide operation: %s", other->target);
continue;
}
peer = crm_get_peer(0, other->originator);
if(fencing_peer_active(peer) == FALSE) {
crm_notice("Failing stonith action %s for node %s originating from %s@%s.%.8s: Originator is dead",
other->action, other->target, other->client_name, other->originator, other->id);
other->state = st_failed;
continue;
} else if(other->total_timeout > 0 && now > (other->total_timeout + other->created)) {
crm_info("Stonith action %s for node %s originating from %s@%s.%.8s is too old: %ld vs. %ld + %d",
other->action, other->target, other->client_name, other->originator, other->id,
now, other->created, other->total_timeout);
continue;
}
/* There is another in-flight request to fence the same host.
* Piggyback on that instead. If it fails, so do we.
*/
other->duplicates = g_list_append(other->duplicates, op);
if (other->total_timeout == 0) {
crm_trace("Making a best-guess as to the timeout used");
other->total_timeout = op->total_timeout =
TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
}
crm_notice
("Merging stonith action %s for node %s originating from client %s.%.8s with identical request from %s@%s.%.8s (%ds)",
op->action, op->target, op->client_name, op->id, other->client_name, other->originator,
other->id, other->total_timeout);
report_timeout_period(op, other->total_timeout);
op->state = st_duplicate;
}
}
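/*!
* \internal
* \brief Count peers in the cluster cache that can participate in fencing
*/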
static uint32_t fencing_active_peers(void)
{
uint32_t count = 0;
crm_node_t *entry;
GHashTableIter gIter;
g_hash_table_iter_init(&gIter, crm_peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
if(fencing_peer_active(entry)) {
count++;
}
}
return count;
}
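/*!
* \internal
* \brief Record a manual confirmation that the operation's target is down
*
* \return -EINPROGRESS (replies are sent later via callbacks)
*/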
int
stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op)
{
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
op->state = st_done;
op->completed = time(NULL);
op->delegate = strdup("a human");
crm_notice("Injecting manual confirmation that %s is safely off/down",
crm_element_value(dev, F_STONITH_TARGET));
remote_op_done(op, msg, pcmk_ok, FALSE);
/* Replies are sent via done_cb->stonith_send_async_reply()->do_local_reply() */
return -EINPROGRESS;
}
/*!
 * \internal
 * \brief Create a new remote stonith operation
 *
 * \param[in] client   ID of local stonith client that initiated the operation
 * \param[in] request  The request from the client that started the operation
 * \param[in] peer     TRUE if this operation is owned by another stonith peer
 *                     (an operation owned by one peer is stored on all peers,
 *                     but only the owner executes it; all nodes get the
 *                     results once the owner finishes execution)
 *
 * \return Newly created operation, the existing one if already recorded, or
 *         NULL on error
 */
void *
create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
{
remote_fencing_op_t *op = NULL;
xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
int call_options = 0;
init_stonith_remote_op_hash_table(&stonith_remote_op_list);
/* If this operation is owned by another node, check to make
* sure we haven't already created this operation. */
if (peer && dev) {
const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
CRM_CHECK(op_id != NULL, return NULL);
op = g_hash_table_lookup(stonith_remote_op_list, op_id);
if (op) {
crm_debug("%s already exists", op_id);
return op;
}
}
op = calloc(1, sizeof(remote_fencing_op_t));
crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
if (peer && dev) {
op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
} else {
op->id = crm_generate_uuid();
}
g_hash_table_replace(stonith_remote_op_list, op->id, op);
CRM_LOG_ASSERT(g_hash_table_lookup(stonith_remote_op_list, op->id) != NULL);
crm_trace("Created %s", op->id);
op->state = st_query;
op->replies_expected = fencing_active_peers();
op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
op->created = time(NULL);
if (op->originator == NULL) {
/* Local or relayed request */
op->originator = strdup(stonith_our_uname);
}
CRM_LOG_ASSERT(client != NULL);
if (client) {
op->client_id = strdup(client);
}
op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);
op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
op->request = copy_xml(request); /* TODO: Figure out how to avoid this */
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
op->call_options = call_options;
crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid));
crm_trace("%s new stonith op: %s - %s of %s for %s",
(peer
&& dev) ? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name);
if (op->call_options & st_opt_cs_nodeid) {
int nodeid = crm_atoi(op->target, NULL);
crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
/* Ensure the conversion only happens once */
op->call_options &= ~st_opt_cs_nodeid;
if (node && node->uname) {
free(op->target);
op->target = strdup(node->uname);
} else {
crm_warn("Could not expand nodeid '%s' into a host name", op->target);
}
}
/* check to see if this is a duplicate operation of another in-flight operation */
merge_duplicates(op);
if (op->state != st_duplicate) {
/* kick history readers */
do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
}
/* safe to trim as long as that doesn't touch pending ops */
stonith_fence_history_trim();
return op;
}
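/*!
 * \internal
 * \brief Create a peer fencing operation from a request, and initiate it
 *
 * Unless this is a manual confirmation, a query is broadcast to all peers
 * asking which fencing devices they have available for the target.
 *
 * \param[in] client      Local client that made the request (or NULL, in
 *                        which case the client ID is taken from the request)
 * \param[in] request     Request XML
 * \param[in] manual_ack  TRUE if this is a manual fencing confirmation
 *
 * \return Newly created operation, or NULL on error
 */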
remote_fencing_op_t *
initiate_remote_stonith_op(crm_client_t * client, xmlNode * request, gboolean manual_ack)
{
int query_timeout = 0;
xmlNode *query = NULL;
const char *client_id = NULL;
remote_fencing_op_t *op = NULL;
if (client) {
client_id = client->id;
} else {
client_id = crm_element_value(request, F_STONITH_CLIENTID);
}
CRM_LOG_ASSERT(client_id != NULL);
op = create_remote_stonith_op(client_id, request, FALSE);
op->owner = TRUE;
if (manual_ack) {
crm_notice("Initiating manual confirmation for %s: %s",
op->target, op->id);
return op;
}
CRM_CHECK(op->action, return NULL);
if (stonith_topology_next(op) != pcmk_ok) {
op->state = st_failed;
}
switch (op->state) {
case st_failed:
crm_warn("Could not request peer fencing (%s) of %s "
CRM_XS " id=%s", op->action, op->target, op->id);
remote_op_done(op, NULL, -EINVAL, FALSE);
return op;
case st_duplicate:
crm_info("Requesting peer fencing (%s) of %s (duplicate) "
CRM_XS " id=%s", op->action, op->target, op->id);
return op;
default:
crm_notice("Requesting peer fencing (%s) of %s "
CRM_XS " id=%s state=%d",
op->action, op->target, op->id, op->state);
}
query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
NULL, op->call_options);
crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
crm_xml_add(query, F_STONITH_TARGET, op->target);
crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op));
crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout);
send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
free_xml(query);
query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
return op;
}
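/* An operation that cannot be satisfied locally proceeds in two broadcast
 * rounds: initiate_remote_stonith_op() above sends a STONITH_OP_QUERY asking
 * every peer what devices it has for the target,
 * process_remote_stonith_query() collects the replies, and
 * call_remote_stonith() then sends STONITH_OP_FENCE to a chosen peer, whose
 * result arrives in process_remote_stonith_exec().
 */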
enum find_best_peer_options {
/*! Skip checking the target peer for capable fencing devices */
FIND_PEER_SKIP_TARGET = 0x0001,
/*! Only check the target peer for capable fencing devices */
FIND_PEER_TARGET_ONLY = 0x0002,
/*! Skip peers and devices that are not verified */
FIND_PEER_VERIFIED_ONLY = 0x0004,
};
static st_query_result_t *
find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
{
GListPtr iter = NULL;
gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
if (!device && is_set(op->call_options, st_opt_topology)) {
return NULL;
}
for (iter = op->query_results; iter != NULL; iter = iter->next) {
st_query_result_t *peer = iter->data;
crm_trace("Testing result from %s for %s with %d devices: %d %x",
peer->host, op->target, peer->ndevices, peer->tried, options);
if ((options & FIND_PEER_SKIP_TARGET) && safe_str_eq(peer->host, op->target)) {
continue;
}
if ((options & FIND_PEER_TARGET_ONLY) && safe_str_neq(peer->host, op->target)) {
continue;
}
if (is_set(op->call_options, st_opt_topology)) {
if (grab_peer_device(op, peer, device, verified_devices_only)) {
return peer;
}
} else if ((peer->tried == FALSE)
&& count_peer_devices(op, peer, verified_devices_only)) {
/* No topology: Use the current best peer */
crm_trace("Simple fencing");
return peer;
}
}
return NULL;
}
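/*!
 * \internal
 * \brief Pick the best available peer to execute a fencing operation
 *
 * Preference order: a peer other than the target with verified device access,
 * then any peer other than the target, and finally the target itself
 * (self-fencing, which is never used for the "on" phase of a remapped
 * reboot). If the current topology level offers no suitable peer, the next
 * level is tried.
 *
 * \param[in,out] op  Operation needing a peer
 *
 * \return Chosen peer, or NULL if none is currently suitable
 */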
static st_query_result_t *
stonith_choose_peer(remote_fencing_op_t * op)
{
const char *device = NULL;
st_query_result_t *peer = NULL;
uint32_t active = fencing_active_peers();
do {
if (op->devices) {
device = op->devices->data;
crm_trace("Checking for someone to fence (%s) %s with %s",
op->action, op->target, device);
} else {
crm_trace("Checking for someone to fence (%s) %s",
op->action, op->target);
}
/* Best choice is a peer other than the target with verified access */
peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
if (peer) {
crm_trace("Found verified peer %s for %s", peer->host, device?device:"");
return peer;
}
if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) {
crm_trace("Waiting before looking for unverified devices to fence %s", op->target);
return NULL;
}
/* If no other peer has verified access, next best is unverified access */
peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
if (peer) {
crm_trace("Found best unverified peer %s", peer->host);
return peer;
}
/* If no other peer can do it, last option is self-fencing
* (which is never allowed for the "on" phase of a remapped reboot)
*/
if (op->phase != st_phase_on) {
peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
if (peer) {
crm_trace("%s will fence itself", peer->host);
return peer;
}
}
/* Try the next fencing level if there is one (unless we're in the "on"
* phase of a remapped "reboot", because we ignore errors in that case)
*/
} while ((op->phase != st_phase_on)
&& is_set(op->call_options, st_opt_topology)
&& stonith_topology_next(op) == pcmk_ok);
crm_notice("Couldn't find anyone to fence (%s) %s with %s",
op->action, op->target, (device? device : "any device"));
return NULL;
}
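/*!
 * \internal
 * \brief Calculate the timeout to use for a single device's execution
 *
 * A peer-reported custom action timeout overrides the operation's base
 * timeout, and any reported maximum random delay is added on top. For
 * example, with a base timeout of 60s, a custom "reboot" timeout of 120s, and
 * a delay_max of 5s, the result is 125 seconds.
 *
 * \param[in] op      Operation the timeout applies to
 * \param[in] peer    Peer that reported device properties (may be NULL)
 * \param[in] device  Device ID (may be NULL)
 *
 * \return Timeout in seconds
 */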
static int
get_device_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer,
const char *device)
{
device_properties_t *props;
if (!peer || !device) {
return op->base_timeout;
}
props = g_hash_table_lookup(peer->devices, device);
if (!props) {
return op->base_timeout;
}
return (props->custom_action_timeout[op->phase]?
props->custom_action_timeout[op->phase] : op->base_timeout)
+ props->delay_max[op->phase];
}
struct timeout_data {
const remote_fencing_op_t *op;
const st_query_result_t *peer;
int total_timeout;
};
/*!
* \internal
* \brief Add timeout to a total if device has not been executed yet
*
* \param[in] key GHashTable key (device ID)
* \param[in] value GHashTable value (device properties)
* \param[in] user_data Timeout data
*/
static void
add_device_timeout(gpointer key, gpointer value, gpointer user_data)
{
const char *device_id = key;
device_properties_t *props = value;
struct timeout_data *timeout = user_data;
if (!props->executed[timeout->op->phase]
&& !props->disallowed[timeout->op->phase]) {
timeout->total_timeout += get_device_timeout(timeout->op,
timeout->peer, device_id);
}
}
static int
get_peer_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer)
{
struct timeout_data timeout;
timeout.op = op;
timeout.peer = peer;
timeout.total_timeout = 0;
g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
}
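/*!
 * \internal
 * \brief Calculate an overall timeout covering all of an operation's devices
 *
 * With topology, this sums the timeouts of every device at every configured
 * level; otherwise it uses the chosen peer's total, falling back to the
 * operation's base timeout.
 *
 * \param[in] op           Operation being executed
 * \param[in] chosen_peer  Peer chosen to execute (ignored with topology)
 *
 * \return Timeout in seconds (never 0)
 */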
static int
get_op_total_timeout(const remote_fencing_op_t *op,
const st_query_result_t *chosen_peer)
{
int total_timeout = 0;
stonith_topology_t *tp = find_topology_for_host(op->target);
if (is_set(op->call_options, st_opt_topology) && tp) {
int i;
GListPtr device_list = NULL;
GListPtr iter = NULL;
/* Yep, this looks scary, nested loops all over the place.
* Here is what is going on.
* Loop1: Iterate through fencing levels.
* Loop2: If a fencing level has devices, loop through each device
* Loop3: For each device in a fencing level, see what peer owns it
* and what that peer has reported the timeout is for the device.
*/
for (i = 0; i < ST_LEVEL_MAX; i++) {
if (!tp->levels[i]) {
continue;
}
for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
for (iter = op->query_results; iter != NULL; iter = iter->next) {
const st_query_result_t *peer = iter->data;
if (find_peer_device(op, peer, device_list->data)) {
total_timeout += get_device_timeout(op, peer,
device_list->data);
break;
}
} /* End Loop3: match device with peer that owns device, find device's timeout period */
} /* End Loop2: iterate through devices at a specific level */
} /*End Loop1: iterate through fencing levels */
} else if (chosen_peer) {
total_timeout = get_peer_timeout(op, chosen_peer);
} else {
total_timeout = op->base_timeout;
}
return total_timeout ? total_timeout : op->base_timeout;
}
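/*!
 * \internal
 * \brief Report the calculated timeout to the requesting client
 *
 * Synchronous requests are skipped, since the client is blocked waiting for
 * the final reply anyway. If the client is connected to another node, the
 * update is relayed there as a STONITH_OP_TIMEOUT_UPDATE message, and the
 * report is repeated for any merged duplicate operations.
 *
 * \param[in,out] op          Operation to report the timeout for
 * \param[in]     op_timeout  Total timeout in seconds
 */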
static void
report_timeout_period(remote_fencing_op_t * op, int op_timeout)
{
GListPtr iter = NULL;
xmlNode *update = NULL;
const char *client_node = NULL;
const char *client_id = NULL;
const char *call_id = NULL;
if (op->call_options & st_opt_sync_call) {
/* There is no reason to report the timeout for a synchronous call. It
* is impossible to use the reported timeout to do anything when the client
* is blocking for the response. This update is only important for
* async calls that require a callback to report the results in. */
return;
} else if (!op->request) {
return;
}
crm_trace("Reporting timeout for %s.%.8s", op->client_name, op->id);
client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
call_id = crm_element_value(op->request, F_STONITH_CALLID);
client_id = crm_element_value(op->request, F_STONITH_CLIENTID);
if (!client_node || !call_id || !client_id) {
return;
}
if (safe_str_eq(client_node, stonith_our_uname)) {
/* The client is connected to this node, send the update directly to them */
do_stonith_async_timeout_update(client_id, call_id, op_timeout);
return;
}
/* The client is connected to another node, relay this update to them */
update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id);
crm_xml_add(update, F_STONITH_CLIENTID, client_id);
crm_xml_add(update, F_STONITH_CALLID, call_id);
crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout);
send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE);
free_xml(update);
for (iter = op->duplicates; iter != NULL; iter = iter->next) {
remote_fencing_op_t *dup = iter->data;
crm_trace("Reporting timeout for duplicate %s.%.8s", dup->client_name, dup->id);
report_timeout_period(iter->data, op_timeout);
}
}
/*!
* \internal
* \brief Advance an operation to the next device in its topology
*
* \param[in,out] op Operation to advance
* \param[in] device ID of device just completed
* \param[in] msg XML reply that contained device result (if available)
* \param[in] rc Return code of device's execution
*/
static void
advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg,
int rc)
{
/* Advance to the next device at this topology level, if any */
if (op->devices) {
op->devices = op->devices->next;
}
/* Handle automatic unfencing if an "on" action was requested */
if ((op->phase == st_phase_requested) && safe_str_eq(op->action, "on")) {
/* If the device we just executed was required, it's not anymore */
remove_required_device(op, device);
/* If there are no more devices at this topology level, run through any
* remaining devices with automatic unfencing
*/
if (op->devices == NULL) {
op->devices = op->automatic_list;
}
}
if ((op->devices == NULL) && (op->phase == st_phase_off)) {
/* We're done with this level and with required devices, but we had
* remapped "reboot" to "off", so start over with "on". If any devices
* need to be turned back on, op->devices will be non-NULL after this.
*/
op_phase_on(op);
}
if (op->devices) {
/* Necessary devices remain, so execute the next one */
crm_trace("Next for %s on behalf of %s@%s (rc was %d)",
op->target, op->originator, op->client_name, rc);
call_remote_stonith(op, NULL);
} else {
/* We're done with all devices and phases, so finalize operation */
crm_trace("Marking complex fencing op for %s as complete", op->target);
op->state = st_done;
remote_op_done(op, msg, rc, FALSE);
}
}
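/*!
 * \internal
 * \brief Ask a peer to execute the next fencing action for an operation
 *
 * \param[in,out] op    Operation to execute
 * \param[in]     peer  Preferred peer, or NULL to choose one
 *
 * \note With topology, any peer preference is ignored, because the peer must
 *       own the specific device required by the current level.
 */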
void
call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
{
const char *device = NULL;
int timeout = op->base_timeout;
crm_trace("State for %s.%.8s: %s %d", op->target, op->client_name, op->id, op->state);
if (peer == NULL && !is_set(op->call_options, st_opt_topology)) {
peer = stonith_choose_peer(op);
}
if (!op->op_timer_total) {
int total_timeout = get_op_total_timeout(op, peer);
op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout;
op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
report_timeout_period(op, op->total_timeout);
crm_info("Total timeout set to %d for peer's fencing of %s for %s"
CRM_XS "id=%s",
total_timeout, op->target, op->client_name, op->id);
}
if (is_set(op->call_options, st_opt_topology) && op->devices) {
/* Ignore any peer preference, they might not have the device we need */
/* When using topology, stonith_choose_peer() removes the device from
* further consideration, so be sure to calculate timeout beforehand */
peer = stonith_choose_peer(op);
device = op->devices->data;
timeout = get_device_timeout(op, peer, device);
}
if (peer) {
int timeout_one = 0;
xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id);
crm_xml_add(remote_op, F_STONITH_TARGET, op->target);
crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator);
crm_xml_add(remote_op, F_STONITH_CLIENTID, op->client_id);
crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name);
crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout);
crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options);
if (device) {
timeout_one = TIMEOUT_MULTIPLY_FACTOR *
get_device_timeout(op, peer, device);
crm_notice("Requesting that '%s' perform op '%s %s' with '%s' " CRM_XS " for %s (%ds)", peer->host,
op->target, op->action, device, op->client_name, timeout_one);
crm_xml_add(remote_op, F_STONITH_DEVICE, device);
crm_xml_add(remote_op, F_STONITH_MODE, "slave");
} else {
timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
crm_notice("Requesting that '%s' perform op '%s %s' " CRM_XS " for %s (%ds, %lds)",
peer->host, op->target, op->action, op->client_name, timeout_one, stonith_watchdog_timeout_ms);
crm_xml_add(remote_op, F_STONITH_MODE, "smart");
}
op->state = st_exec;
if (op->op_timer_one) {
g_source_remove(op->op_timer_one);
}
if(stonith_watchdog_timeout_ms > 0 && device && safe_str_eq(device, "watchdog")) {
crm_notice("Waiting %lds for %s to self-fence (%s) for %s.%.8s (%p)",
stonith_watchdog_timeout_ms/1000, op->target,
op->action, op->client_name, op->id, device);
op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
/* TODO check devices to verify watchdog will be in use */
} else if(stonith_watchdog_timeout_ms > 0
&& safe_str_eq(peer->host, op->target)
&& safe_str_neq(op->action, "on")) {
crm_notice("Waiting %lds for %s to self-fence (%s) for %s.%.8s (%p)",
stonith_watchdog_timeout_ms/1000, op->target,
op->action, op->client_name, op->id, device);
op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
} else {
op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
}
send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE);
peer->tried = TRUE;
free_xml(remote_op);
return;
} else if (op->phase == st_phase_on) {
/* A remapped "on" cannot be executed, but the node was already
* turned off successfully, so ignore the error and continue.
*/
crm_warn("Ignoring %s 'on' failure (no capable peers) for %s after successful 'off'",
device, op->target);
advance_op_topology(op, device, NULL, pcmk_ok);
return;
} else if (op->owner == FALSE) {
crm_err("Fencing (%s) of %s for %s is not ours to control",
op->action, op->target, op->client_name);
} else if (op->query_timer == 0) {
/* We've exhausted all available peers */
crm_info("No remaining peers capable of fencing (%s) %s for %s (%d)",
op->target, op->action, op->client_name, op->state);
CRM_LOG_ASSERT(op->state < st_done);
remote_op_timeout(op);
} else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
int rc = -EHOSTUNREACH;
/* if the operation never left the query state,
* but we have all the expected replies, then no devices
* are available to execute the fencing operation. */
if(stonith_watchdog_timeout_ms && (device == NULL || safe_str_eq(device, "watchdog"))) {
crm_notice("Waiting %lds for %s to self-fence (%s) for %s.%.8s (%p)",
stonith_watchdog_timeout_ms/1000, op->target,
op->action, op->client_name, op->id, device);
op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
return;
}
if (op->state == st_query) {
crm_info("No peers (out of %d) have devices capable of fencing (%s) %s for %s (%d)",
op->replies, op->action, op->target, op->client_name,
op->state);
rc = -ENODEV;
} else {
crm_info("No peers (out of %d) are capable of fencing (%s) %s for %s (%d)",
op->replies, op->action, op->target, op->client_name,
op->state);
}
op->state = st_failed;
remote_op_done(op, NULL, rc, FALSE);
} else if (device) {
crm_info("Waiting for additional peers capable of fencing (%s) %s with %s for %s.%.8s",
op->action, op->target, device, op->client_name, op->id);
} else {
crm_info("Waiting for additional peers capable of fencing (%s) %s for %s%.8s",
op->action, op->target, op->client_name, op->id);
}
}
/*!
* \internal
* \brief Comparison function for sorting query results
*
* \param[in] a GList item to compare
* \param[in] b GList item to compare
*
* \return Per the glib documentation, "a negative integer if the first value
* comes before the second, 0 if they are equal, or a positive integer
* if the first value comes after the second."
*/
static gint
sort_peers(gconstpointer a, gconstpointer b)
{
const st_query_result_t *peer_a = a;
const st_query_result_t *peer_b = b;
return (peer_b->ndevices - peer_a->ndevices);
}
/*!
* \internal
* \brief Determine if all the devices in the topology are found or not
*/
static gboolean
all_topology_devices_found(remote_fencing_op_t * op)
{
GListPtr device = NULL;
GListPtr iter = NULL;
device_properties_t *match = NULL;
stonith_topology_t *tp = NULL;
gboolean skip_target = FALSE;
int i;
tp = find_topology_for_host(op->target);
if (!tp) {
return FALSE;
}
if (safe_str_eq(op->action, "off") || safe_str_eq(op->action, "reboot")) {
/* Don't count the devices on the target node if we are killing
* the target node. */
skip_target = TRUE;
}
for (i = 0; i < ST_LEVEL_MAX; i++) {
for (device = tp->levels[i]; device; device = device->next) {
match = NULL;
for (iter = op->query_results; iter && !match; iter = iter->next) {
st_query_result_t *peer = iter->data;
if (skip_target && safe_str_eq(peer->host, op->target)) {
continue;
}
match = find_peer_device(op, peer, device->data);
}
if (!match) {
return FALSE;
}
}
}
return TRUE;
}
/*!
 * \internal
 * \brief Parse action-specific device properties from XML
 *
 * \param[in]     xml     XML element containing the properties
 * \param[in]     peer    Name of peer that sent XML (for logs)
 * \param[in]     device  Device ID (for logs)
 * \param[in]     action  Action the properties relate to (for logs)
 * \param[in,out] op      Operation to mark the device as required for, if so
 * \param[in]     phase   Phase the properties relate to
 * \param[in,out] props   Device properties to update
 */
static void
parse_action_specific(xmlNode *xml, const char *peer, const char *device,
const char *action, remote_fencing_op_t *op,
enum st_remap_phase phase, device_properties_t *props)
{
props->custom_action_timeout[phase] = 0;
crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
&props->custom_action_timeout[phase]);
if (props->custom_action_timeout[phase]) {
crm_trace("Peer %s with device %s returned %s action timeout %d",
peer, device, action, props->custom_action_timeout[phase]);
}
props->delay_max[phase] = 0;
crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]);
if (props->delay_max[phase]) {
crm_trace("Peer %s with device %s returned maximum of random delay %d for %s",
peer, device, props->delay_max[phase], action);
}
props->delay_base[phase] = 0;
crm_element_value_int(xml, F_STONITH_DELAY_BASE, &props->delay_base[phase]);
if (props->delay_base[phase]) {
crm_trace("Peer %s with device %s returned base delay %d for %s",
peer, device, props->delay_base[phase], action);
}
/* Handle devices with automatic unfencing */
if (safe_str_eq(action, "on")) {
int required = 0;
crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
if (required) {
crm_trace("Peer %s requires device %s to execute for action %s",
peer, device, action);
add_required_device(op, device);
}
}
/* If a reboot is remapped to off+on, it's possible that a node is allowed
* to perform one action but not another.
*/
if (crm_is_true(crm_element_value(xml, F_STONITH_ACTION_DISALLOWED))) {
props->disallowed[phase] = TRUE;
crm_trace("Peer %s is disallowed from executing %s for device %s",
peer, action, device);
}
}
/*!
* \internal
* \brief Parse one device's properties from peer's XML query reply
*
* \param[in] xml XML node containing device properties
* \param[in,out] op Operation that query and reply relate to
* \param[in,out] result Peer's results
* \param[in] device ID of device being parsed
*/
static void
add_device_properties(xmlNode *xml, remote_fencing_op_t *op,
st_query_result_t *result, const char *device)
{
xmlNode *child;
int verified = 0;
device_properties_t *props = calloc(1, sizeof(device_properties_t));
/* Add a new entry to this result's devices list */
CRM_ASSERT(props != NULL);
g_hash_table_insert(result->devices, strdup(device), props);
/* Peers with verified (monitored) access will be preferred */
crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified);
if (verified) {
crm_trace("Peer %s has confirmed a verified device %s",
result->host, device);
props->verified = TRUE;
}
/* Parse action-specific device properties */
parse_action_specific(xml, result->host, device, op_requested_action(op),
op, st_phase_requested, props);
for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
/* Replies for "reboot" operations will include the action-specific
* values for "off" and "on" in child elements, just in case the reboot
* winds up getting remapped.
*/
if (safe_str_eq(ID(child), "off")) {
parse_action_specific(child, result->host, device, "off",
op, st_phase_off, props);
} else if (safe_str_eq(ID(child), "on")) {
parse_action_specific(child, result->host, device, "on",
op, st_phase_on, props);
}
}
}
/*!
* \internal
* \brief Parse a peer's XML query reply and add it to operation's results
*
* \param[in,out] op Operation that query and reply relate to
* \param[in] host Name of peer that sent this reply
* \param[in] ndevices Number of devices expected in reply
* \param[in] xml XML node containing device list
*
* \return Newly allocated result structure with parsed reply
*/
static st_query_result_t *
add_result(remote_fencing_op_t *op, const char *host, int ndevices, xmlNode *xml)
{
st_query_result_t *result = calloc(1, sizeof(st_query_result_t));
xmlNode *child;
CRM_CHECK(result != NULL, return NULL);
result->host = strdup(host);
result->devices = crm_str_table_new();
/* Each child element describes one capable device available to the peer */
for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
const char *device = ID(child);
if (device) {
add_device_properties(child, op, result, device);
}
}
result->ndevices = g_hash_table_size(result->devices);
CRM_CHECK(ndevices == result->ndevices,
crm_err("Query claimed to have %d devices but %d found",
ndevices, result->ndevices));
op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers);
return result;
}
/*!
* \internal
* \brief Handle a peer's reply to our fencing query
*
* Parse a query result from XML and store it in the remote operation
* table, and when enough replies have been received, issue a fencing request.
*
* \param[in] msg XML reply received
*
* \return pcmk_ok on success, -errno on error
*
* \note See initiate_remote_stonith_op() for how the XML query was initially
* formed, and stonith_query() for how the peer formed its XML reply.
*/
int
process_remote_stonith_query(xmlNode * msg)
{
int ndevices = 0;
gboolean host_is_target = FALSE;
gboolean have_all_replies = FALSE;
const char *id = NULL;
const char *host = NULL;
remote_fencing_op_t *op = NULL;
st_query_result_t *result = NULL;
uint32_t replies_expected;
xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
CRM_CHECK(id != NULL, return -EPROTO);
dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices);
if (stonith_remote_op_list) {
op = g_hash_table_lookup(stonith_remote_op_list, id);
}
if (op == NULL) {
crm_debug("Received query reply for unknown or expired operation %s",
id);
return -EOPNOTSUPP;
}
replies_expected = QB_MIN(op->replies_expected, fencing_active_peers());
if ((++op->replies >= replies_expected) && (op->state == st_query)) {
have_all_replies = TRUE;
}
host = crm_element_value(msg, F_ORIG);
host_is_target = safe_str_eq(host, op->target);
crm_info("Query result %d of %d from %s for %s/%s (%d devices) %s",
op->replies, replies_expected, host,
op->target, op->action, ndevices, id);
if (ndevices > 0) {
result = add_result(op, host, ndevices, dev);
}
if (is_set(op->call_options, st_opt_topology)) {
/* If we start the fencing before all the topology results are in,
* it is possible fencing levels will be skipped because of the missing
* query results. */
if (op->state == st_query && all_topology_devices_found(op)) {
/* All the query results are in for the topology, start the fencing ops. */
crm_trace("All topology devices found");
call_remote_stonith(op, result);
} else if (have_all_replies) {
crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ",
replies_expected, op->replies);
call_remote_stonith(op, NULL);
}
} else if (op->state == st_query) {
int nverified = count_peer_devices(op, result, TRUE);
/* We have a result for a non-topology fencing op that looks promising,
* go ahead and start fencing before query timeout */
if (result && (host_is_target == FALSE) && nverified) {
/* we have a verified device living on a peer that is not the target */
crm_trace("Found %d verified devices", nverified);
call_remote_stonith(op, result);
} else if (have_all_replies) {
crm_info("All query replies have arrived, continuing (%d expected/%d received) ",
replies_expected, op->replies);
call_remote_stonith(op, NULL);
} else {
crm_trace("Waiting for more peer results before launching fencing operation");
}
} else if (result && (op->state == st_done)) {
crm_info("Discarding query result from %s (%d devices): Operation is in state %d",
result->host, result->ndevices, op->state);
}
return pcmk_ok;
}
/*!
* \internal
* \brief Handle a peer's reply to a fencing request
*
* Parse a fencing reply from XML, and either finalize the operation
* or attempt another device as appropriate.
*
* \param[in] msg XML reply received
*
* \return pcmk_ok on success, -errno on error
*/
int
process_remote_stonith_exec(xmlNode * msg)
{
int rc = 0;
const char *id = NULL;
const char *device = NULL;
remote_fencing_op_t *op = NULL;
xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
CRM_CHECK(id != NULL, return -EPROTO);
dev = get_xpath_object("//@" F_STONITH_RC, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
crm_element_value_int(dev, F_STONITH_RC, &rc);
device = crm_element_value(dev, F_STONITH_DEVICE);
if (stonith_remote_op_list) {
op = g_hash_table_lookup(stonith_remote_op_list, id);
}
if (op == NULL && rc == pcmk_ok) {
/* Record successful fencing operations */
const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID);
op = create_remote_stonith_op(client_id, dev, TRUE);
}
if (op == NULL) {
/* Could be for an event that began before we started */
/* TODO: Record the op for later querying */
crm_info("Received peer result of unknown or expired operation %s", id);
return -EOPNOTSUPP;
}
if (op->devices && device && safe_str_neq(op->devices->data, device)) {
crm_err("Received outdated reply for device %s (instead of %s) to "
"fence (%s) %s. Operation already timed out at peer level.",
device, (const char *) op->devices->data, op->action, op->target);
return rc;
}
if (safe_str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast")) {
crm_debug("Marking call to %s for %s on behalf of %s@%s.%.8s: %s (%d)",
op->action, op->target, op->client_name, op->id, op->originator,
pcmk_strerror(rc), rc);
if (rc == pcmk_ok) {
op->state = st_done;
} else {
op->state = st_failed;
}
remote_op_done(op, msg, rc, FALSE);
return pcmk_ok;
} else if (safe_str_neq(op->originator, stonith_our_uname)) {
/* If this isn't a remote level broadcast, and we are not the
* originator of the operation, we should not be receiving this msg. */
crm_err("%s received non-broadcast fencing result for operation it does not "
        "own (device %s targeting %s)",
        stonith_our_uname, device, op->target);
return rc;
}
if (is_set(op->call_options, st_opt_topology)) {
const char *device = crm_element_value(msg, F_STONITH_DEVICE);
crm_notice("Call to %s for '%s %s' on behalf of %s@%s: %s (%d)",
device, op->target, op->action, op->client_name, op->originator,
pcmk_strerror(rc), rc);
/* We own the op, and it is complete. broadcast the result to all nodes
* and notify our local clients. */
if (op->state == st_done) {
remote_op_done(op, msg, rc, FALSE);
return rc;
}
if ((op->phase == st_phase_on) && (rc != pcmk_ok)) {
/* A remapped "on" failed, but the node was already turned off
* successfully, so ignore the error and continue.
*/
crm_warn("Ignoring %s 'on' failure (exit code %d) for %s after successful 'off'",
device, rc, op->target);
rc = pcmk_ok;
}
if (rc == pcmk_ok) {
/* An operation completed successfully. Try another device if
* necessary, otherwise mark the operation as done. */
advance_op_topology(op, device, msg, rc);
return rc;
} else {
/* This device failed, time to try another topology level. If no other
* levels are available, mark this operation as failed and report results. */
if (stonith_topology_next(op) != pcmk_ok) {
op->state = st_failed;
remote_op_done(op, msg, rc, FALSE);
return rc;
}
}
} else if (rc == pcmk_ok && op->devices == NULL) {
crm_trace("All done for %s", op->target);
op->state = st_done;
remote_op_done(op, msg, rc, FALSE);
return rc;
} else if (rc == -ETIME && op->devices == NULL) {
/* If the operation timed out don't bother retrying other peers. */
op->state = st_failed;
remote_op_done(op, msg, rc, FALSE);
return rc;
} else {
/* fall-through and attempt other fencing action using another peer */
}
/* Retry on failure */
crm_trace("Next for %s on behalf of %s@%s (rc was %d)", op->target, op->originator,
op->client_name, rc);
call_remote_stonith(op, NULL);
return rc;
}
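/*!
 * \internal
 * \brief Check whether a target was already fenced recently enough
 *
 * If a fencing operation for the same target and action completed
 * successfully within the given tolerance, a new request can be treated as
 * already satisfied. For example, with a tolerance of 60s, a "reboot" of
 * node1 that finished 30 seconds ago satisfies a new reboot request.
 *
 * \param[in] tolerance  Number of seconds to look back
 * \param[in] target     Name of node to check for
 * \param[in] action     Fencing action to check for
 *
 * \return TRUE if a matching operation completed recently, FALSE otherwise
 */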
gboolean
stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
{
GHashTableIter iter;
time_t now = time(NULL);
remote_fencing_op_t *rop = NULL;
crm_trace("tolerance=%d, stonith_remote_op_list=%p", tolerance,
stonith_remote_op_list);
if (tolerance <= 0 || !stonith_remote_op_list || target == NULL ||
action == NULL) {
return FALSE;
}
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
if (strcmp(rop->target, target) != 0) {
continue;
} else if (rop->state != st_done) {
continue;
/* We don't have to worry about remapped reboots here
* because if state is done, any remapping has been undone
*/
} else if (strcmp(rop->action, action) != 0) {
continue;
} else if ((rop->completed + tolerance) < now) {
continue;
}
crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
target, action, tolerance, rop->delegate, rop->originator);
return TRUE;
}
return FALSE;
}
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 7e9bb07666..7a87f93f7c 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,1518 +1,1521 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include <inttypes.h> /* U32T ~ PRIu32, X32T ~ PRIx32 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
char *stonith_our_uname = NULL;
char *stonith_our_uuid = NULL;
long stonith_watchdog_timeout_ms = 0;
static GMainLoop *mainloop = NULL;
gboolean stand_alone = FALSE;
static gboolean no_cib_connect = FALSE;
static gboolean stonith_shutdown_flag = FALSE;
static qb_ipcs_service_t *ipcs = NULL;
static xmlNode *local_cib = NULL;
static pe_working_set_t *fenced_data_set = NULL;
static cib_t *cib_api = NULL;
static void *cib_library = NULL;
static void stonith_shutdown(int nsig);
static void stonith_cleanup(void);
static int32_t
st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
if (stonith_shutdown_flag) {
crm_info("Ignoring new client [%d] during shutdown", crm_ipcs_client_pid(c));
return -EPERM;
}
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
st_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection created for %p", c);
}
/* Exit code means? */
static int32_t
st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
int call_options = 0;
xmlNode *request = NULL;
crm_client_t *c = crm_client_get(qbc);
const char *op = NULL;
if (c == NULL) {
crm_info("Invalid client: %p", qbc);
return 0;
}
request = crm_ipcs_recv(c, data, size, &id, &flags);
if (request == NULL) {
crm_ipcs_send_ack(c, id, flags, "nack", __FUNCTION__, __LINE__);
return 0;
}
op = crm_element_value(request, F_CRM_TASK);
if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
crm_xml_add(request, F_TYPE, T_STONITH_NG);
crm_xml_add(request, F_STONITH_OPERATION, op);
crm_xml_add(request, F_STONITH_CLIENTID, c->id);
crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
free_xml(request);
return 0;
}
if (c->name == NULL) {
const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
if (value == NULL) {
value = "unknown";
}
c->name = crm_strdup_printf("%s.%u", value, c->pid);
}
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
crm_trace("Flags %" X32T "/%u for command %" U32T " from %s",
flags, call_options, id, crm_client_name(c));
if (is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(flags & crm_ipc_client_response);
CRM_LOG_ASSERT(c->request_id == 0); /* This means the client has two synchronous events in-flight */
c->request_id = id; /* Reply only to the last one */
}
crm_xml_add(request, F_STONITH_CLIENTID, c->id);
crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
crm_log_xml_trace(request, "Client[inbound]");
stonith_command(c, id, flags, request, NULL);
free_xml(request);
return 0;
}
/* Error code means? */
static int32_t
st_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p closed", c);
crm_client_destroy(client);
/* 0 means: yes, go ahead and destroy the connection */
return 0;
}
static void
st_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p destroyed", c);
st_ipc_closed(c);
}
static void
stonith_peer_callback(xmlNode * msg, void *private_data)
{
const char *remote_peer = crm_element_value(msg, F_ORIG);
const char *op = crm_element_value(msg, F_STONITH_OPERATION);
if (crm_str_eq(op, "poke", TRUE)) {
return;
}
crm_log_xml_trace(msg, "Peer[inbound]");
stonith_command(NULL, 0, 0, msg, remote_peer);
}
#if SUPPORT_COROSYNC
static void
stonith_peer_ais_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
uint32_t kind = 0;
xmlNode *xml = NULL;
const char *from = NULL;
char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
if(data == NULL) {
return;
}
if (kind == crm_class_cluster) {
xml = string2xml(data);
if (xml == NULL) {
crm_err("Invalid XML: '%.120s'", data);
free(data);
return;
}
crm_xml_add(xml, F_ORIG, from);
/* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
stonith_peer_callback(xml, NULL);
}
free_xml(xml);
free(data);
return;
}
static void
stonith_peer_cs_destroy(gpointer user_data)
{
crm_crit("Lost connection to cluster layer, shutting down");
stonith_shutdown(0);
}
#endif
void
do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer)
{
/* send callback to originating child */
crm_client_t *client_obj = NULL;
int local_rc = pcmk_ok;
crm_trace("Sending response");
client_obj = crm_client_get_by_id(client_id);
crm_trace("Sending callback to request originator");
if (client_obj == NULL) {
local_rc = -1;
crm_trace("No client to sent the response to. F_STONITH_CLIENTID not set.");
} else {
int rid = 0;
if (sync_reply) {
CRM_LOG_ASSERT(client_obj->request_id);
rid = client_obj->request_id;
client_obj->request_id = 0;
crm_trace("Sending response %d to %s %s",
rid, client_obj->name, from_peer ? "(originator of delegated request)" : "");
} else {
crm_trace("Sending an event to %s %s",
client_obj->name, from_peer ? "(originator of delegated request)" : "");
}
local_rc = crm_ipcs_send(client_obj, rid, notify_src, sync_reply?crm_ipc_flags_none:crm_ipc_server_event);
}
if (local_rc < pcmk_ok && client_obj != NULL) {
crm_warn("%sSync reply to %s failed: %s",
sync_reply ? "" : "A-",
client_obj ? client_obj->name : "", pcmk_strerror(local_rc));
}
}
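/*!
 * \internal
 * \brief Map a notification type name to its client callback flag
 *
 * \param[in] name  Notification type (e.g. T_STONITH_NOTIFY_FENCE)
 *
 * \return Matching st_callback_* flag, or st_callback_unknown if none
 */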
long long
get_stonith_flag(const char *name)
{
if (safe_str_eq(name, T_STONITH_NOTIFY_FENCE)) {
return st_callback_notify_fence;
} else if (safe_str_eq(name, STONITH_OP_DEVICE_ADD)) {
return st_callback_device_add;
} else if (safe_str_eq(name, STONITH_OP_DEVICE_DEL)) {
return st_callback_device_del;
} else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY)) {
return st_callback_notify_history;
+ } else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED)) {
+ return st_callback_notify_history_synced;
+
}
return st_callback_unknown;
}
static void
stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
{
xmlNode *update_msg = user_data;
crm_client_t *client = value;
const char *type = NULL;
CRM_CHECK(client != NULL, return);
CRM_CHECK(update_msg != NULL, return);
type = crm_element_value(update_msg, F_SUBTYPE);
CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
if (client->ipcs == NULL) {
crm_trace("Skipping client with NULL channel");
return;
}
if (client->options & get_stonith_flag(type)) {
int rc = crm_ipcs_send(client, 0, update_msg, crm_ipc_server_event | crm_ipc_server_error);
if (rc <= 0) {
crm_warn("%s notification of client %s.%.6s failed: %s (%d)",
type, crm_client_name(client), client->id, pcmk_strerror(rc), rc);
} else {
crm_trace("Sent %s notification to client %s.%.6s", type, crm_client_name(client),
client->id);
}
}
}
void
do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
{
crm_client_t *client = NULL;
xmlNode *notify_data = NULL;
if (!timeout || !call_id || !client_id) {
return;
}
client = crm_client_get_by_id(client_id);
if (!client) {
return;
}
notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
crm_ipcs_send(client, 0, notify_data, crm_ipc_server_event);
free_xml(notify_data);
}
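/*!
 * \internal
 * \brief Broadcast a notification to all connected fencing clients
 *
 * Each client receives the notification only if it has registered for that
 * notification type (see stonith_notify_client()).
 *
 * \param[in] options  Unused
 * \param[in] type     Notification type, also used as the operation name
 * \param[in] result   Result code to include
 * \param[in] data     Optional XML payload
 */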
void
do_stonith_notify(int options, const char *type, int result, xmlNode * data)
{
/* TODO: Standardize the contents of data */
xmlNode *update_msg = create_xml_node(NULL, "notify");
CRM_CHECK(type != NULL,;);
crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY);
crm_xml_add(update_msg, F_SUBTYPE, type);
crm_xml_add(update_msg, F_STONITH_OPERATION, type);
crm_xml_add_int(update_msg, F_STONITH_RC, result);
if (data != NULL) {
add_message_xml(update_msg, F_STONITH_CALLDATA, data);
}
crm_trace("Notifying clients");
g_hash_table_foreach(client_connections, stonith_notify_client, update_msg);
free_xml(update_msg);
crm_trace("Notify complete");
}
static void
do_stonith_notify_config(int options, const char *op, int rc,
const char *desc, int active)
{
xmlNode *notify_data = create_xml_node(NULL, op);
CRM_CHECK(notify_data != NULL, return);
crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active);
do_stonith_notify(options, op, rc, notify_data);
free_xml(notify_data);
}
void
do_stonith_notify_device(int options, const char *op, int rc, const char *desc)
{
do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(device_list));
}
void
do_stonith_notify_level(int options, const char *op, int rc, const char *desc)
{
do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(topology));
}
static void
topology_remove_helper(const char *node, int level)
{
int rc;
char *desc = NULL;
xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
rc = stonith_level_remove(data, &desc);
do_stonith_notify_level(0, STONITH_OP_LEVEL_DEL, rc, desc);
free_xml(data);
free(desc);
}
static void
remove_cib_device(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
const char *rsc_id = NULL;
const char *standard = NULL;
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match != NULL) {
standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
}
if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
continue;
}
rsc_id = crm_element_value(match, XML_ATTR_ID);
stonith_device_remove(rsc_id, TRUE);
}
}
static void
handle_topology_change(xmlNode *match, bool remove)
{
int rc;
char *desc = NULL;
CRM_CHECK(match != NULL, return);
crm_trace("Updating %s", ID(match));
if(remove) {
int index = 0;
char *key = stonith_level_key(match, -1);
crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
topology_remove_helper(key, index);
free(key);
}
rc = stonith_level_register(match, &desc);
do_stonith_notify_level(0, STONITH_OP_LEVEL_ADD, rc, desc);
free(desc);
}
static void
remove_fencing_topology(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if (match && crm_element_value(match, XML_DIFF_MARKER)) {
/* Deletion */
int index = 0;
char *target = stonith_level_key(match, -1);
crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
if (target == NULL) {
crm_err("Invalid fencing target in element %s", ID(match));
} else if (index <= 0) {
crm_err("Invalid level for %s in element %s", target, ID(match));
} else {
topology_remove_helper(target, index);
}
/* } else { Deal with modifications during the 'addition' stage */
}
}
}
static void
register_fencing_topology(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
handle_topology_change(match, TRUE);
}
}
/* Fencing
*/
static void
fencing_topology_init()
{
xmlXPathObjectPtr xpathObj = NULL;
const char *xpath = "//" XML_TAG_FENCING_LEVEL;
crm_trace("Full topology refresh");
free_topology_list();
init_topology_list();
/* Grab everything */
xpathObj = xpath_search(local_cib, xpath);
register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
}
#define rsc_name(x) x->clone_name?x->clone_name:x->id
/*!
* \internal
* \brief Check whether our uname is in a resource's allowed node list
*
* \param[in] rsc Resource to check
*
* \return Pointer to node object if found, NULL otherwise
*/
static node_t *
our_node_allowed_for(resource_t *rsc)
{
GHashTableIter iter;
node_t *node = NULL;
if (rsc && stonith_our_uname) {
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
break;
}
node = NULL;
}
}
return node;
}
/*!
* \internal
* \brief If a resource or any of its children are STONITH devices, update their
* definitions given a cluster working set.
*
* \param[in] rsc Resource to check
* \param[in] data_set Cluster working set with device information
*/
static void cib_device_update(resource_t *rsc, pe_working_set_t *data_set)
{
node_t *node = NULL;
const char *value = NULL;
const char *rclass = NULL;
node_t *parent = NULL;
gboolean remove = TRUE;
/* If this is a complex resource, check children rather than this resource itself.
* TODO: Mark each installed device and remove if untouched when this process finishes.
*/
if(rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
cib_device_update(gIter->data, data_set);
if(pe_rsc_is_clone(rsc)) {
crm_trace("Only processing one copy of the clone %s", rsc->id);
break;
}
}
return;
}
/* We only care about STONITH resources. */
rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (safe_str_neq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
return;
}
/* If this STONITH resource is disabled, just remove it. */
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
if (safe_str_eq(value, RSC_STOPPED)) {
crm_info("Device %s has been disabled", rsc->id);
goto update_done;
}
/* Check whether our node is allowed for this resource (and its parent if in a group) */
node = our_node_allowed_for(rsc);
if (rsc->parent && (rsc->parent->variant == pe_group)) {
parent = our_node_allowed_for(rsc->parent);
}
if(node == NULL) {
/* Our node is disallowed, so remove the device */
GHashTableIter iter;
crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
crm_trace("Available: %s = %d", node->details->uname, node->weight);
}
goto update_done;
} else if(node->weight < 0 || (parent && parent->weight < 0)) {
/* Our node (or its group) is disallowed by score, so remove the device */
char *score = score2char((node->weight < 0) ? node->weight : parent->weight);
crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score);
free(score);
goto update_done;
} else {
/* Our node is allowed, so update the device information */
int rc;
xmlNode *data;
GHashTableIter gIter;
stonith_key_value_t *params = NULL;
const char *name = NULL;
const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
const char *rsc_provides = NULL;
crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
get_rsc_attributes(rsc->parameters, rsc, node, data_set);
get_meta_attributes(rsc->meta, rsc, node, data_set);
rsc_provides = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROVIDES);
g_hash_table_iter_init(&gIter, rsc->parameters);
while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
if (!name || !value) {
continue;
}
params = stonith_key_value_add(params, name, value);
crm_trace(" %s=%s", name, value);
}
remove = FALSE;
data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
agent, params, rsc_provides);
stonith_key_value_freeall(params, 1, 1);
rc = stonith_device_register(data, NULL, TRUE);
CRM_ASSERT(rc == pcmk_ok);
free_xml(data);
}
update_done:
if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) {
stonith_device_remove(rsc_name(rsc), TRUE);
}
}
/*!
* \internal
* \brief Update all STONITH device definitions based on current CIB
*/
static void
cib_devices_update(void)
{
GListPtr gIter = NULL;
crm_info("Updating devices to version %s.%s.%s",
crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
crm_element_value(local_cib, XML_ATTR_GENERATION),
crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
CRM_ASSERT(fenced_data_set != NULL);
fenced_data_set->input = local_cib;
fenced_data_set->now = crm_time_new(NULL);
fenced_data_set->flags |= pe_flag_quick_location;
fenced_data_set->localhost = stonith_our_uname;
cluster_status(fenced_data_set);
pcmk__schedule_actions(fenced_data_set, NULL, NULL);
for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) {
cib_device_update(gIter->data, fenced_data_set);
}
fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
pe_reset_working_set(fenced_data_set);
}
static void
update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
{
xmlNode *change = NULL;
char *reason = NULL;
bool needs_update = FALSE;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
const char *shortpath = NULL;
if ((op == NULL) || (xpath == NULL) ||
(strcmp(op, "move") == 0) ||
strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
continue;
} else if (safe_str_eq(op, "delete") && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
const char *rsc_id = NULL;
char *search = NULL;
char *mutable = NULL;
if (strstr(xpath, XML_TAG_ATTR_SETS) ||
strstr(xpath, XML_TAG_META_SETS)) {
needs_update = TRUE;
reason = strdup("(meta) attribute deleted from resource");
break;
}
mutable = strdup(xpath);
rsc_id = strstr(mutable, "primitive[@id=\'");
if (rsc_id != NULL) {
rsc_id += strlen("primitive[@id=\'");
search = strchr(rsc_id, '\'');
}
if (search != NULL) {
*search = 0;
stonith_device_remove(rsc_id, TRUE);
} else {
crm_warn("Ignoring malformed CIB update (resource deletion)");
}
free(mutable);
} else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
reason = crm_strdup_printf("%s %s", op, shortpath+1);
needs_update = TRUE;
break;
}
}
if(needs_update) {
crm_info("Updating device list from the cib: %s", reason);
cib_devices_update();
} else {
crm_trace("No updates for device list found in cib");
}
free(reason);
}
static void
update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
{
const char *reason = "none";
gboolean needs_update = FALSE;
xmlXPathObjectPtr xpath_obj = NULL;
/* process new constraints */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
if (numXpathResults(xpath_obj) > 0) {
int max = numXpathResults(xpath_obj), lpc = 0;
/* Safest and simplest to always recompute */
needs_update = TRUE;
reason = "new location constraint";
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpath_obj, lpc);
crm_log_xml_trace(match, "new constraint");
}
}
freeXpathObject(xpath_obj);
/* process deletions */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
if (numXpathResults(xpath_obj) > 0) {
remove_cib_device(xpath_obj);
}
freeXpathObject(xpath_obj);
/* process additions */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
if (numXpathResults(xpath_obj) > 0) {
int max = numXpathResults(xpath_obj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
const char *rsc_id = NULL;
const char *standard = NULL;
xmlNode *match = getXpathResult(xpath_obj, lpc);
rsc_id = crm_element_value(match, XML_ATTR_ID);
standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
continue;
}
crm_trace("Fencing resource %s was added or modified", rsc_id);
reason = "new resource";
needs_update = TRUE;
}
}
freeXpathObject(xpath_obj);
if(needs_update) {
crm_info("Updating device list from the cib: %s", reason);
cib_devices_update();
}
}
static void
update_cib_stonith_devices(const char *event, xmlNode * msg)
{
int format = 1;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
CRM_ASSERT(patchset);
crm_element_value_int(patchset, "format", &format);
switch(format) {
case 1:
update_cib_stonith_devices_v1(event, msg);
break;
case 2:
update_cib_stonith_devices_v2(event, msg);
break;
default:
crm_warn("Unknown patch format: %d", format);
}
}
/* Needs to hold node name + attribute name + attribute value + 75 */
#define XPATH_MAX 512
/*!
* \internal
* \brief Check whether a node has a specific attribute name/value
*
* \param[in] node Name of node to check
* \param[in] name Name of an attribute to look for
* \param[in] value Value the attribute must have to be considered a match
*
* \return TRUE if the locally cached CIB has the specified node attribute
*/
gboolean
node_has_attr(const char *node, const char *name, const char *value)
{
char xpath[XPATH_MAX];
xmlNode *match;
int n;
CRM_CHECK(local_cib != NULL, return FALSE);
/* Search for the node's attributes in the CIB. While the schema allows
* multiple sets of instance attributes, and allows instance attributes to
* use id-ref to reference values elsewhere, that is intended for resources,
* so we ignore that here.
*/
n = snprintf(xpath, XPATH_MAX, "//" XML_CIB_TAG_NODES
"/" XML_CIB_TAG_NODE "[@uname='%s']/" XML_TAG_ATTR_SETS
"/" XML_CIB_TAG_NVPAIR "[@name='%s' and @value='%s']",
node, name, value);
match = get_xpath_object(xpath, local_cib, LOG_TRACE);
CRM_CHECK(n < XPATH_MAX, return FALSE);
return (match != NULL);
}
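/*!
 * \internal
 * \brief Update the in-memory fencing topology from a CIB diff notification
 *
 * Handles both v1 (full diff) and v2 (patchset) formats; any change that
 * cannot be applied incrementally triggers a full topology refresh via
 * fencing_topology_init().
 */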
static void
update_fencing_topology(const char *event, xmlNode * msg)
{
int format = 1;
const char *xpath;
xmlXPathObjectPtr xpathObj = NULL;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
CRM_ASSERT(patchset);
crm_element_value_int(patchset, "format", &format);
if(format == 1) {
/* Process deletions (only) */
xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
xpathObj = xpath_search(msg, xpath);
remove_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
/* Process additions and changes */
xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
xpathObj = xpath_search(msg, xpath);
register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
} else if(format == 2) {
xmlNode *change = NULL;
int add[] = { 0, 0, 0 };
int del[] = { 0, 0, 0 };
xml_patch_versions(patchset, add, del);
for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
if(op == NULL) {
continue;
} else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
/* Change to a specific entry */
crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
if(strcmp(op, "move") == 0) {
continue;
} else if(strcmp(op, "create") == 0) {
handle_topology_change(change->children, FALSE);
} else if(strcmp(op, "modify") == 0) {
xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
if(match) {
handle_topology_change(match->children, TRUE);
}
} else if(strcmp(op, "delete") == 0) {
/* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
}
} else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
/* Change to the topology in general */
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
} else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
/* Changes to the whole config section, possibly including the topology as a whole */
if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
op, add[0], add[1], add[2], xpath);
} else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
}
} else {
crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
}
}
} else {
crm_warn("Unknown patch format: %d", format);
}
}
static bool have_cib_devices = FALSE;
static void
update_cib_cache_cb(const char *event, xmlNode * msg)
{
int rc = pcmk_ok;
xmlNode *stonith_enabled_xml = NULL;
xmlNode *stonith_watchdog_xml = NULL;
const char *stonith_enabled_s = NULL;
static gboolean stonith_enabled_saved = TRUE;
if(!have_cib_devices) {
crm_trace("Skipping updates until we get a full dump");
return;
} else if(msg == NULL) {
crm_trace("Missing %s update", event);
return;
}
/* Maintain a local copy of the CIB so that we have full access
* to device definitions, location constraints, and node attributes
*/
if (local_cib != NULL) {
int rc = pcmk_ok;
xmlNode *patchset = NULL;
crm_element_value_int(msg, F_CIB_RC, &rc);
if (rc != pcmk_ok) {
return;
}
patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
xml_log_patchset(LOG_TRACE, "Config update", patchset);
rc = xml_apply_patchset(local_cib, patchset, TRUE);
switch (rc) {
case pcmk_ok:
case -pcmk_err_old_data:
break;
case -pcmk_err_diff_resync:
case -pcmk_err_diff_failed:
crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(local_cib);
local_cib = NULL;
break;
default:
crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(local_cib);
local_cib = NULL;
}
}
if (local_cib == NULL) {
crm_trace("Re-requesting the full cib");
rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
if(rc != pcmk_ok) {
crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
return;
}
CRM_ASSERT(local_cib != NULL);
stonith_enabled_saved = FALSE; /* Trigger a full refresh below */
}
crm_peer_caches_refresh(local_cib);
stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", local_cib, LOG_TRACE);
if (stonith_enabled_xml) {
stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE);
}
if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) {
long timeout_ms = 0;
const char *value = NULL;
stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", local_cib, LOG_TRACE);
if (stonith_watchdog_xml) {
value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
}
if(value) {
timeout_ms = crm_get_msec(value);
}
if (timeout_ms < 0) {
timeout_ms = crm_auto_watchdog_timeout();
}
if(timeout_ms != stonith_watchdog_timeout_ms) {
crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000);
stonith_watchdog_timeout_ms = timeout_ms;
}
} else {
stonith_watchdog_timeout_ms = 0;
}
if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) {
crm_trace("Ignoring cib updates while stonith is disabled");
stonith_enabled_saved = FALSE;
return;
} else if (stonith_enabled_saved == FALSE) {
crm_info("Updating stonith device and topology lists now that stonith is enabled");
stonith_enabled_saved = TRUE;
fencing_topology_init();
cib_devices_update();
} else {
update_fencing_topology(event, msg);
update_cib_stonith_devices(event, msg);
}
}
static void
init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
crm_info("Updating device list from the cib: init");
have_cib_devices = TRUE;
local_cib = copy_xml(output);
crm_peer_caches_refresh(local_cib);
fencing_topology_init();
cib_devices_update();
}
static void
stonith_shutdown(int nsig)
{
stonith_shutdown_flag = TRUE;
crm_info("Terminating with %d clients",
crm_hash_table_size(client_connections));
if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
g_main_loop_quit(mainloop);
} else {
stonith_cleanup();
crm_exit(CRM_EX_OK);
}
}
static void
cib_connection_destroy(gpointer user_data)
{
if (stonith_shutdown_flag) {
crm_info("Connection to the CIB manager closed");
return;
} else {
crm_crit("Lost connection to the CIB manager, shutting down");
}
if (cib_api) {
cib_api->cmds->signoff(cib_api);
}
stonith_shutdown(0);
}
static void
stonith_cleanup(void)
{
if (cib_api) {
cib_api->cmds->signoff(cib_api);
}
if (ipcs) {
qb_ipcs_destroy(ipcs);
}
crm_peer_destroy();
crm_client_cleanup();
free_stonith_remote_op_list();
free_topology_list();
free_device_list();
free_metadata_cache();
free(stonith_our_uname);
stonith_our_uname = NULL;
free_xml(local_cib);
local_cib = NULL;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
{"stand-alone", 0, 0, 's'},
{"stand-alone-w-cpg", 0, 0, 'c'},
{"logfile", 1, 0, 'l'},
{"verbose", 0, 0, 'V'},
{"version", 0, 0, '$'},
{"help", 0, 0, '?'},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
static void
setup_cib(void)
{
int rc, retries = 0;
static cib_t *(*cib_new_fn) (void) = NULL;
if (cib_new_fn == NULL) {
cib_new_fn = find_library_function(&cib_library, CIB_LIBRARY, "cib_new", TRUE);
}
if (cib_new_fn != NULL) {
cib_api = (*cib_new_fn) ();
}
if (cib_api == NULL) {
crm_err("No connection to the CIB manager");
return;
}
do {
sleep(retries);
rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
} while (rc == -ENOTCONN && ++retries < 5);
if (rc != pcmk_ok) {
crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
} else if (pcmk_ok !=
cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
crm_err("Could not set CIB notification callback");
} else {
rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
init_cib_cache_cb);
cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
crm_info("Watching for stonith topology changes");
}
}
struct qb_ipcs_service_handlers ipc_callbacks = {
.connection_accept = st_ipc_accept,
.connection_created = st_ipc_created,
.msg_process = st_ipc_dispatch,
.connection_closed = st_ipc_closed,
.connection_destroyed = st_ipc_destroy
};
/*!
* \internal
* \brief Callback for peer status changes
*
* \param[in] type What changed
* \param[in] node What peer had the change
* \param[in] data Previous value of what changed
*/
static void
st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
{
if ((type != crm_status_processes) && !is_set(node->flags, crm_remote_node)) {
/*
* This is a hack until we can send to a nodeid and/or we fix node name lookups
* These messages are ignored in stonith_peer_callback()
*/
xmlNode *query = create_xml_node(NULL, "stonith_command");
crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
crm_xml_add(query, F_TYPE, T_STONITH_NG);
crm_xml_add(query, F_STONITH_OPERATION, "poke");
crm_debug("Broadcasting our uname because of node %u", node->id);
send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
free_xml(query);
}
}
int
main(int argc, char **argv)
{
int flag;
int lpc = 0;
int argerr = 0;
int option_index = 0;
crm_cluster_t cluster;
const char *actions[] = { "reboot", "off", "on", "list", "monitor", "status" };
crm_ipc_t *old_instance = NULL;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "mode [options]", long_options,
"Provides a summary of cluster's current state."
"\n\nOutputs varying levels of detail in a number of different formats.\n");
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1) {
break;
}
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'l':
crm_add_logfile(optarg);
break;
case 's':
stand_alone = TRUE;
break;
case 'c':
stand_alone = FALSE;
no_cib_connect = TRUE;
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
default:
++argerr;
break;
}
}
if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
printf("\n");
printf("\n");
printf(" 1.0\n");
printf(" Instance attributes available for all \"stonith\"-class resources"
" and used by Pacemaker's fence daemon, formerly known as stonithd\n");
printf(" Instance attributes available for all \"stonith\"-class resources\n");
printf(" \n");
#if 0
// priority is not implemented yet
printf(" \n");
printf(" Devices that are not in a topology "
"are tried in order of highest to lowest integer priority\n");
printf(" \n");
printf(" \n");
#endif
printf(" \n", STONITH_ATTR_HOSTARG);
printf
(" Advanced use only: An alternate parameter to supply instead of 'port'\n");
printf
(" Some devices do not support the standard 'port' parameter or may provide additional ones.\n"
"Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n"
"A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n"
" \n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTMAP);
printf
(" A mapping of host names to ports numbers for devices that do not support host names.\n");
printf
(" Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTLIST);
printf
(" A list of machines controlled by this device (Optional unless %s=static-list).\n",
STONITH_ATTR_HOSTCHECK);
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTCHECK);
printf
(" How to determine which machines are controlled by the device.\n");
printf(" Allowed values: dynamic-list "
"(query the device via the 'list' command), static-list "
"(check the " STONITH_ATTR_HOSTLIST " attribute), status "
"(query the device via the 'status' command), none (assume "
"every device can fence every machine)\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_DELAY_MAX);
printf
(" Enable a random delay for stonith actions and specify the maximum of random delay.\n");
printf
(" This prevents double fencing when using slow devices such as sbd.\n"
"Use this to enable a random delay for stonith actions.\n"
"The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay.\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_DELAY_BASE);
printf
(" Enable a base delay for stonith actions and specify base delay value.\n");
printf
(" This prevents double fencing when different delays are configured on the nodes.\n"
"Use this to enable a static delay for stonith actions.\n"
"The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay.\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_ACTION_LIMIT);
printf
(" The maximum number of actions can be performed in parallel on this device\n");
printf
(" Cluster property concurrent-fencing=true needs to be configured first.\n"
"Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.\n");
printf(" \n");
printf(" \n");
for (lpc = 0; lpc < DIMOF(actions); lpc++) {
printf(" \n", actions[lpc]);
printf
(" Advanced use only: An alternate command to run instead of '%s'\n",
actions[lpc]);
printf
(" Some devices do not support the standard commands or may provide additional ones.\n"
"Use this to specify an alternate, device-specific, command that implements the '%s' action.\n",
actions[lpc]);
printf(" \n", actions[lpc]);
printf(" \n");
printf(" \n", actions[lpc]);
printf
(" Advanced use only: Specify an alternate timeout to use for %s actions instead of stonith-timeout\n",
actions[lpc]);
printf
(" Some devices need much more/less time to complete than normal.\n"
"Use this to specify an alternate, device-specific, timeout for '%s' actions.\n",
actions[lpc]);
printf(" \n");
printf(" \n");
printf(" \n", actions[lpc]);
printf
(" Advanced use only: The maximum number of times to retry the '%s' command within the timeout period\n",
actions[lpc]);
printf(" Some devices do not support multiple connections."
" Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
" Use this option to alter the number of times Pacemaker retries '%s' actions before giving up."
"\n", actions[lpc]);
printf(" \n");
printf(" \n");
}
printf(" \n");
printf("\n");
return CRM_EX_OK;
}
if (optind != argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
crm_notice("Starting Pacemaker fencer");
old_instance = crm_ipc_new("stonith-ng", 0);
if (crm_ipc_connect(old_instance)) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("pacemaker-fenced is already active, aborting startup");
crm_exit(CRM_EX_OK);
} else {
/* not up or not authentic, we'll proceed either way */
crm_ipc_destroy(old_instance);
old_instance = NULL;
}
mainloop_add_signal(SIGTERM, stonith_shutdown);
crm_peer_init();
fenced_data_set = pe_new_working_set();
CRM_ASSERT(fenced_data_set != NULL);
if (stand_alone == FALSE) {
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
cluster.destroy = stonith_peer_cs_destroy;
cluster.cpg.cpg_deliver_fn = stonith_peer_ais_callback;
cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership;
#endif
}
crm_set_status_callback(&st_peer_update_callback);
if (crm_cluster_connect(&cluster) == FALSE) {
crm_crit("Cannot sign in to the cluster... terminating");
crm_exit(CRM_EX_FATAL);
}
stonith_our_uname = cluster.uname;
stonith_our_uuid = cluster.uuid;
if (no_cib_connect == FALSE) {
setup_cib();
}
} else {
stonith_our_uname = strdup("localhost");
}
init_device_list();
init_topology_list();
if(stonith_watchdog_timeout_ms > 0) {
int rc;
xmlNode *xml;
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, STONITH_ATTR_HOSTLIST, stonith_our_uname);
xml = create_device_registration_xml("watchdog", st_namespace_internal,
STONITH_WATCHDOG_AGENT, params,
NULL);
stonith_key_value_freeall(params, 1, 1);
rc = stonith_device_register(xml, NULL, FALSE);
free_xml(xml);
if (rc != pcmk_ok) {
crm_crit("Cannot register watchdog pseudo fence agent");
crm_exit(CRM_EX_FATAL);
}
}
stonith_ipc_server_init(&ipcs, &ipc_callbacks);
/* Create the mainloop and run it... */
mainloop = g_main_loop_new(NULL, FALSE);
crm_notice("Pacemaker fencer successfully started and accepting connections");
g_main_loop_run(mainloop);
stonith_cleanup();
pe_free_working_set(fenced_data_set);
crm_exit(CRM_EX_OK);
}
diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h
index 3a2edbbc4a..583cb47a14 100644
--- a/daemons/fenced/pacemaker-fenced.h
+++ b/daemons/fenced/pacemaker-fenced.h
@@ -1,254 +1,263 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
/*!
* \internal
* \brief Check whether the target was fenced in the last few seconds
* \param tolerance, The number of seconds to look back in time
* \param target, The node to search for
* \param action, The action we want to match
*
* \retval FALSE, no match
* \retval TRUE, a fencing operation took place within the last 'tolerance' seconds
*/
gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action);
enum st_device_flags
{
st_device_supports_list = 0x0001,
st_device_supports_status = 0x0002,
st_device_supports_reboot = 0x0004,
};
typedef struct stonith_device_s {
char *id;
char *agent;
char *namespace;
/*! list of actions that must execute on the target node. Used for unfencing */
char *on_target_actions;
GListPtr targets;
time_t targets_age;
gboolean has_attr_map;
/* whether the victim's nodeid should be passed to the agent as a "nodeid" argument */
gboolean include_nodeid;
/* whether the cluster should automatically unfence nodes with the device */
gboolean automatic_unfencing;
guint priority;
enum st_device_flags flags;
GHashTable *params;
GHashTable *aliases;
GList *pending_ops;
crm_trigger_t *work;
xmlNode *agent_metadata;
/*! A verified device is one that has contacted the
* agent successfully to perform a monitor operation */
gboolean verified;
gboolean cib_registered;
gboolean api_registered;
} stonith_device_t;
/* These values are used to index certain arrays by "phase". Usually an
* operation has only one "phase", so phase is always zero. However, some
* reboots are remapped to "off" then "on", in which case "reboot" will be
* phase 0, "off" will be phase 1 and "on" will be phase 2.
*/
enum st_remap_phase {
st_phase_requested = 0,
st_phase_off = 1,
st_phase_on = 2,
st_phase_max = 3
};
typedef struct remote_fencing_op_s {
/* The unique id associated with this operation */
char *id;
/*! The node this operation will fence */
char *target;
/*! The fencing action to perform on the target. (reboot, on, off) */
char *action;
/*! When was the fencing action recorded (seconds since epoch) */
time_t created;
/*! Marks if the final notifications have been sent to local stonith clients. */
gboolean notify_sent;
/*! The number of query replies received */
guint replies;
/*! The number of query replies expected */
guint replies_expected;
/*! Does this node own control of this operation */
gboolean owner;
/*! After the query is complete, this is the high-level timer that expires the entire operation */
guint op_timer_total;
/*! This timer expires the current fencing request. Many fencing
* requests may exist in a single operation */
guint op_timer_one;
/*! This timer expires the query request sent out to determine
* which nodes contain which devices, and whom those devices can fence */
guint query_timer;
/*! This is the default timeout to use for each fencing device if no
* custom timeout is received in the query. */
gint base_timeout;
/*! This is the calculated total timeout an operation can take before
* expiring. This is calculated by adding together all the timeout
* values associated with the devices this fencing operation may call */
gint total_timeout;
/*! Delegate is the node being asked to perform a fencing action
* on behalf of the node that owns the remote operation. Some operations
* will involve multiple delegates. This value represents the final delegate
* that is used. */
char *delegate;
/*! The point at which the remote operation completed */
time_t completed;
/*! The stonith_call_options associated with this remote operation */
long long call_options;
/*! The current state of the remote operation. This indicates
* what stage the op is in: query, exec, done, duplicate, or failed. */
enum op_state state;
/*! The node that owns the remote operation */
char *originator;
/*! The local client id that initiated the fencing request */
char *client_id;
/*! The client's call_id that initiated the fencing request */
int client_callid;
/*! The name of client that initiated the fencing request */
char *client_name;
/*! List of the received query results for all the nodes in the cpg group */
GListPtr query_results;
/*! The original request that initiated the remote stonith operation */
xmlNode *request;
/*! The current topology level being executed */
guint level;
/*! The current operation phase being executed */
enum st_remap_phase phase;
/*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */
GListPtr automatic_list;
/*! List of all devices at the currently executing topology level */
GListPtr devices_list;
/*! Current entry in the topology device list */
GListPtr devices;
/*! List of duplicate operations attached to this operation. Once this operation
* completes, the duplicate operations will be closed out as well. */
GListPtr duplicates;
} remote_fencing_op_t;
+/*!
+ * \internal
+ * \brief Broadcast the result of an operation to the peers.
+ * \param op, Operation whose result should be broadcast
+ * \param rc, Result of the operation
+ */
+void stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc);
+
enum st_callback_flags {
- st_callback_unknown = 0x0000,
- st_callback_notify_fence = 0x0001,
- st_callback_device_add = 0x0004,
- st_callback_device_del = 0x0010,
- st_callback_notify_history = 0x0020
+ st_callback_unknown = 0x0000,
+ st_callback_notify_fence = 0x0001,
+ st_callback_device_add = 0x0004,
+ st_callback_device_del = 0x0010,
+ st_callback_notify_history = 0x0020,
+ st_callback_notify_history_synced = 0x0040
};
/*
* Complex fencing requirements are specified via fencing topologies.
* A topology consists of levels; each level is a list of fencing devices.
* Topologies are stored in a hash table by node name. When a node needs to be
* fenced, if it has an entry in the topology table, the levels are tried
* sequentially, and the devices in each level are tried sequentially.
* Fencing is considered successful as soon as any level succeeds;
* a level is considered successful if all its devices succeed.
* Essentially, all devices at a given level are "and-ed" and the
* levels are "or-ed".
*
* This structure is used for the topology table entries.
* Topology levels start from 1, so levels[0] is unused and always NULL.
*/
typedef struct stonith_topology_s {
int kind;
/*! Node name regex or attribute name=value for which topology applies */
char *target;
char *target_value;
char *target_pattern;
char *target_attribute;
/*! Names of fencing devices at each topology level */
GListPtr levels[ST_LEVEL_MAX];
} stonith_topology_t;
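/* Worked example (illustrative): for a target with level 1 = { ipmi1 } and
 * level 2 = { pdu1, pdu2 }, ipmi1 is tried first; only if it fails are pdu1
 * and pdu2 both run, and the target counts as fenced as soon as one complete
 * level (all of its devices) succeeds.
 */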
void init_device_list(void);
void free_device_list(void);
void init_topology_list(void);
void free_topology_list(void);
void free_stonith_remote_op_list(void);
void init_stonith_remote_op_hash_table(GHashTable **table);
void free_metadata_cache(void);
long long get_stonith_flag(const char *name);
void stonith_command(crm_client_t * client, uint32_t id, uint32_t flags,
xmlNode * op_request, const char *remote_peer);
int stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib);
int stonith_device_remove(const char *id, gboolean from_cib);
char *stonith_level_key(xmlNode * msg, int mode);
int stonith_level_kind(xmlNode * msg);
int stonith_level_register(xmlNode * msg, char **desc);
int stonith_level_remove(xmlNode * msg, char **desc);
stonith_topology_t *find_topology_for_host(const char *host);
void do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply,
gboolean from_peer);
xmlNode *stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data,
int rc);
void
do_stonith_async_timeout_update(const char *client, const char *call_id, int timeout);
void do_stonith_notify(int options, const char *type, int result, xmlNode * data);
void do_stonith_notify_device(int options, const char *op, int rc, const char *desc);
void do_stonith_notify_level(int options, const char *op, int rc, const char *desc);
remote_fencing_op_t *initiate_remote_stonith_op(crm_client_t * client, xmlNode * request,
gboolean manual_ack);
int process_remote_stonith_exec(xmlNode * msg);
int process_remote_stonith_query(xmlNode * msg);
void *create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer);
int stonith_fence_history(xmlNode *msg, xmlNode **output,
const char *remote_peer, int options);
void stonith_fence_history_trim(void);
bool fencing_peer_active(crm_node_t *peer);
int stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op);
gboolean string_in_list(GListPtr list, const char *item);
gboolean node_has_attr(const char *node, const char *name, const char *value);
extern char *stonith_our_uname;
extern gboolean stand_alone;
extern GHashTable *device_list;
extern GHashTable *topology;
extern long stonith_watchdog_timeout_ms;
extern GHashTable *stonith_remote_op_list;
diff --git a/include/crm/stonith-ng.h b/include/crm/stonith-ng.h
index b6407324df..418a03c85e 100644
--- a/include/crm/stonith-ng.h
+++ b/include/crm/stonith-ng.h
@@ -1,554 +1,555 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Fencing, a.k.a. STONITH
* \ingroup fencing
*/
#ifndef STONITH_NG__H
# define STONITH_NG__H
# include <dlfcn.h>
# include <errno.h>
# include <stdbool.h> // bool
# include <stdint.h> // uint32_t
# include <time.h> // time_t
# define T_STONITH_NOTIFY_DISCONNECT "st_notify_disconnect"
# define T_STONITH_NOTIFY_FENCE "st_notify_fence"
# define T_STONITH_NOTIFY_HISTORY "st_notify_history"
+# define T_STONITH_NOTIFY_HISTORY_SYNCED "st_notify_history_synced"
/* *INDENT-OFF* */
enum stonith_state {
stonith_connected_command,
stonith_connected_query,
stonith_disconnected,
};
enum stonith_call_options {
st_opt_none = 0x00000000,
st_opt_verbose = 0x00000001,
st_opt_allow_suicide = 0x00000002,
st_opt_manual_ack = 0x00000008,
st_opt_discard_reply = 0x00000010,
/* st_opt_all_replies = 0x00000020, */
st_opt_topology = 0x00000040,
st_opt_scope_local = 0x00000100,
st_opt_cs_nodeid = 0x00000200,
st_opt_sync_call = 0x00001000,
/*! Allow the timeout period for a callback to be adjusted
* based on the time the server reports the operation will take. */
st_opt_timeout_updates = 0x00002000,
/*! Only report back if operation is a success in callback */
st_opt_report_only_success = 0x00004000,
/* used wherever appropriate - e.g. cleanup of history */
st_opt_cleanup = 0x000080000,
/* used wherever appropriate - e.g. send out a history query to all nodes */
st_opt_broadcast = 0x000100000,
};
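/* These options are a bitmask; callers normally OR them together, e.g. a
 * synchronous request scoped to the local fencer might pass
 * (st_opt_sync_call | st_opt_scope_local).
 */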
/*! Order matters here, do not change values */
enum op_state
{
st_query,
st_exec,
st_done,
st_duplicate,
st_failed,
};
// Supported fence agent interface standards
enum stonith_namespace {
st_namespace_invalid,
st_namespace_any,
st_namespace_internal, // Implemented internally by Pacemaker
/* Neither of these projects are active any longer, but the fence agent
* interfaces they created are still in use and supported by Pacemaker.
*/
st_namespace_rhcs, // Red Hat Cluster Suite compatible
st_namespace_lha, // Linux-HA compatible
};
enum stonith_namespace stonith_text2namespace(const char *namespace_s);
const char *stonith_namespace2text(enum stonith_namespace st_namespace);
enum stonith_namespace stonith_get_namespace(const char *agent,
const char *namespace_s);
typedef struct stonith_key_value_s {
char *key;
char *value;
struct stonith_key_value_s *next;
} stonith_key_value_t;
typedef struct stonith_history_s {
char *target;
char *action;
char *origin;
char *delegate;
char *client;
int state;
time_t completed;
struct stonith_history_s *next;
} stonith_history_t;
typedef struct stonith_s stonith_t;
typedef struct stonith_event_s
{
char *id;
char *type;
char *message;
char *operation;
int result;
char *origin;
char *target;
char *action;
char *executioner;
char *device;
/*! The name of the client that initiated the action. */
char *client_origin;
} stonith_event_t;
typedef struct stonith_callback_data_s
{
int rc;
int call_id;
void *userdata;
} stonith_callback_data_t;
typedef struct stonith_api_operations_s
{
/*!
* \brief Destroy the stonith api structure.
*/
int (*free) (stonith_t *st);
/*!
* \brief Connect to the local stonith daemon.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*connect) (stonith_t *st, const char *name, int *stonith_fd);
/*!
* \brief Disconnect from the local stonith daemon.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*disconnect)(stonith_t *st);
/*!
* \brief Remove a registered stonith device with the local stonith daemon.
*
* \note Synchronous, guaranteed to occur in daemon before function returns.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*remove_device)(
stonith_t *st, int options, const char *name);
/*!
* \brief Register a stonith device with the local stonith daemon.
*
* \note Synchronous, guaranteed to occur in daemon before function returns.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*register_device)(
stonith_t *st, int options, const char *id,
const char *provider, const char *agent, stonith_key_value_t *params);
/*!
* \brief Remove a fencing level for a specific node.
*
* \note This feature is not available when stonith is in standalone mode.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*remove_level)(
stonith_t *st, int options, const char *node, int level);
/*!
* \brief Register a fencing level containing the fencing devices to be used
* at that level for a specific node.
*
* \note This feature is not available when stonith is in standalone mode.
*
* \retval 0, success
* \retval negative error code on failure
*/
int (*register_level)(
stonith_t *st, int options, const char *node, int level, stonith_key_value_t *device_list);
/*!
* \brief Get the metadata documentation for a resource.
*
* \note Value is returned in output. Output must be freed when set.
*
* \retval 0 success
* \retval negative error code on failure
*/
int (*metadata)(stonith_t *st, int options,
const char *device, const char *provider, char **output, int timeout);
/*!
* \brief Retrieve a list of installed stonith agents
*
* \note if provider is not provided, all known agents will be returned
* \note list must be freed using stonith_key_value_freeall()
* \note call_options parameter is not used, it is reserved for future use.
*
* \retval num items in list on success
* \retval negative error code on failure
*/
int (*list_agents)(stonith_t *stonith, int call_options, const char *provider,
stonith_key_value_t **devices, int timeout);
/*!
* \brief Retrieve string listing hosts and port assignments from a local stonith device.
*
* \retval 0 on success
* \retval negative error code on failure
*/
int (*list)(stonith_t *st, int options, const char *id, char **list_output, int timeout);
/*!
* \brief Check to see if a local stonith device is reachable
*
* \retval 0 on success
* \retval negative error code on failure
*/
int (*monitor)(stonith_t *st, int options, const char *id, int timeout);
/*!
* \brief Check to see if a local stonith device's port is reachable
*
* \retval 0 on success
* \retval negative error code on failure
*/
int (*status)(stonith_t *st, int options, const char *id, const char *port, int timeout);
/*!
* \brief Retrieve a list of registered stonith devices.
*
* \note If node is provided, only devices that can fence that node
* will be returned.
*
* \retval num items in list on success
* \retval negative error code on failure
*/
int (*query)(stonith_t *st, int options, const char *node,
stonith_key_value_t **devices, int timeout);
/*!
* \brief Issue a fencing action against a node.
*
* \note Possible actions are 'on', 'off', and 'reboot'.
*
* \param st, stonith connection
* \param options, call options
* \param node, The target node to fence
* \param action, The fencing action to take
* \param timeout, The default per device timeout to use with each device
* capable of fencing the target.
*
* \retval 0 success
* \retval negative error code on failure.
*/
int (*fence)(stonith_t *st, int options, const char *node, const char *action,
int timeout, int tolerance);
/*!
* \brief Manually confirm that a node is down.
*
* \retval 0 success
* \retval negative error code on failure.
*/
int (*confirm)(stonith_t *st, int options, const char *node);
/*!
* \brief Retrieve a list of fencing operations that have occurred for a specific node.
*
* \note History is not available in standalone mode.
*
* \retval 0 success
* \retval negative error code on failure.
*/
int (*history)(stonith_t *st, int options, const char *node, stonith_history_t **output, int timeout);
int (*register_notification)(
stonith_t *st, const char *event,
void (*notify)(stonith_t *st, stonith_event_t *e));
int (*remove_notification)(stonith_t *st, const char *event);
/*!
* \brief Register a callback to receive the result of an asynchronous call
*
* \param[in] call_id The call ID to register callback for
* \param[in] timeout Default time to wait until callback expires
* \param[in] options Bitmask of \c stonith_call_options (respects
* \c st_opt_timeout_updates and
* \c st_opt_report_only_success)
* \param[in] userdata Pointer that will be given to callback
* \param[in] callback_name Unique name to identify callback
* \param[in] callback The callback function to register
*
* \return \c TRUE on success, \c FALSE if call_id is negative, -errno otherwise
*
* \todo This function should return \c pcmk_ok on success, and \c call_id
* when negative, but that would break backward compatibility.
*/
int (*register_callback)(stonith_t *st,
int call_id,
int timeout,
int options,
void *userdata,
const char *callback_name,
void (*callback)(stonith_t *st, stonith_callback_data_t *data));
/*!
* \brief Remove a registered callback for a given call id.
*/
int (*remove_callback)(stonith_t *st, int call_id, bool all_callbacks);
/*!
* \brief Remove fencing level for specific node, node regex or attribute
*
* \param[in] st Fencer connection to use
* \param[in] options Bitmask of stonith_call_options to pass to the fencer
* \param[in] node If not NULL, target level by this node name
* \param[in] pattern If not NULL, target by node name using this regex
* \param[in] attr If not NULL, target by this node attribute
* \param[in] value If not NULL, target by this node attribute value
* \param[in] level Index number of level to remove
*
* \return 0 on success, negative error code otherwise
*
* \note This feature is not available when stonith is in standalone mode.
* The caller should set only one of node, pattern or attr/value.
*/
int (*remove_level_full)(stonith_t *st, int options,
const char *node, const char *pattern,
const char *attr, const char *value, int level);
/*!
* \brief Register fencing level for specific node, node regex or attribute
*
* \param[in] st Fencer connection to use
* \param[in] options Bitmask of stonith_call_options to pass to fencer
* \param[in] node If not NULL, target level by this node name
* \param[in] pattern If not NULL, target by node name using this regex
* \param[in] attr If not NULL, target by this node attribute
* \param[in] value If not NULL, target by this node attribute value
* \param[in] level Index number of level to add
* \param[in] device_list Devices to use in level
*
* \return 0 on success, negative error code otherwise
*
* \note This feature is not available when stonith is in standalone mode.
* The caller should set only one of node, pattern or attr/value.
*/
int (*register_level_full)(stonith_t *st, int options,
const char *node, const char *pattern,
const char *attr, const char *value,
int level, stonith_key_value_t *device_list);
/*!
* \brief Validate an arbitrary stonith device configuration
*
* \param[in] st Stonithd connection to use
* \param[in] call_options Bitmask of stonith_call_options to use with fencer
* \param[in] rsc_id ID used to replace CIB secrets in params
* \param[in] namespace_s Namespace of fence agent to validate (optional)
* \param[in] agent Fence agent to validate
* \param[in] params Configuration parameters to pass to fence agent
* \param[in] timeout Fail if no response within this many seconds
* \param[out] output If non-NULL, where to store any agent output
* \param[out] error_output If non-NULL, where to store agent error output
*
* \return pcmk_ok if validation succeeds, -errno otherwise
*
* \note If pcmk_ok is returned, the caller is responsible for freeing
* the output (if requested).
*/
int (*validate)(stonith_t *st, int call_options, const char *rsc_id,
const char *namespace_s, const char *agent,
stonith_key_value_t *params, int timeout, char **output,
char **error_output);
} stonith_api_operations_t;
struct stonith_s
{
enum stonith_state state;
int call_id;
int call_timeout;
void *st_private;
stonith_api_operations_t *cmds;
};
/* *INDENT-ON* */
/* Core functions */
stonith_t *stonith_api_new(void);
void stonith_api_delete(stonith_t * st);
void stonith_dump_pending_callbacks(stonith_t * st);
// deprecated (use stonith_get_namespace() instead)
const char *get_stonith_provider(const char *agent, const char *provider);
bool stonith_dispatch(stonith_t * st);
stonith_key_value_t *stonith_key_value_add(stonith_key_value_t * kvp, const char *key,
const char *value);
void stonith_key_value_freeall(stonith_key_value_t * kvp, int keys, int values);
void stonith_history_free(stonith_history_t *history);
// Convenience functions
int stonith_api_connect_retry(stonith_t *st, const char *name,
int max_attempts);
/* Basic helpers that allow nodes to be fenced and the history to be
* queried without mainloop or the caller understanding the full API
*
* At least one of nodeid and uname is required
*/
int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off);
time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress);
/*
* Helpers for using the above functions without install-time dependencies
*
* Usage:
* #include <crm/stonith-ng.h>
*
* To turn a node off by corosync nodeid:
* stonith_api_kick_helper(nodeid, 120, 1);
*
* To check the last fence date/time (also by nodeid):
* last = stonith_api_time_helper(nodeid, 0);
*
* To check if fencing is in progress:
* if(stonith_api_time_helper(nodeid, 1) > 0) { ... }
*
* e.g.
#include <stdio.h>
#include <time.h>
#include <crm/stonith-ng.h>
int
main(int argc, char ** argv)
{
int rc = 0;
int nodeid = 102;
time_t when = stonith_api_time_helper(nodeid, 0);
printf("%d last fenced at %s\n", nodeid, ctime(&when));
rc = stonith_api_kick_helper(nodeid, 120, 1);
printf("%d fence result: %d\n", nodeid, rc);
when = stonith_api_time_helper(nodeid, 0);
printf("%d last fenced at %s\n", nodeid, ctime(&when));
return 0;
}
*/
# define STONITH_LIBRARY "libstonithd.so.26"
typedef int (*st_api_kick_fn) (int nodeid, const char *uname, int timeout, bool off);
typedef time_t (*st_api_time_fn) (int nodeid, const char *uname, bool in_progress);
static inline int
stonith_api_kick_helper(uint32_t nodeid, int timeout, bool off)
{
static void *st_library = NULL;
static st_api_kick_fn st_kick_fn;
if (st_library == NULL) {
st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY);
}
if (st_library && st_kick_fn == NULL) {
st_kick_fn = (st_api_kick_fn) dlsym(st_library, "stonith_api_kick");
}
if (st_kick_fn == NULL) {
#ifdef ELIBACC
return -ELIBACC;
#else
return -ENOSYS;
#endif
}
return (*st_kick_fn) (nodeid, NULL, timeout, off);
}
static inline time_t
stonith_api_time_helper(uint32_t nodeid, bool in_progress)
{
static void *st_library = NULL;
static st_api_time_fn st_time_fn;
if (st_library == NULL) {
st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY);
}
if (st_library && st_time_fn == NULL) {
st_time_fn = (st_api_time_fn) dlsym(st_library, "stonith_api_time");
}
if (st_time_fn == NULL) {
return 0;
}
return (*st_time_fn) (nodeid, NULL, in_progress);
}
/**
* Does the given agent describe a stonith resource that can exist?
*
* \param[in] agent What is the name of the agent?
* \param[in] timeout Timeout to use when querying. If 0 is given,
* use a default of 120.
*
* \return A boolean
*/
bool stonith_agent_exists(const char *agent, int timeout);
/*!
* \brief Turn stonith action into a more readable string.
*
* \param action Stonith action
*/
const char *stonith_action_str(const char *action);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index 629887a20d..ba23ac517a 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,2434 +1,2484 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <libgen.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/mainloop.h>
#if SUPPORT_CIBSECRETS
# include <crm/common/cib_secrets.h>
#endif
CRM_TRACE_INIT_DATA(stonith);
struct stonith_action_s {
/*! user defined data */
char *agent;
char *action;
char *victim;
GHashTable *args;
int timeout;
int async;
void *userdata;
void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data);
void (*fork_cb) (GPid pid, gpointer user_data);
svc_action_t *svc_action;
/*! internal timing information */
time_t initial_start_time;
int tries;
int remaining_timeout;
int max_retries;
/* device output data */
GPid pid;
int rc;
char *output;
char *error;
};
typedef struct stonith_private_s {
char *token;
crm_ipc_t *ipc;
mainloop_io_t *source;
GHashTable *stonith_op_callback_table;
GList *notify_list;
+ int notify_refcnt;
+ bool notify_deletes;
void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
} stonith_private_t;
typedef struct stonith_notify_client_s {
const char *event;
const char *obj_id; /* implement one day */
const char *obj_type; /* implement one day */
void (*notify) (stonith_t * st, stonith_event_t * e);
+ bool delete;
} stonith_notify_client_t;
typedef struct stonith_callback_client_s {
void (*callback) (stonith_t * st, stonith_callback_data_t * data);
const char *id;
void *user_data;
gboolean only_success;
gboolean allow_timeout_updates;
struct timer_rec_s *timer;
} stonith_callback_client_t;
struct notify_blob_s {
stonith_t *stonith;
xmlNode *xml;
};
struct timer_rec_s {
int call_id;
int timeout;
guint ref;
stonith_t *stonith;
};
typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *,
xmlNode *, xmlNode *, xmlNode **, xmlNode **);
bool stonith_dispatch(stonith_t * st);
xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
int call_options);
int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data,
xmlNode ** output_data, int call_options, int timeout);
static void stonith_connection_destroy(gpointer user_data);
static void stonith_send_notification(gpointer data, gpointer user_data);
static int internal_stonith_action_execute(stonith_action_t * action);
static void log_action(stonith_action_t *action, pid_t pid);
/*!
* \brief Get agent namespace by name
*
* \param[in] namespace_s Name of namespace as string
*
* \return Namespace as enum value
*/
enum stonith_namespace
stonith_text2namespace(const char *namespace_s)
{
if ((namespace_s == NULL) || !strcmp(namespace_s, "any")) {
return st_namespace_any;
} else if (!strcmp(namespace_s, "redhat")
|| !strcmp(namespace_s, "stonith-ng")) {
return st_namespace_rhcs;
} else if (!strcmp(namespace_s, "internal")) {
return st_namespace_internal;
} else if (!strcmp(namespace_s, "heartbeat")) {
return st_namespace_lha;
}
return st_namespace_invalid;
}
/*!
* \brief Get agent namespace name
*
* \param[in] namespace Namespace as enum value
*
* \return Namespace name as string
*/
const char *
stonith_namespace2text(enum stonith_namespace st_namespace)
{
switch (st_namespace) {
case st_namespace_any: return "any";
case st_namespace_rhcs: return "stonith-ng";
case st_namespace_internal: return "internal";
case st_namespace_lha: return "heartbeat";
default: break;
}
return "unsupported";
}
/*!
* \brief Determine namespace of a fence agent
*
* \param[in] agent Fence agent type
* \param[in] namespace_s Name of agent namespace as string, if known
*
* \return Namespace of specified agent, as enum value
*/
enum stonith_namespace
stonith_get_namespace(const char *agent, const char *namespace_s)
{
if (safe_str_eq(namespace_s, "internal")) {
return st_namespace_internal;
}
if (stonith__agent_is_rhcs(agent)) {
return st_namespace_rhcs;
}
#if HAVE_STONITH_STONITH_H
if (stonith__agent_is_lha(agent)) {
return st_namespace_lha;
}
#endif
crm_err("Unknown fence agent: %s", agent);
return st_namespace_invalid;
}
static void
log_action(stonith_action_t *action, pid_t pid)
{
if (action->output) {
/* Logging the whole string confuses syslog when the string is xml */
char *prefix = crm_strdup_printf("%s[%d] stdout:", action->agent, pid);
crm_log_output(LOG_TRACE, prefix, action->output);
free(prefix);
}
if (action->error) {
/* Logging the whole string confuses syslog when the string is xml */
char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid);
crm_log_output(LOG_WARNING, prefix, action->error);
free(prefix);
}
}
+/* When cycling through the list we don't want to delete items, so we just
+   mark them; once we know nobody is using the list, we loop over it and
+   remove the marked items.
+ */
+static void
+foreach_notify_entry (stonith_private_t *private,
+ GFunc func,
+ gpointer user_data)
+{
+ private->notify_refcnt++;
+ g_list_foreach(private->notify_list, func, user_data);
+ private->notify_refcnt--;
+ if ((private->notify_refcnt == 0) &&
+ private->notify_deletes) {
+ GList *list_item = private->notify_list;
+
+ private->notify_deletes = FALSE;
+ while (list_item != NULL)
+ {
+ stonith_notify_client_t *list_client = list_item->data;
+ GList *next = g_list_next(list_item);
+
+ if (list_client->delete) {
+ free(list_client);
+ private->notify_list =
+ g_list_delete_link(private->notify_list, list_item);
+ }
+ list_item = next;
+ }
+ }
+}
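+/* E.g. if a notification callback unregisters itself from inside this loop,
+ * the removal path (elsewhere in this changeset) is expected to only set the
+ * entry's delete flag and notify_deletes while notify_refcnt > 0; the sweep
+ * above then frees the entry once the outermost iteration unwinds.
+ */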
+
static void
stonith_connection_destroy(gpointer user_data)
{
stonith_t *stonith = user_data;
stonith_private_t *native = NULL;
struct notify_blob_s blob;
crm_trace("Sending destroyed notification");
blob.stonith = stonith;
blob.xml = create_xml_node(NULL, "notify");
native = stonith->st_private;
native->ipc = NULL;
native->source = NULL;
stonith->state = stonith_disconnected;
crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
- g_list_foreach(native->notify_list, stonith_send_notification, &blob);
+ foreach_notify_entry(native, stonith_send_notification, &blob);
free_xml(blob.xml);
}
xmlNode *
create_device_registration_xml(const char *id, enum stonith_namespace namespace,
const char *agent, stonith_key_value_t *params,
const char *rsc_provides)
{
xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
#if HAVE_STONITH_STONITH_H
if (namespace == st_namespace_any) {
namespace = stonith_get_namespace(agent, NULL);
}
if (namespace == st_namespace_lha) {
hash2field((gpointer) "plugin", (gpointer) agent, args);
agent = "fence_legacy";
}
#endif
crm_xml_add(data, XML_ATTR_ID, id);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, "agent", agent);
if ((namespace != st_namespace_any) && (namespace != st_namespace_invalid)) {
crm_xml_add(data, "namespace", stonith_namespace2text(namespace));
}
if (rsc_provides) {
crm_xml_add(data, "rsc_provides", rsc_provides);
}
for (; params; params = params->next) {
hash2field((gpointer) params->key, (gpointer) params->value, args);
}
return data;
}
static int
stonith_api_register_device(stonith_t * st, int call_options,
const char *id, const char *namespace, const char *agent,
stonith_key_value_t * params)
{
int rc = 0;
xmlNode *data = NULL;
data = create_device_registration_xml(id, stonith_text2namespace(namespace),
agent, params, NULL);
rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_remove_device(stonith_t * st, int call_options, const char *name)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, XML_ATTR_ID, name);
rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_remove_level_full(stonith_t *st, int options,
const char *node, const char *pattern,
const char *attr, const char *value, int level)
{
int rc = 0;
xmlNode *data = NULL;
CRM_CHECK(node || pattern || (attr && value), return -EINVAL);
data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
if (node) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
} else if (pattern) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
} else {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
}
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_remove_level(stonith_t * st, int options, const char *node, int level)
{
return stonith_api_remove_level_full(st, options, node,
NULL, NULL, NULL, level);
}
/*!
* \internal
* \brief Create XML for fence topology level registration request
*
* \param[in] node If not NULL, target level by this node name
* \param[in] pattern If not NULL, target by node name using this regex
* \param[in] attr If not NULL, target by this node attribute
* \param[in] value If not NULL, target by this node attribute value
* \param[in] level Index number of level to register
* \param[in] device_list List of devices in level
*
* \return Newly allocated XML tree on success, NULL otherwise
*
* \note The caller should set only one of node, pattern or attr/value.
*/
xmlNode *
create_level_registration_xml(const char *node, const char *pattern,
const char *attr, const char *value,
int level, stonith_key_value_t *device_list)
{
int len = 0;
char *list = NULL;
xmlNode *data;
CRM_CHECK(node || pattern || (attr && value), return NULL);
data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
CRM_CHECK(data, return NULL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add_int(data, XML_ATTR_ID, level);
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
if (node) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
} else if (pattern) {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
} else {
crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
}
for (; device_list; device_list = device_list->next) {
int adding = strlen(device_list->value);
if(list) {
adding++; /* +1 space */
}
crm_trace("Adding %s (%dc) at offset %d", device_list->value, adding, len);
list = realloc_safe(list, len + adding + 1); /* +1 EOS */
if (list == NULL) {
crm_perror(LOG_CRIT, "Could not create device list");
free_xml(data);
return NULL;
}
sprintf(list + len, "%s%s", len?",":"", device_list->value);
len += adding;
}
crm_xml_add(data, XML_ATTR_STONITH_DEVICES, list);
free(list);
return data;
}
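/* For illustration: device_list entries "disk1" then "network1" produce the
 * attribute value "disk1,network1" via the realloc/sprintf loop above.
 */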
static int
stonith_api_register_level_full(stonith_t * st, int options, const char *node,
const char *pattern,
const char *attr, const char *value,
int level, stonith_key_value_t *device_list)
{
int rc = 0;
xmlNode *data = create_level_registration_xml(node, pattern, attr, value,
level, device_list);
CRM_CHECK(data != NULL, return -EINVAL);
rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0);
free_xml(data);
return rc;
}
static int
stonith_api_register_level(stonith_t * st, int options, const char *node, int level,
stonith_key_value_t * device_list)
{
return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL,
level, device_list);
}
static void
append_arg(const char *key, const char *value, GHashTable **args)
{
CRM_CHECK(key != NULL, return);
CRM_CHECK(value != NULL, return);
CRM_CHECK(args != NULL, return);
if (strstr(key, "pcmk_")) {
return;
} else if (strstr(key, CRM_META)) {
return;
} else if (safe_str_eq(key, "crm_feature_set")) {
return;
}
if (!*args) {
*args = crm_str_table_new();
}
CRM_CHECK(*args != NULL, return);
crm_trace("Appending: %s=%s", key, value);
g_hash_table_replace(*args, strdup(key), strdup(value));
}
static void
append_config_arg(gpointer key, gpointer value, gpointer user_data)
{
/* The fencer will filter action out when it registers the device,
* but ignore it here just in case any other library callers
* fail to do so.
*/
if (safe_str_neq(key, STONITH_ATTR_ACTION_OP)) {
append_arg(key, value, user_data);
return;
}
}
static GHashTable *
make_args(const char *agent, const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args,
GHashTable * port_map)
{
char buffer[512];
GHashTable *arg_list = NULL;
const char *value = NULL;
CRM_CHECK(action != NULL, return NULL);
snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action);
if (device_args) {
value = g_hash_table_lookup(device_args, buffer);
}
if (value) {
crm_info("Substituting action '%s' for requested operation '%s'", value, action);
action = value;
}
append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list);
if (victim && device_args) {
const char *alias = victim;
const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG);
if (port_map && g_hash_table_lookup(port_map, victim)) {
alias = g_hash_table_lookup(port_map, victim);
}
/* Always supply the node's name, too:
* https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md
*/
append_arg("nodename", victim, &arg_list);
if (victim_nodeid) {
char nodeid_str[33] = { 0, };
if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) {
crm_info("For stonith action (%s) for victim %s, adding nodeid (%s) to parameters",
action, victim, nodeid_str);
append_arg("nodeid", nodeid_str, &arg_list);
}
}
/* Check if we need to supply the victim in any other form */
if(safe_str_eq(agent, "fence_legacy")) {
value = agent;
} else if (param == NULL) {
param = "port";
value = g_hash_table_lookup(device_args, param);
} else if (safe_str_eq(param, "none")) {
value = param; /* Nothing more to do */
} else {
value = g_hash_table_lookup(device_args, param);
}
/* Don't overwrite explicitly set values for $param */
if (value == NULL || safe_str_eq(value, "dynamic")) {
crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param,
alias);
append_arg(param, alias, &arg_list);
}
}
if (device_args) {
g_hash_table_foreach(device_args, append_config_arg, &arg_list);
}
return arg_list;
}
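/* Hypothetical example: with the device's STONITH_ATTR_HOSTARG parameter
 * unset, no explicit "port" configured, and a port_map entry node1 -> "3",
 * fencing node1 yields the arguments nodename=node1 and port=3, plus any
 * other configured device parameters appended above.
 */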
/*!
* \internal
* \brief Free all memory used by a stonith action
*
* \param[in,out] action Action to free
*/
void
stonith__destroy_action(stonith_action_t *action)
{
if (action) {
free(action->agent);
if (action->args) {
g_hash_table_destroy(action->args);
}
free(action->action);
free(action->victim);
if (action->svc_action) {
services_action_free(action->svc_action);
}
free(action->output);
free(action->error);
free(action);
}
}
/*!
* \internal
* \brief Get the result of an executed stonith action
*
* \param[in,out] action Executed action
* \param[out] rc Where to store result code (or NULL)
* \param[out] output Where to store standard output (or NULL)
* \param[out] error_output Where to store standard error output (or NULL)
*
* \note If output or error_output is not NULL, the caller is responsible for
* freeing the memory.
*/
void
stonith__action_result(stonith_action_t *action, int *rc, char **output,
char **error_output)
{
if (rc) {
*rc = pcmk_ok;
}
if (output) {
*output = NULL;
}
if (error_output) {
*error_output = NULL;
}
if (action != NULL) {
if (rc) {
*rc = action->rc;
}
if (output && action->output) {
*output = action->output;
action->output = NULL; // hand off memory management to caller
}
if (error_output && action->error) {
*error_output = action->error;
action->error = NULL; // hand off memory management to caller
}
}
}
#define FAILURE_MAX_RETRIES 2
stonith_action_t *
stonith_action_create(const char *agent,
const char *_action,
const char *victim,
uint32_t victim_nodeid,
int timeout, GHashTable * device_args, GHashTable * port_map)
{
stonith_action_t *action;
action = calloc(1, sizeof(stonith_action_t));
action->args = make_args(agent, _action, victim, victim_nodeid, device_args, port_map);
crm_debug("Preparing '%s' action for %s using agent %s",
_action, (victim? victim : "no target"), agent);
action->agent = strdup(agent);
action->action = strdup(_action);
if (victim) {
action->victim = strdup(victim);
}
action->timeout = action->remaining_timeout = timeout;
action->max_retries = FAILURE_MAX_RETRIES;
if (device_args) {
char buffer[512];
const char *value = NULL;
snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", _action);
value = g_hash_table_lookup(device_args, buffer);
if (value) {
action->max_retries = atoi(value);
}
}
return action;
}
static gboolean
update_remaining_timeout(stonith_action_t * action)
{
int diff = time(NULL) - action->initial_start_time;
if (action->tries >= action->max_retries) {
crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed",
action->agent, action->action, action->max_retries);
action->remaining_timeout = 0;
} else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) {
/* only set remaining timeout period if there is 30%
* or greater of the original timeout period left */
action->remaining_timeout = action->timeout - diff;
} else {
action->remaining_timeout = 0;
}
return action->remaining_timeout ? TRUE : FALSE;
}
static int
svc_action_to_errno(svc_action_t *svc_action) {
int rv = pcmk_ok;
if (svc_action->rc > 0) {
/* Try to provide a useful error code based on the fence agent's
* error output.
*/
if (svc_action->rc == PCMK_OCF_TIMEOUT) {
rv = -ETIME;
} else if (svc_action->stderr_data == NULL) {
rv = -ENODATA;
} else if (strstr(svc_action->stderr_data, "imed out")) {
/* Some agents have their own internal timeouts */
rv = -ETIME;
} else if (strstr(svc_action->stderr_data, "Unrecognised action")) {
rv = -EOPNOTSUPP;
} else {
rv = -pcmk_err_generic;
}
}
return rv;
}
static void
stonith_action_async_done(svc_action_t *svc_action)
{
stonith_action_t *action = (stonith_action_t *) svc_action->cb_data;
action->rc = svc_action_to_errno(svc_action);
action->output = svc_action->stdout_data;
svc_action->stdout_data = NULL;
action->error = svc_action->stderr_data;
svc_action->stderr_data = NULL;
svc_action->params = NULL;
crm_debug("Child process %d performing action '%s' exited with rc %d",
action->pid, action->action, svc_action->rc);
log_action(action, action->pid);
if (action->rc != pcmk_ok && update_remaining_timeout(action)) {
int rc = internal_stonith_action_execute(action);
if (rc == pcmk_ok) {
return;
}
}
if (action->done_cb) {
action->done_cb(action->pid, action->rc, action->output, action->userdata);
}
action->svc_action = NULL; // don't remove our caller
stonith__destroy_action(action);
}
static void
stonith_action_async_forked(svc_action_t *svc_action)
{
stonith_action_t *action = (stonith_action_t *) svc_action->cb_data;
action->pid = svc_action->pid;
action->svc_action = svc_action;
if (action->fork_cb) {
(action->fork_cb) (svc_action->pid, action->userdata);
}
crm_trace("Child process %d performing action '%s' successfully forked",
action->pid, action->action);
}
static int
internal_stonith_action_execute(stonith_action_t * action)
{
int rc = -EPROTO;
int is_retry = 0;
svc_action_t *svc_action = NULL;
static int stonith_sequence = 0;
char *buffer = NULL;
if (!action->tries) {
action->initial_start_time = time(NULL);
}
action->tries++;
if (action->tries > 1) {
crm_info("Attempt %d to execute %s (%s). remaining timeout is %d",
action->tries, action->agent, action->action, action->remaining_timeout);
is_retry = 1;
}
    if ((action->args == NULL) || (action->agent == NULL)) {
        goto fail;
    }
buffer = crm_strdup_printf(RH_STONITH_DIR "/%s", basename(action->agent));
svc_action = services_action_create_generic(buffer, NULL);
free(buffer);
svc_action->timeout = 1000 * action->remaining_timeout;
svc_action->standard = strdup(PCMK_RESOURCE_CLASS_STONITH);
svc_action->id = crm_strdup_printf("%s_%s_%d", basename(action->agent),
action->action, action->tries);
svc_action->agent = strdup(action->agent);
svc_action->sequence = stonith_sequence++;
svc_action->params = action->args;
svc_action->cb_data = (void *) action;
/* keep retries from executing out of control and free previous results */
if (is_retry) {
free(action->output);
action->output = NULL;
free(action->error);
action->error = NULL;
sleep(1);
}
if (action->async) {
/* async */
if(services_action_async_fork_notify(svc_action,
&stonith_action_async_done,
&stonith_action_async_forked) == FALSE) {
services_action_free(svc_action);
svc_action = NULL;
} else {
rc = 0;
}
} else {
/* sync */
if (services_action_sync(svc_action)) {
rc = 0;
action->rc = svc_action_to_errno(svc_action);
action->output = svc_action->stdout_data;
svc_action->stdout_data = NULL;
action->error = svc_action->stderr_data;
svc_action->stderr_data = NULL;
} else {
action->rc = -ECONNABORTED;
rc = action->rc;
}
svc_action->params = NULL;
services_action_free(svc_action);
}
fail:
return rc;
}
/*!
* \internal
* \brief Kick off execution of an async stonith action
*
 * \param[in,out] action    Action to be executed
 * \param[in,out] userdata  Data pointer to be passed to callbacks
 * \param[in]     done      Callback to invoke when the action has completed
 * \param[in]     fork_cb   Callback to invoke after successful fork of child
*
* \return pcmk_ok if ownership of action has been taken, -errno otherwise
*/
int
stonith_action_execute_async(stonith_action_t * action,
void *userdata,
void (*done) (GPid pid, int rc, const char *output,
gpointer user_data),
void (*fork_cb) (GPid pid, gpointer user_data))
{
if (!action) {
return -EINVAL;
}
action->userdata = userdata;
action->done_cb = done;
action->fork_cb = fork_cb;
action->async = 1;
return internal_stonith_action_execute(action);
}
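/* Illustrative async usage (a sketch; my_done_cb is hypothetical):
 *
 *     static void
 *     my_done_cb(GPid pid, int rc, const char *output, gpointer user_data)
 *     {
 *         crm_info("Fence action completed: %s", pcmk_strerror(rc));
 *     }
 *
 *     ...
 *     if (stonith_action_execute_async(action, NULL, my_done_cb, NULL) < 0) {
 *         // on failure we still own the action; on success the library
 *         // frees it after invoking the callback
 *         stonith__destroy_action(action);
 *     }
 */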
/*!
* \internal
* \brief Execute a stonith action
*
* \param[in,out] action Action to execute
*
* \return pcmk_ok on success, -errno otherwise
*/
int
stonith__execute(stonith_action_t *action)
{
int rc = pcmk_ok;
CRM_CHECK(action != NULL, return -EINVAL);
// Keep trying until success, max retries, or timeout
do {
rc = internal_stonith_action_execute(action);
} while ((rc != pcmk_ok) && update_remaining_timeout(action));
return rc;
}
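/* Illustrative synchronous lifecycle (a sketch; the agent name and timeout
 * are hypothetical):
 *
 *     stonith_action_t *action = stonith_action_create("fence_example",
 *                                                      "monitor", NULL, 0,
 *                                                      30, NULL, NULL);
 *     if (stonith__execute(action) == pcmk_ok) {
 *         int rc = pcmk_ok;
 *         char *output = NULL;
 *
 *         stonith__action_result(action, &rc, &output, NULL);
 *         crm_info("Agent returned %d", rc);
 *         free(output); // ownership was handed off to us
 *     }
 *     stonith__destroy_action(action);
 */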
static int
stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace,
stonith_key_value_t ** devices, int timeout)
{
int count = 0;
enum stonith_namespace ns = stonith_text2namespace(namespace);
if (devices == NULL) {
crm_err("Parameter error: stonith_api_device_list");
return -EFAULT;
}
#if HAVE_STONITH_STONITH_H
// Include Linux-HA agents if requested
if ((ns == st_namespace_any) || (ns == st_namespace_lha)) {
count += stonith__list_lha_agents(devices);
}
#endif
// Include Red Hat agents if requested
if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) {
count += stonith__list_rhcs_agents(devices);
}
return count;
}
static int
stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent,
const char *namespace, char **output, int timeout)
{
/* By executing meta-data directly, we can get it from stonith_admin when
* the cluster is not running, which is important for higher-level tools.
*/
enum stonith_namespace ns = stonith_get_namespace(agent, namespace);
crm_trace("Looking up metadata for %s agent %s",
stonith_namespace2text(ns), agent);
switch (ns) {
case st_namespace_rhcs:
return stonith__rhcs_metadata(agent, timeout, output);
#if HAVE_STONITH_STONITH_H
case st_namespace_lha:
return stonith__lha_metadata(agent, timeout, output);
#endif
default:
errno = EINVAL;
crm_perror(LOG_ERR,
"Agent %s not found or does not support meta-data",
agent);
break;
}
return -EINVAL;
}
static int
stonith_api_query(stonith_t * stonith, int call_options, const char *target,
stonith_key_value_t ** devices, int timeout)
{
int rc = 0, lpc = 0, max = 0;
xmlNode *data = NULL;
xmlNode *output = NULL;
xmlXPathObjectPtr xpathObj = NULL;
CRM_CHECK(devices != NULL, return -EINVAL);
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, target);
crm_xml_add(data, F_STONITH_ACTION, "off");
rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
if (rc < 0) {
return rc;
}
xpathObj = xpath_search(output, "//@agent");
if (xpathObj) {
max = numXpathResults(xpathObj);
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match != NULL) {
xmlChar *match_path = xmlGetNodePath(match);
crm_info("%s[%d] = %s", "//@agent", lpc, match_path);
free(match_path);
*devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID));
}
}
freeXpathObject(xpathObj);
}
free_xml(output);
free_xml(data);
return max;
}
static int
stonith_api_call(stonith_t * stonith,
int call_options,
const char *id,
const char *action, const char *victim, int timeout, xmlNode ** output)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add(data, F_STONITH_DEVICE, id);
crm_xml_add(data, F_STONITH_ACTION, action);
crm_xml_add(data, F_STONITH_TARGET, victim);
rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout);
free_xml(data);
return rc;
}
static int
stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info,
int timeout)
{
int rc;
xmlNode *output = NULL;
rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
if (output && list_info) {
const char *list_str;
list_str = crm_element_value(output, "st_output");
if (list_str) {
*list_info = strdup(list_str);
}
}
if (output) {
free_xml(output);
}
return rc;
}
static int
stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
{
return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
}
static int
stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
int timeout)
{
return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
}
static int
stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action,
int timeout, int tolerance)
{
int rc = 0;
xmlNode *data = NULL;
data = create_xml_node(NULL, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, node);
crm_xml_add(data, F_STONITH_ACTION, action);
crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout);
crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance);
rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout);
free_xml(data);
return rc;
}
static int
stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
{
return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0, 0);
}
static int
stonith_api_history(stonith_t * stonith, int call_options, const char *node,
stonith_history_t ** history, int timeout)
{
int rc = 0;
xmlNode *data = NULL;
xmlNode *output = NULL;
stonith_history_t *last = NULL;
*history = NULL;
if (node) {
data = create_xml_node(NULL, __FUNCTION__);
crm_xml_add(data, F_STONITH_TARGET, node);
}
rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output,
call_options | st_opt_sync_call, timeout);
free_xml(data);
if (rc == 0) {
xmlNode *op = NULL;
xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_TRACE);
for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) {
stonith_history_t *kvp;
int completed;
kvp = calloc(1, sizeof(stonith_history_t));
kvp->target = crm_element_value_copy(op, F_STONITH_TARGET);
kvp->action = crm_element_value_copy(op, F_STONITH_ACTION);
kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN);
kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE);
kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME);
crm_element_value_int(op, F_STONITH_DATE, &completed);
kvp->completed = (time_t) completed;
crm_element_value_int(op, F_STONITH_STATE, &kvp->state);
if (last) {
last->next = kvp;
} else {
*history = kvp;
}
last = kvp;
}
}
free_xml(output);
return rc;
}
void stonith_history_free(stonith_history_t *history)
{
stonith_history_t *hp, *hp_old;
for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) {
free(hp->target);
free(hp->action);
free(hp->origin);
free(hp->delegate);
free(hp->client);
}
}
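/* Illustrative sketch of retrieving and printing fence history via the API
 * (connection setup is omitted and the node name is hypothetical):
 *
 *     stonith_history_t *history = NULL;
 *
 *     if (st->cmds->history(st, st_opt_sync_call, "node1", &history,
 *                           120) == pcmk_ok) {
 *         for (stonith_history_t *hp = history; hp; hp = hp->next) {
 *             crm_info("%s of %s requested by %s: state %d",
 *                      hp->action, hp->target, hp->client, hp->state);
 *         }
 *     }
 *     stonith_history_free(history);
 */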
/*!
* \brief Deprecated (use stonith_get_namespace() instead)
*/
const char *
get_stonith_provider(const char *agent, const char *provider)
{
return stonith_namespace2text(stonith_get_namespace(agent, provider));
}
static gint
stonithlib_GCompareFunc(gconstpointer a, gconstpointer b)
{
int rc = 0;
const stonith_notify_client_t *a_client = a;
const stonith_notify_client_t *b_client = b;
+ if (a_client->delete || b_client->delete) {
+ /* make entries marked for deletion not findable */
+ return -1;
+ }
CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0);
rc = strcmp(a_client->event, b_client->event);
if (rc == 0) {
if (a_client->notify == NULL || b_client->notify == NULL) {
return 0;
} else if (a_client->notify == b_client->notify) {
return 0;
} else if (((long)a_client->notify) < ((long)b_client->notify)) {
crm_err("callbacks for %s are not equal: %p vs. %p",
a_client->event, a_client->notify, b_client->notify);
return -1;
}
crm_err("callbacks for %s are not equal: %p vs. %p",
a_client->event, a_client->notify, b_client->notify);
return 1;
}
return rc;
}
xmlNode *
stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options)
{
xmlNode *op_msg = create_xml_node(NULL, "stonith_command");
CRM_CHECK(op_msg != NULL, return NULL);
CRM_CHECK(token != NULL, return NULL);
crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command");
crm_xml_add(op_msg, F_TYPE, T_STONITH_NG);
crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token);
crm_xml_add(op_msg, F_STONITH_OPERATION, op);
crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id);
crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options);
if (data != NULL) {
add_message_xml(op_msg, F_STONITH_CALLDATA, data);
}
return op_msg;
}
static void
stonith_destroy_op_callback(gpointer data)
{
stonith_callback_client_t *blob = data;
if (blob->timer && blob->timer->ref > 0) {
g_source_remove(blob->timer->ref);
}
free(blob->timer);
free(blob);
}
static int
stonith_api_signoff(stonith_t * stonith)
{
stonith_private_t *native = stonith->st_private;
crm_debug("Disconnecting from the fencer");
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
native->ipc = NULL;
} else if (native->ipc) {
/* Not attached to mainloop */
crm_ipc_t *ipc = native->ipc;
native->ipc = NULL;
crm_ipc_close(ipc);
crm_ipc_destroy(ipc);
}
free(native->token); native->token = NULL;
stonith->state = stonith_disconnected;
return pcmk_ok;
}
static int
stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks)
{
stonith_private_t *private = stonith->st_private;
if (all_callbacks) {
private->op_callback = NULL;
g_hash_table_destroy(private->stonith_op_callback_table);
private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL,
stonith_destroy_op_callback);
} else if (call_id == 0) {
private->op_callback = NULL;
} else {
g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
}
return pcmk_ok;
}
static void
invoke_callback(stonith_t * st, int call_id, int rc, void *userdata,
void (*callback) (stonith_t * st, stonith_callback_data_t * data))
{
stonith_callback_data_t data = { 0, };
data.call_id = call_id;
data.rc = rc;
data.userdata = userdata;
callback(st, &data);
}
static void
stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc)
{
stonith_private_t *private = NULL;
stonith_callback_client_t *blob = NULL;
stonith_callback_client_t local_blob;
CRM_CHECK(stonith != NULL, return);
CRM_CHECK(stonith->st_private != NULL, return);
private = stonith->st_private;
local_blob.id = NULL;
local_blob.callback = NULL;
local_blob.user_data = NULL;
local_blob.only_success = FALSE;
if (msg != NULL) {
crm_element_value_int(msg, F_STONITH_RC, &rc);
crm_element_value_int(msg, F_STONITH_CALLID, &call_id);
}
CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result"));
blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
if (blob != NULL) {
local_blob = *blob;
blob = NULL;
stonith_api_del_callback(stonith, call_id, FALSE);
} else {
crm_trace("No callback found for call %d", call_id);
local_blob.callback = NULL;
}
if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) {
crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id);
invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback);
} else if (private->op_callback == NULL && rc != pcmk_ok) {
crm_warn("Fencing command failed: %s", pcmk_strerror(rc));
crm_log_xml_debug(msg, "Failed fence update");
}
if (private->op_callback != NULL) {
crm_trace("Invoking global callback for call %d", call_id);
invoke_callback(stonith, call_id, rc, NULL, private->op_callback);
}
crm_trace("OP callback activated.");
}
static gboolean
stonith_async_timeout_handler(gpointer data)
{
struct timer_rec_s *timer = data;
crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout);
stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME);
    /* Always return TRUE, never remove the handler
     * (stonith_api_del_callback() does that)
     */
return TRUE;
}
static void
set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id,
int timeout)
{
struct timer_rec_s *async_timer = callback->timer;
if (timeout <= 0) {
return;
}
if (!async_timer) {
async_timer = calloc(1, sizeof(struct timer_rec_s));
callback->timer = async_timer;
}
async_timer->stonith = stonith;
async_timer->call_id = call_id;
    /* Allow a fair bit of grace so the server has a chance to tell us about
     * a timeout itself; this timer is only a fallback
     */
async_timer->timeout = (timeout + 60) * 1000;
if (async_timer->ref) {
g_source_remove(async_timer->ref);
}
async_timer->ref =
g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer);
}
static void
update_callback_timeout(int call_id, int timeout, stonith_t * st)
{
stonith_callback_client_t *callback = NULL;
stonith_private_t *private = st->st_private;
callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
if (!callback || !callback->allow_timeout_updates) {
return;
}
set_callback_timeout(callback, st, call_id, timeout);
}
static int
stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
{
const char *type = NULL;
struct notify_blob_s blob;
stonith_t *st = userdata;
stonith_private_t *private = NULL;
CRM_ASSERT(st != NULL);
private = st->st_private;
blob.stonith = st;
blob.xml = string2xml(buffer);
if (blob.xml == NULL) {
crm_warn("Received malformed message from fencer: %s", buffer);
return 0;
}
/* do callbacks */
type = crm_element_value(blob.xml, F_TYPE);
crm_trace("Activating %s callbacks...", type);
if (safe_str_eq(type, T_STONITH_NG)) {
stonith_perform_callback(st, blob.xml, 0, 0);
} else if (safe_str_eq(type, T_STONITH_NOTIFY)) {
- g_list_foreach(private->notify_list, stonith_send_notification, &blob);
+ foreach_notify_entry(private, stonith_send_notification, &blob);
} else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) {
int call_id = 0;
int timeout = 0;
crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout);
crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id);
update_callback_timeout(call_id, timeout, st);
} else {
crm_err("Unknown message type: %s", type);
crm_log_xml_warn(blob.xml, "BadReply");
}
free_xml(blob.xml);
return 1;
}
static int
stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
{
int rc = pcmk_ok;
stonith_private_t *native = NULL;
const char *display_name = name? name : "client";
static struct ipc_client_callbacks st_callbacks = {
.dispatch = stonith_dispatch_internal,
.destroy = stonith_connection_destroy
};
CRM_CHECK(stonith != NULL, return -EINVAL);
native = stonith->st_private;
CRM_ASSERT(native != NULL);
crm_debug("Attempting fencer connection by %s with%s mainloop",
display_name, (stonith_fd? "out" : ""));
stonith->state = stonith_connected_command;
if (stonith_fd) {
/* No mainloop */
native->ipc = crm_ipc_new("stonith-ng", 0);
if (native->ipc && crm_ipc_connect(native->ipc)) {
*stonith_fd = crm_ipc_get_fd(native->ipc);
} else if (native->ipc) {
crm_ipc_close(native->ipc);
crm_ipc_destroy(native->ipc);
native->ipc = NULL;
}
} else {
/* With mainloop */
native->source =
mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks);
native->ipc = mainloop_get_ipc_client(native->source);
}
if (native->ipc == NULL) {
rc = -ENOTCONN;
} else {
xmlNode *reply = NULL;
xmlNode *hello = create_xml_node(NULL, "stonith_command");
crm_xml_add(hello, F_TYPE, T_STONITH_NG);
crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER);
crm_xml_add(hello, F_STONITH_CLIENTNAME, name);
rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply);
if (rc < 0) {
crm_debug("Couldn't register with the fencer: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
rc = -ECOMM;
} else if (reply == NULL) {
crm_debug("Couldn't register with the fencer: no reply");
rc = -EPROTO;
} else {
const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION);
const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID);
if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
crm_debug("Couldn't register with the fencer: invalid reply type '%s'",
(msg_type? msg_type : "(missing)"));
crm_log_xml_debug(reply, "Invalid fencer reply");
rc = -EPROTO;
} else if (tmp_ticket == NULL) {
crm_debug("Couldn't register with the fencer: no token in reply");
crm_log_xml_debug(reply, "Invalid fencer reply");
rc = -EPROTO;
} else {
native->token = strdup(tmp_ticket);
#if HAVE_MSGFROMIPC_TIMEOUT
stonith->call_timeout = MAX_IPC_DELAY;
#endif
crm_debug("Connection to fencer by %s succeeded (registration token: %s)",
display_name, native->token);
rc = pcmk_ok;
}
}
free_xml(reply);
free_xml(hello);
}
if (rc != pcmk_ok) {
crm_debug("Connection attempt to fencer by %s failed: %s "
CRM_XS " rc=%d", display_name, pcmk_strerror(rc), rc);
stonith->cmds->disconnect(stonith);
}
return rc;
}
static int
stonith_set_notification(stonith_t * stonith, const char *callback, int enabled)
{
int rc = pcmk_ok;
xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__);
stonith_private_t *native = stonith->st_private;
if (stonith->state != stonith_disconnected) {
crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY);
if (enabled) {
crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback);
} else {
crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback);
}
rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL);
if (rc < 0) {
crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc);
rc = -ECOMM;
} else {
rc = pcmk_ok;
}
}
free_xml(notify_msg);
return rc;
}
static int
stonith_api_add_notification(stonith_t * stonith, const char *event,
void (*callback) (stonith_t * stonith, stonith_event_t * e))
{
GList *list_item = NULL;
stonith_notify_client_t *new_client = NULL;
stonith_private_t *private = NULL;
private = stonith->st_private;
crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list));
new_client = calloc(1, sizeof(stonith_notify_client_t));
new_client->event = event;
new_client->notify = callback;
list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
if (list_item != NULL) {
crm_warn("Callback already present");
free(new_client);
return -ENOTUNIQ;
} else {
private->notify_list = g_list_append(private->notify_list, new_client);
stonith_set_notification(stonith, event, 1);
crm_trace("Callback added (%d)", g_list_length(private->notify_list));
}
return pcmk_ok;
}
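/* Illustrative sketch (the callback body is hypothetical): receive an event
 * whenever a fencing operation completes.
 *
 *     static void
 *     fence_notify(stonith_t *st, stonith_event_t *e)
 *     {
 *         crm_info("Fencing of %s finished: %d", e->target, e->result);
 *     }
 *
 *     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE,
 *                                     fence_notify);
 */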
static int
stonith_api_del_notification(stonith_t * stonith, const char *event)
{
GList *list_item = NULL;
stonith_notify_client_t *new_client = NULL;
stonith_private_t *private = NULL;
crm_debug("Removing callback for %s events", event);
private = stonith->st_private;
new_client = calloc(1, sizeof(stonith_notify_client_t));
new_client->event = event;
new_client->notify = NULL;
list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
stonith_set_notification(stonith, event, 0);
if (list_item != NULL) {
stonith_notify_client_t *list_client = list_item->data;
- private->notify_list = g_list_remove(private->notify_list, list_client);
- free(list_client);
+ if (private->notify_refcnt) {
+ list_client->delete = TRUE;
+ private->notify_deletes = TRUE;
+ } else {
+ private->notify_list = g_list_remove(private->notify_list, list_client);
+ free(list_client);
+ }
crm_trace("Removed callback");
} else {
crm_trace("Callback not present");
}
free(new_client);
return pcmk_ok;
}
static int
stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options,
void *user_data, const char *callback_name,
void (*callback) (stonith_t * st, stonith_callback_data_t * data))
{
stonith_callback_client_t *blob = NULL;
stonith_private_t *private = NULL;
CRM_CHECK(stonith != NULL, return -EINVAL);
CRM_CHECK(stonith->st_private != NULL, return -EINVAL);
private = stonith->st_private;
if (call_id == 0) {
private->op_callback = callback;
} else if (call_id < 0) {
if (!(options & st_opt_report_only_success)) {
crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id));
invoke_callback(stonith, call_id, call_id, user_data, callback);
} else {
crm_warn("Fencer call failed: %s", pcmk_strerror(call_id));
}
return FALSE;
}
blob = calloc(1, sizeof(stonith_callback_client_t));
blob->id = callback_name;
blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE;
blob->user_data = user_data;
blob->callback = callback;
blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE;
if (timeout > 0) {
set_callback_timeout(blob, stonith, call_id, timeout);
}
g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob);
crm_trace("Added callback to %s for call %d", callback_name, call_id);
return TRUE;
}
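/* Illustrative sketch: fence a (hypothetical) node asynchronously, then
 * attach a per-call callback (my_fence_cb is hypothetical) to the returned
 * call ID.
 *
 *     int call_id = st->cmds->fence(st, st_opt_none, "node1", "reboot",
 *                                   120, 0);
 *
 *     if (call_id > 0) {
 *         st->cmds->register_callback(st, call_id, 120,
 *                                     st_opt_timeout_updates, NULL,
 *                                     "my_fence_cb", my_fence_cb);
 *     }
 */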
static void
stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data)
{
int call = GPOINTER_TO_INT(key);
stonith_callback_client_t *blob = value;
crm_debug("Call %d (%s): pending", call, crm_str(blob->id));
}
void
stonith_dump_pending_callbacks(stonith_t * stonith)
{
stonith_private_t *private = stonith->st_private;
if (private->stonith_op_callback_table == NULL) {
return;
}
return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL);
}
static stonith_event_t *
xml_to_event(xmlNode * msg)
{
stonith_event_t *event = calloc(1, sizeof(stonith_event_t));
const char *ntype = crm_element_value(msg, F_SUBTYPE);
char *data_addr = crm_strdup_printf("//%s", ntype);
xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG);
crm_log_xml_trace(msg, "stonith_notify");
crm_element_value_int(msg, F_STONITH_RC, &(event->result));
if (safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) {
event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION);
if (data) {
event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN);
event->action = crm_element_value_copy(data, F_STONITH_ACTION);
event->target = crm_element_value_copy(data, F_STONITH_TARGET);
event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE);
event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID);
event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME);
event->device = crm_element_value_copy(data, F_STONITH_DEVICE);
} else {
crm_err("No data for %s event", ntype);
crm_log_xml_notice(msg, "BadEvent");
}
}
free(data_addr);
return event;
}
static void
event_free(stonith_event_t * event)
{
free(event->id);
free(event->type);
free(event->message);
free(event->operation);
free(event->origin);
free(event->action);
free(event->target);
free(event->executioner);
free(event->device);
free(event->client_origin);
free(event);
}
static void
stonith_send_notification(gpointer data, gpointer user_data)
{
struct notify_blob_s *blob = user_data;
stonith_notify_client_t *entry = data;
stonith_event_t *st_event = NULL;
const char *event = NULL;
if (blob->xml == NULL) {
crm_warn("Skipping callback - NULL message");
return;
}
event = crm_element_value(blob->xml, F_SUBTYPE);
if (entry == NULL) {
crm_warn("Skipping callback - NULL callback client");
return;
+ } else if (entry->delete) {
+ crm_trace("Skipping callback - marked for deletion");
+ return;
+
} else if (entry->notify == NULL) {
crm_warn("Skipping callback - NULL callback");
return;
} else if (safe_str_neq(entry->event, event)) {
crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event);
return;
}
st_event = xml_to_event(blob->xml);
crm_trace("Invoking callback for %p/%s event...", entry, event);
entry->notify(blob->stonith, st_event);
crm_trace("Callback invoked...");
event_free(st_event);
}
int
stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data,
int call_options, int timeout)
{
int rc = 0;
int reply_id = -1;
enum crm_ipc_flags ipc_flags = crm_ipc_flags_none;
xmlNode *op_msg = NULL;
xmlNode *op_reply = NULL;
stonith_private_t *native = stonith->st_private;
if (stonith->state == stonith_disconnected) {
return -ENOTCONN;
}
if (output_data != NULL) {
*output_data = NULL;
}
if (op == NULL) {
crm_err("No operation specified");
return -EINVAL;
}
if (call_options & st_opt_sync_call) {
ipc_flags |= crm_ipc_client_response;
}
stonith->call_id++;
    /* Prevent call_id from wrapping to a negative (or zero) value, which
     * would conflict with the stonith_errors enum
     */
if (stonith->call_id < 1) {
stonith->call_id = 1;
}
    CRM_CHECK(native->token != NULL, ;);
op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options);
if (op_msg == NULL) {
return -EINVAL;
}
crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout);
crm_trace("Sending %s message to fencer with timeout %ds", op, timeout);
rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply);
free_xml(op_msg);
if (rc < 0) {
crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc);
rc = -ECOMM;
goto done;
}
crm_log_xml_trace(op_reply, "Reply");
if (!(call_options & st_opt_sync_call)) {
crm_trace("Async call %d, returning", stonith->call_id);
CRM_CHECK(stonith->call_id != 0, return -EPROTO);
free_xml(op_reply);
return stonith->call_id;
}
rc = pcmk_ok;
crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id);
if (reply_id == stonith->call_id) {
crm_trace("Synchronous reply %d received", reply_id);
if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) {
rc = -ENOMSG;
}
if ((call_options & st_opt_discard_reply) || output_data == NULL) {
crm_trace("Discarding reply");
} else {
*output_data = op_reply;
op_reply = NULL; /* Prevent subsequent free */
}
} else if (reply_id <= 0) {
crm_err("Received bad reply: No id set");
crm_log_xml_err(op_reply, "Bad reply");
free_xml(op_reply);
rc = -ENOMSG;
} else {
crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id);
crm_log_xml_err(op_reply, "Old reply");
free_xml(op_reply);
rc = -ENOMSG;
}
done:
if (crm_ipc_connected(native->ipc) == FALSE) {
crm_err("Fencer disconnected");
stonith->state = stonith_disconnected;
}
free_xml(op_reply);
return rc;
}
/* Not used with mainloop */
bool
stonith_dispatch(stonith_t * st)
{
gboolean stay_connected = TRUE;
stonith_private_t *private = NULL;
CRM_ASSERT(st != NULL);
private = st->st_private;
while (crm_ipc_ready(private->ipc)) {
if (crm_ipc_read(private->ipc) > 0) {
const char *msg = crm_ipc_buffer(private->ipc);
stonith_dispatch_internal(msg, strlen(msg), st);
}
if (crm_ipc_connected(private->ipc) == FALSE) {
crm_err("Connection closed");
stay_connected = FALSE;
}
}
return stay_connected;
}
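/* Illustrative mainloop-free client sketch (poll() is just one way to wait
 * for fd readiness; the client name is hypothetical):
 *
 *     int fd = 0;
 *
 *     if (st->cmds->connect(st, "my-client", &fd) == pcmk_ok) {
 *         struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *         while (poll(&pfd, 1, -1) > 0) {
 *             if (stonith_dispatch(st) == FALSE) {
 *                 break; // fencer connection was closed
 *             }
 *         }
 *     }
 */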
static int
stonith_api_free(stonith_t * stonith)
{
int rc = pcmk_ok;
crm_trace("Destroying %p", stonith);
if (stonith->state != stonith_disconnected) {
crm_trace("Disconnecting %p first", stonith);
rc = stonith->cmds->disconnect(stonith);
}
if (stonith->state == stonith_disconnected) {
stonith_private_t *private = stonith->st_private;
crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table));
g_hash_table_destroy(private->stonith_op_callback_table);
crm_trace("Destroying %d notification clients", g_list_length(private->notify_list));
g_list_free_full(private->notify_list, free);
free(stonith->st_private);
free(stonith->cmds);
free(stonith);
} else {
crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc);
}
return rc;
}
void
stonith_api_delete(stonith_t * stonith)
{
crm_trace("Destroying %p", stonith);
if(stonith) {
stonith->cmds->free(stonith);
}
}
static int
stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
const char *namespace_s, const char *agent,
stonith_key_value_t *params, int timeout, char **output,
char **error_output)
{
/* Validation should be done directly via the agent, so we can get it from
* stonith_admin when the cluster is not running, which is important for
* higher-level tools.
*/
int rc = pcmk_ok;
    /* Use a dummy node name in case the agent requires a target. We assume
     * the actual target doesn't matter for validation purposes (if that
     * proves incorrect in practice, we will need to allow the caller to pass
     * the target).
     */
const char *target = "node1";
GHashTable *params_table = crm_str_table_new();
// Convert parameter list to a hash table
for (; params; params = params->next) {
// Strip out Pacemaker-implemented parameters
if (!crm_starts_with(params->key, "pcmk_")
&& strcmp(params->key, "provides")
&& strcmp(params->key, "stonith-timeout")) {
g_hash_table_insert(params_table, strdup(params->key),
strdup(params->value));
}
}
#if SUPPORT_CIBSECRETS
rc = replace_secret_params(rsc_id, params_table);
if (rc < 0) {
crm_warn("Could not replace secret parameters for validation of %s: %s",
agent, pcmk_strerror(rc));
}
#endif
if (output) {
*output = NULL;
}
if (error_output) {
*error_output = NULL;
}
switch (stonith_get_namespace(agent, namespace_s)) {
case st_namespace_rhcs:
rc = stonith__rhcs_validate(st, call_options, target, agent,
params_table, timeout, output,
error_output);
break;
#if HAVE_STONITH_STONITH_H
case st_namespace_lha:
rc = stonith__lha_validate(st, call_options, target, agent,
params_table, timeout, output,
error_output);
break;
#endif
default:
rc = -EINVAL;
errno = EINVAL;
crm_perror(LOG_ERR,
"Agent %s not found or does not support validation",
agent);
break;
}
g_hash_table_destroy(params_table);
return rc;
}
stonith_t *
stonith_api_new(void)
{
stonith_t *new_stonith = NULL;
stonith_private_t *private = NULL;
new_stonith = calloc(1, sizeof(stonith_t));
private = calloc(1, sizeof(stonith_private_t));
new_stonith->st_private = private;
private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL, stonith_destroy_op_callback);
private->notify_list = NULL;
+ private->notify_refcnt = 0;
+ private->notify_deletes = FALSE;
new_stonith->call_id = 1;
new_stonith->state = stonith_disconnected;
new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t));
/* *INDENT-OFF* */
new_stonith->cmds->free = stonith_api_free;
new_stonith->cmds->connect = stonith_api_signon;
new_stonith->cmds->disconnect = stonith_api_signoff;
new_stonith->cmds->list = stonith_api_list;
new_stonith->cmds->monitor = stonith_api_monitor;
new_stonith->cmds->status = stonith_api_status;
new_stonith->cmds->fence = stonith_api_fence;
new_stonith->cmds->confirm = stonith_api_confirm;
new_stonith->cmds->history = stonith_api_history;
new_stonith->cmds->list_agents = stonith_api_device_list;
new_stonith->cmds->metadata = stonith_api_device_metadata;
new_stonith->cmds->query = stonith_api_query;
new_stonith->cmds->remove_device = stonith_api_remove_device;
new_stonith->cmds->register_device = stonith_api_register_device;
new_stonith->cmds->remove_level = stonith_api_remove_level;
new_stonith->cmds->remove_level_full = stonith_api_remove_level_full;
new_stonith->cmds->register_level = stonith_api_register_level;
new_stonith->cmds->register_level_full = stonith_api_register_level_full;
new_stonith->cmds->remove_callback = stonith_api_del_callback;
new_stonith->cmds->register_callback = stonith_api_add_callback;
new_stonith->cmds->remove_notification = stonith_api_del_notification;
new_stonith->cmds->register_notification = stonith_api_add_notification;
new_stonith->cmds->validate = stonith_api_validate;
/* *INDENT-ON* */
return new_stonith;
}
/*!
* \brief Make a blocking connection attempt to the fencer
*
* \param[in,out] st Fencer API object
* \param[in] name Client name to use with fencer
* \param[in] max_attempts Return error if this many attempts fail
*
* \return pcmk_ok on success, result of last attempt otherwise
*/
int
stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts)
{
int rc = -EINVAL; // if max_attempts is not positive
for (int attempt = 1; attempt <= max_attempts; attempt++) {
rc = st->cmds->connect(st, name, NULL);
if (rc == pcmk_ok) {
return pcmk_ok;
} else if (attempt < max_attempts) {
crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s "
CRM_XS " rc=%d",
attempt, max_attempts, pcmk_strerror(rc), rc);
sleep(2);
}
}
crm_notice("Could not connect to fencer: %s " CRM_XS " rc=%d",
pcmk_strerror(rc), rc);
return rc;
}
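/* Illustrative sketch of the usual client lifecycle (the client name is
 * hypothetical):
 *
 *     stonith_t *st = stonith_api_new();
 *
 *     if ((st != NULL)
 *         && (stonith_api_connect_retry(st, "my-client", 4) == pcmk_ok)) {
 *         // ... use st->cmds->... here ...
 *         st->cmds->disconnect(st);
 *     }
 *     stonith_api_delete(st); // safe even if st is NULL
 */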
stonith_key_value_t *
stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value)
{
stonith_key_value_t *p, *end;
p = calloc(1, sizeof(stonith_key_value_t));
if (key) {
p->key = strdup(key);
}
if (value) {
p->value = strdup(value);
}
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
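/* Illustrative sketch (the parameter names are hypothetical): build a
 * key/value list suitable for API calls such as validate():
 *
 *     stonith_key_value_t *params = NULL;
 *
 *     params = stonith_key_value_add(params, "ip", "192.168.122.1");
 *     params = stonith_key_value_add(params, "login", "testuser");
 *     // ... pass params to an API call ...
 *     stonith_key_value_freeall(params, 1, 1); // free keys and values too
 */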
void
stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values)
{
stonith_key_value_t *p;
while (head) {
p = head->next;
if (keys) {
free(head->key);
}
if (values) {
free(head->value);
}
free(head);
head = p;
}
}
#define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON)
#define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __FUNCTION__, args)
int
stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off)
{
int rc = pcmk_ok;
stonith_t *st = stonith_api_new();
const char *action = off? "off" : "reboot";
api_log_open();
if (st == NULL) {
api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s",
action, nodeid, uname);
return -EPROTO;
}
rc = st->cmds->connect(st, "stonith-api", NULL);
if (rc != pcmk_ok) {
api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)",
action, nodeid, uname, pcmk_strerror(rc), rc);
} else {
char *name = NULL;
enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide;
if (uname != NULL) {
name = strdup(uname);
} else if (nodeid > 0) {
opts |= st_opt_cs_nodeid;
name = crm_itoa(nodeid);
}
rc = st->cmds->fence(st, opts, name, action, timeout, 0);
free(name);
if (rc != pcmk_ok) {
api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)",
action, nodeid, uname, pcmk_strerror(rc), rc);
} else {
api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action);
}
}
stonith_api_delete(st);
return rc;
}
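/* Illustrative usage (hypothetical node name): ask the cluster to reboot
 * node1, allowing 60 seconds for completion:
 *
 *     stonith_api_kick(0, "node1", 60, false);
 */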
time_t
stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress)
{
int rc = pcmk_ok;
time_t when = 0;
stonith_t *st = stonith_api_new();
stonith_history_t *history = NULL, *hp = NULL;
if (st == NULL) {
api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: "
"API initialization failed", nodeid, uname);
return when;
}
rc = st->cmds->connect(st, "stonith-api", NULL);
if (rc != pcmk_ok) {
api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc);
} else {
int entries = 0;
int progress = 0;
int completed = 0;
char *name = NULL;
enum stonith_call_options opts = st_opt_sync_call;
if (uname != NULL) {
name = strdup(uname);
} else if (nodeid > 0) {
opts |= st_opt_cs_nodeid;
name = crm_itoa(nodeid);
}
rc = st->cmds->history(st, opts, name, &history, 120);
free(name);
for (hp = history; hp; hp = hp->next) {
entries++;
if (in_progress) {
progress++;
if (hp->state != st_done && hp->state != st_failed) {
when = time(NULL);
}
} else if (hp->state == st_done) {
completed++;
if (hp->completed > when) {
when = hp->completed;
}
}
}
stonith_history_free(history);
if(rc == pcmk_ok) {
api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed);
} else {
api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc);
}
}
stonith_api_delete(st);
if(when) {
api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when);
}
return when;
}
bool
stonith_agent_exists(const char *agent, int timeout)
{
stonith_t *st = NULL;
stonith_key_value_t *devices = NULL;
stonith_key_value_t *dIter = NULL;
bool rc = FALSE;
if (agent == NULL) {
return rc;
}
st = stonith_api_new();
st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout == 0 ? 120 : timeout);
for (dIter = devices; dIter != NULL; dIter = dIter->next) {
if (crm_str_eq(dIter->value, agent, TRUE)) {
rc = TRUE;
break;
}
}
stonith_key_value_freeall(devices, 1, 1);
stonith_api_delete(st);
return rc;
}
const char *
stonith_action_str(const char *action)
{
if (action == NULL) {
return "fencing";
} else if (!strcmp(action, "on")) {
return "unfencing";
} else if (!strcmp(action, "off")) {
return "turning off";
} else {
return action;
}
}
/*!
* \internal
* \brief Parse a target name from one line of a target list string
*
* \param[in] line One line of a target list string
 * \param[in]  len     String length of line
* \param[in,out] output List to add newly allocated target name to
*/
static void
parse_list_line(const char *line, int len, GList **output)
{
size_t i = 0;
size_t entry_start = 0;
/* Skip complaints about additional parameters device doesn't understand
*
* @TODO Document or eliminate the implied restriction of target names
*/
if (strstr(line, "invalid") || strstr(line, "variable")) {
crm_debug("Skipping list output line: %s", line);
return;
}
// Process line content, character by character
for (i = 0; i <= len; i++) {
if (isspace(line[i]) || (line[i] == ',') || (line[i] == ';')
|| (line[i] == '\0')) {
// We've found a separator (i.e. the end of an entry)
int rc = 0;
char *entry = NULL;
if (i == entry_start) {
// Skip leading and sequential separators
entry_start = i + 1;
continue;
}
entry = calloc(i - entry_start + 1, sizeof(char));
CRM_ASSERT(entry != NULL);
/* Read entry, stopping at first separator
*
* @TODO Document or eliminate these character restrictions
*/
            rc = sscanf(line + entry_start, "%[a-zA-Z0-9_.-]", entry);
if (rc != 1) {
crm_warn("Could not parse list output entry: %s "
CRM_XS " entry_start=%d position=%d",
line + entry_start, entry_start, i);
free(entry);
} else if (safe_str_eq(entry, "on") || safe_str_eq(entry, "off")) {
/* Some agents print the target status in the list output,
* though none are known now (the separate list-status command
* is used for this, but it can also print "UNKNOWN"). To handle
* this possibility, skip such entries.
*
* @TODO Document or eliminate the implied restriction of target
* names.
*/
free(entry);
} else {
// We have a valid entry
*output = g_list_append(*output, entry);
}
entry_start = i + 1;
}
}
}
/*!
* \internal
* \brief Parse a list of targets from a string
*
* \param[in] list_output Target list as a string
*
* \return List of target names
* \note The target list string format is flexible, to allow for user-specified
 *       lists such as pcmk_host_list and the output of an agent's list action
* (whether direct or via the API, which escapes newlines). There may be
* multiple lines, separated by either a newline or an escaped newline
* (backslash n). Each line may have one or more target names, separated
* by any combination of whitespace, commas, and semi-colons. Lines
* containing "invalid" or "variable" will be ignored entirely. Target
* names "on" or "off" (case-insensitive) will be ignored. Target names
* may contain only alphanumeric characters, underbars (_), dashes (-),
* and dots (.) (if any other character occurs in the name, it and all
* subsequent characters in the name will be ignored).
* \note The caller is responsible for freeing the result with
* g_list_free_full(result, free).
*/
GList *
stonith__parse_targets(const char *target_spec)
{
GList *targets = NULL;
if (target_spec != NULL) {
size_t out_len = strlen(target_spec);
size_t line_start = 0; // Starting index of line being processed
for (size_t i = 0; i <= out_len; ++i) {
if ((target_spec[i] == '\n') || (target_spec[i] == '\0')
|| ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) {
// We've reached the end of one line of output
int len = i - line_start;
if (len > 0) {
                    char *line = strndup(target_spec + line_start, len);
                    // (no need to NUL-terminate; strndup() does that for us)
parse_list_line(line, len, &targets);
free(line);
}
if (target_spec[i] == '\\') {
++i; // backslash-n takes up two positions
}
line_start = i + 1;
}
}
}
return targets;
}
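/* Illustrative sketch: these inputs all parse to the same three
 * (hypothetical) targets:
 *
 *     GList *targets = stonith__parse_targets("node1,node2;node3");
 *     // equivalently: "node1 node2 node3" or "node1\nnode2\\nnode3"
 *
 *     g_list_free_full(targets, free);
 */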