diff --git a/cts/patterns.py b/cts/patterns.py
index 745eaaf4b5..1bdace0ead 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,391 +1,391 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
-__copyright__ = "Copyright 2008-2018 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2019 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
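# Registry of pattern variants, keyed by variant name; populated as each
# variant class is instantiated, and consulted by PatternSelector.get_variant()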
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
]
self.BadNews = []
self.components = {}
self.commands = {
"StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"SetCheckInterval" : "cibadmin --modify -c --xml-text ''",
"ClearCheckInterval" : "cibadmin --delete --xpath \"//nvpair[@name='cluster-recheck-interval']\"",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
- "Pat:Local_started" : "%s\W.*The local CRM is operational",
+ "Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : "(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* of %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"schedulerd.*: Recover %s",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* for host '%s' with device .* returned: 0",
}
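# Lookup helpers: unknown keys are reported and mapped to a harmless
# default ([] or None), so a missing pattern degrades gracefully instead
# of raising mid-test.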
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
"Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* with device 'FencingFail' returned:",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"\[[0-9]+\] terminated with signal [0-9]+ \(",
r"schedulerd:.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Executing .* fencing operation \(.*\) on ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
# "Resource .*stonith::.* is active on 2 nodes attempting recovery",
# "Transition .* ERRORs found during PE processing",
]
self.components["corosync-ignore"] = [
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling Node .* for STONITH",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning failed child process: pacemaker-attrd",
r"pacemakerd.* Respawning failed child process: pacemaker-based",
r"pacemakerd.* Respawning failed child process: pacemaker-controld",
r"pacemakerd.* Respawning failed child process: pacemaker-fenced",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-execd.* terminated with signal 9",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.*Respawning failed child process: pacemaker-execd",
r"pacemakerd.*Respawning failed child process: pacemaker-controld",
]
self.components["pacemaker-execd-ignore"] = []
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"Respawning failed child process: pacemaker-controld",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
"Connection to pengine failed",
"Connection to pengine.* closed",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = []
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
r"pacemaker-controld.*:\s*warn.*:\s*Callback already present",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Sign-in failed: triggered a retry",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class,
running in Docker containers
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
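# Facade used by the rest of CTS: instantiating it registers the requested
# variant in patternvariants, and lookups fall back to the "crm-base"
# patterns when a variant name is unknown.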
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
# python cts/patterns.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c
index 9ffe20115c..15c6a8075e 100644
--- a/daemons/attrd/pacemaker-attrd.c
+++ b/daemons/attrd/pacemaker-attrd.c
@@ -1,441 +1,441 @@
/*
* Copyright 2013-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-attrd.h"
lrmd_t *the_lrmd = NULL;
crm_cluster_t *attrd_cluster = NULL;
crm_trigger_t *attrd_config_read = NULL;
static crm_exit_t attrd_exit_status = CRM_EX_OK;
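/* CPG deliver callback: decode the raw Corosync message and hand any
 * cluster-class XML to the peer message handler
 */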
static void
attrd_cpg_dispatch(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
uint32_t kind = 0;
xmlNode *xml = NULL;
const char *from = NULL;
char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
if(data == NULL) {
return;
}
if (kind == crm_class_cluster) {
xml = string2xml(data);
}
if (xml == NULL) {
crm_err("Bad message of class %d received from %s[%u]: '%.120s'", kind, from, nodeid, data);
} else {
crm_node_t *peer = crm_get_peer(nodeid, from);
attrd_peer_message(peer, xml);
}
free_xml(xml);
free(data);
}
static void
attrd_cpg_destroy(gpointer unused)
{
if (attrd_shutting_down()) {
crm_info("Corosync disconnection complete");
} else {
crm_crit("Lost connection to cluster layer, shutting down");
attrd_exit_status = CRM_EX_DISCONNECT;
attrd_shutdown(0);
}
}
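/* CIB-replaced notification: if this node won the writer election, push all
 * attributes back into the replaced CIB, then re-read the alerts section in
 * case it changed
 */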
static void
attrd_cib_replaced_cb(const char *event, xmlNode * msg)
{
if (attrd_shutting_down()) {
return;
}
if (attrd_election_won()) {
crm_notice("Updating all attributes after %s event", event);
write_attributes(TRUE, FALSE);
}
// Check for changes in alerts
mainloop_set_trigger(attrd_config_read);
}
static void
attrd_cib_destroy_cb(gpointer user_data)
{
cib_t *conn = user_data;
conn->cmds->signoff(conn); /* Ensure IPC is cleaned up */
if (attrd_shutting_down()) {
crm_info("Connection disconnection complete");
} else {
/* eventually this should trigger a reconnect, not a shutdown */
crm_crit("Lost connection to the CIB manager, shutting down");
attrd_exit_status = CRM_EX_DISCONNECT;
attrd_shutdown(0);
}
return;
}
static void
attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output,
void *user_data)
{
do_crm_log_unlikely((rc? LOG_NOTICE : LOG_DEBUG),
"Cleared transient attributes: %s "
CRM_XS " xpath=%s rc=%d",
pcmk_strerror(rc), (char *) user_data, rc);
}
#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS
/*!
* \internal
* \brief Wipe all transient attributes for this node from the CIB
*
* Clear any previous transient node attributes from the CIB. This is
* normally done by the DC's controller when this node leaves the cluster, but
* this handles the case where the node restarted so quickly that the
* cluster layer didn't notice.
*
* \todo If pacemaker-attrd respawns after crashing (see PCMK_respawned),
* ideally we'd skip this and sync our attributes from the writer.
* However, currently we reject any values for us that the writer has, in
* attrd_peer_update().
*/
static void
attrd_erase_attrs()
{
int call_id;
char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname);
crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s",
xpath);
call_id = the_cib->cmds->remove(the_cib, xpath, NULL,
cib_quorum_override | cib_xpath);
the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath,
"attrd_erase_cb", attrd_erase_cb,
free);
}
static int
attrd_cib_connect(int max_retry)
{
static int attempts = 0;
int rc = -ENOTCONN;
the_cib = cib_new();
if (the_cib == NULL) {
return -ENOTCONN;
}
do {
if(attempts > 0) {
sleep(attempts);
}
attempts++;
crm_debug("Connection attempt %d to the CIB manager", attempts);
rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command);
} while(rc != pcmk_ok && attempts < max_retry);
if (rc != pcmk_ok) {
crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d",
pcmk_strerror(rc), rc);
goto cleanup;
}
crm_debug("Connected to the CIB manager after %d attempts", attempts);
rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb);
if (rc != pcmk_ok) {
crm_err("Could not set disconnection callback");
goto cleanup;
}
rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb);
if(rc != pcmk_ok) {
crm_err("Could not set CIB notification callback");
goto cleanup;
}
rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb);
if (rc != pcmk_ok) {
crm_err("Could not set CIB notification callback (update)");
goto cleanup;
}
return pcmk_ok;
cleanup:
the_cib->cmds->signoff(the_cib);
cib_delete(the_cib);
the_cib = NULL;
return -ENOTCONN;
}
/*!
* \internal
* \brief Prepare the CIB after cluster is connected
*/
static void
attrd_cib_init()
{
// We have no attribute values in memory, wipe the CIB to match
attrd_erase_attrs();
// Set a trigger for reading the CIB (for the alerts section)
attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL);
// Always read the CIB at start-up
mainloop_set_trigger(attrd_config_read);
}
static qb_ipcs_service_t *ipcs = NULL;
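/* Handle a request from a local IPC client: most operations are acknowledged
 * up front, while queries are answered with a full reply instead
 */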
static int32_t
attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *xml = crm_ipcs_recv(client, data, size, &id, &flags);
const char *op;
if (xml == NULL) {
crm_debug("No msg from %d (%p)", crm_ipcs_client_pid(c), c);
return 0;
}
#if ENABLE_ACL
CRM_ASSERT(client->user != NULL);
crm_acl_get_set_user(xml, F_ATTRD_USER, client->user);
#endif
crm_trace("Processing msg from %d (%p)", crm_ipcs_client_pid(c), c);
crm_log_xml_trace(xml, __FUNCTION__);
op = crm_element_value(xml, F_ATTRD_TASK);
if (client->name == NULL) {
const char *value = crm_element_value(xml, F_ORIG);
client->name = crm_strdup_printf("%s.%d", value?value:"unknown", client->pid);
}
if (safe_str_eq(op, ATTRD_OP_PEER_REMOVE)) {
attrd_send_ack(client, id, flags);
attrd_client_peer_remove(client->name, xml);
} else if (safe_str_eq(op, ATTRD_OP_CLEAR_FAILURE)) {
attrd_send_ack(client, id, flags);
attrd_client_clear_failure(xml);
} else if (safe_str_eq(op, ATTRD_OP_UPDATE)) {
attrd_send_ack(client, id, flags);
attrd_client_update(xml);
} else if (safe_str_eq(op, ATTRD_OP_UPDATE_BOTH)) {
attrd_send_ack(client, id, flags);
attrd_client_update(xml);
} else if (safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) {
attrd_send_ack(client, id, flags);
attrd_client_update(xml);
} else if (safe_str_eq(op, ATTRD_OP_REFRESH)) {
attrd_send_ack(client, id, flags);
attrd_client_refresh();
} else if (safe_str_eq(op, ATTRD_OP_QUERY)) {
/* queries will get a reply, so no ack is necessary */
attrd_client_query(client, id, flags, xml);
} else {
crm_info("Ignoring request from client %s with unknown operation %s",
client->name, op);
}
free_xml(xml);
return 0;
}
void
attrd_ipc_fini()
{
if (ipcs != NULL) {
crm_client_disconnect_all(ipcs);
qb_ipcs_destroy(ipcs);
ipcs = NULL;
}
}
static int
attrd_cluster_connect()
{
attrd_cluster = calloc(1, sizeof(crm_cluster_t));
attrd_cluster->destroy = attrd_cpg_destroy;
attrd_cluster->cpg.cpg_deliver_fn = attrd_cpg_dispatch;
attrd_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership;
crm_set_status_callback(&attrd_peer_change_cb);
if (crm_cluster_connect(attrd_cluster) == FALSE) {
crm_err("Cluster connection failed");
return -ENOTCONN;
}
return pcmk_ok;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
int flag = 0;
int index = 0;
int argerr = 0;
crm_ipc_t *old_instance = NULL;
attrd_init_mainloop();
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Daemon for aggregating and atomically storing node attribute updates into the CIB");
mainloop_add_signal(SIGTERM, attrd_shutdown);
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'h': /* Help message */
crm_help(flag, CRM_EX_OK);
break;
default:
++argerr;
break;
}
}
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
crm_log_init(T_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
- crm_info("Starting up");
+ crm_notice("Starting Pacemaker node attribute manager");
old_instance = crm_ipc_new(T_ATTRD, 0);
if (crm_ipc_connect(old_instance)) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
- crm_err("pacemaker-attrdd is already active, aborting startup");
+ crm_err("pacemaker-attrd is already active, aborting startup");
crm_exit(CRM_EX_OK);
} else {
/* not up or not authentic, we'll proceed either way */
crm_ipc_destroy(old_instance);
old_instance = NULL;
}
attributes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_attribute);
/* Connect to the CIB before connecting to the cluster or listening for IPC.
* This allows us to assume the CIB is connected whenever we process a
* cluster or IPC message (which also avoids start-up race conditions).
*/
if (attrd_cib_connect(10) != pcmk_ok) {
attrd_exit_status = CRM_EX_FATAL;
goto done;
}
crm_info("CIB connection active");
if (attrd_cluster_connect() != pcmk_ok) {
attrd_exit_status = CRM_EX_FATAL;
goto done;
}
crm_info("Cluster connection active");
// Initialization that requires the cluster to be connected
attrd_election_init();
attrd_cib_init();
/* Set a private attribute for ourselves with the protocol version we
* support. This lets all nodes determine the minimum supported version
* across all nodes. It also ensures that the writer learns our node name,
* so it can send our attributes to the CIB.
*/
attrd_broadcast_protocol();
attrd_init_ipc(&ipcs, attrd_ipc_dispatch);
- crm_info("Accepting attribute updates");
+ crm_notice("Pacemaker node attribute manager successfully started and accepting connections");
attrd_run_mainloop();
done:
crm_info("Shutting down attribute manager");
attrd_election_fini();
attrd_ipc_fini();
attrd_lrmd_disconnect();
attrd_cib_disconnect();
g_hash_table_destroy(attributes);
crm_exit(attrd_exit_status);
}
diff --git a/daemons/based/pacemaker-based.c b/daemons/based/pacemaker-based.c
index f004f671f5..f959976279 100644
--- a/daemons/based/pacemaker-based.c
+++ b/daemons/based/pacemaker-based.c
@@ -1,386 +1,369 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if HAVE_BZLIB_H
# include <bzlib.h>
#endif
extern int init_remote_listener(int port, gboolean encrypted);
gboolean cib_shutdown_flag = FALSE;
int cib_status = pcmk_ok;
crm_cluster_t crm_cluster;
GMainLoop *mainloop = NULL;
const char *cib_root = NULL;
char *cib_our_uname = NULL;
static gboolean preserve_status = FALSE;
gboolean cib_writes_enabled = TRUE;
int remote_fd = 0;
int remote_tls_fd = 0;
GHashTable *config_hash = NULL;
GHashTable *local_notify_queue = NULL;
-static int cib_init(void);
+static void cib_init(void);
void cib_shutdown(int nsig);
static bool startCib(const char *filename);
extern int write_cib_contents(gpointer p);
void cib_cleanup(void);
static void
cib_enable_writes(int nsig)
{
crm_info("(Re)enabling disk writes");
cib_writes_enabled = TRUE;
}
-static void
-log_cib_client(gpointer key, gpointer value, gpointer user_data)
-{
- crm_info("Client %s", crm_client_name(value));
-}
-
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"stand-alone", 0, 0, 's', "\tAdvanced use only"},
{"disk-writes", 0, 0, 'w', "\tAdvanced use only"},
{"cib-root", 1, 0, 'r', "\tAdvanced use only"},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
int flag;
int rc = 0;
int index = 0;
int argerr = 0;
struct passwd *pwentry = NULL;
crm_ipc_t *old_instance = NULL;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "[options]",
long_options, "Daemon for storing and replicating the cluster configuration");
mainloop_add_signal(SIGTERM, cib_shutdown);
mainloop_add_signal(SIGPIPE, cib_enable_writes);
cib_writer = mainloop_add_trigger(G_PRIORITY_LOW, write_cib_contents, NULL);
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 's':
stand_alone = TRUE;
preserve_status = TRUE;
cib_writes_enabled = FALSE;
pwentry = getpwnam(CRM_DAEMON_USER);
CRM_CHECK(pwentry != NULL,
crm_perror(LOG_ERR, "Invalid uid (%s) specified", CRM_DAEMON_USER);
return CRM_EX_FATAL);
rc = setgid(pwentry->pw_gid);
if (rc < 0) {
crm_perror(LOG_ERR, "Could not set group to %d", pwentry->pw_gid);
return CRM_EX_FATAL;
}
rc = initgroups(CRM_DAEMON_USER, pwentry->pw_gid);
if (rc < 0) {
crm_perror(LOG_ERR, "Could not setup groups for user %d", pwentry->pw_uid);
return CRM_EX_FATAL;
}
rc = setuid(pwentry->pw_uid);
if (rc < 0) {
crm_perror(LOG_ERR, "Could not set user to %d", pwentry->pw_uid);
return CRM_EX_FATAL;
}
break;
case '?': /* Help message */
crm_help(flag, CRM_EX_OK);
break;
case 'w':
cib_writes_enabled = TRUE;
break;
case 'r':
cib_root = optarg;
break;
case 'm':
cib_metadata();
return CRM_EX_OK;
default:
++argerr;
break;
}
}
if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
cib_metadata();
return CRM_EX_OK;
}
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
+ crm_notice("Starting Pacemaker CIB manager");
+
old_instance = crm_ipc_new(CIB_CHANNEL_RO, 0);
if (crm_ipc_connect(old_instance)) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("pacemaker-based is already active, aborting startup");
crm_exit(CRM_EX_OK);
} else {
/* not up or not authentic, we'll proceed either way */
crm_ipc_destroy(old_instance);
old_instance = NULL;
}
if (cib_root == NULL) {
cib_root = CRM_CONFIG_DIR;
} else {
crm_notice("Using custom config location: %s", cib_root);
}
if (pcmk__daemon_can_write(cib_root, NULL) == FALSE) {
crm_err("Terminating due to bad permissions on %s", cib_root);
fprintf(stderr, "ERROR: Bad permissions on %s (see logs for details)\n",
cib_root);
fflush(stderr);
return CRM_EX_FATAL;
}
crm_peer_init();
- /* read local config file */
+ // Read initial CIB, connect to cluster, and start IPC servers
cib_init();
- // This should not be reachable
- CRM_CHECK(crm_hash_table_size(client_connections) == 0,
- crm_warn("Not all clients gone at exit"));
- g_hash_table_foreach(client_connections, log_cib_client, NULL);
- cib_cleanup();
+ // Run the main loop
+ mainloop = g_main_loop_new(NULL, FALSE);
+ crm_notice("Pacemaker CIB manager successfully started and accepting connections");
+ g_main_loop_run(mainloop);
- crm_info("Done");
+ /* If main loop returned, clean up and exit. We disconnect in case
+ * terminate_cib() was called with fast=-1.
+ */
+ crm_cluster_disconnect(&crm_cluster);
+ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm);
return CRM_EX_OK;
}
void
cib_cleanup(void)
{
crm_peer_destroy();
if (local_notify_queue) {
g_hash_table_destroy(local_notify_queue);
}
crm_client_cleanup();
g_hash_table_destroy(config_hash);
free(cib_our_uname);
}
#if SUPPORT_COROSYNC
static void
cib_cs_dispatch(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
uint32_t kind = 0;
xmlNode *xml = NULL;
const char *from = NULL;
char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
if(data == NULL) {
return;
}
if (kind == crm_class_cluster) {
xml = string2xml(data);
if (xml == NULL) {
crm_err("Invalid XML: '%.120s'", data);
free(data);
return;
}
crm_xml_add(xml, F_ORIG, from);
/* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
cib_peer_callback(xml, NULL);
}
free_xml(xml);
free(data);
}
static void
cib_cs_destroy(gpointer user_data)
{
if (cib_shutdown_flag) {
crm_info("Corosync disconnection complete");
} else {
crm_crit("Lost connection to cluster layer, shutting down");
terminate_cib(__FUNCTION__, CRM_EX_DISCONNECT);
}
}
#endif
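/* Track peer changes so we can drop legacy mode once an old node leaves, and
 * terminate during shutdown once the last peer and client connection are gone
 */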
static void
cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
{
switch (type) {
case crm_status_processes:
if (cib_legacy_mode()
&& is_not_set(node->processes, crm_get_cluster_proc())) {
uint32_t old = data? *(const uint32_t *)data : 0;
if ((node->processes ^ old) & crm_proc_cpg) {
crm_info("Attempting to disable legacy mode after %s left the cluster",
node->uname);
legacy_mode = FALSE;
}
}
break;
case crm_status_uname:
case crm_status_nstate:
if (cib_shutdown_flag && (crm_active_peers() < 2)
&& crm_hash_table_size(client_connections) == 0) {
crm_info("No more peers");
terminate_cib(__FUNCTION__, -1);
}
break;
}
}
-static int
+static void
cib_init(void)
{
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
crm_cluster.destroy = cib_cs_destroy;
crm_cluster.cpg.cpg_deliver_fn = cib_cs_dispatch;
crm_cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership;
#endif
}
config_hash = crm_str_table_new();
if (startCib("cib.xml") == FALSE) {
crm_crit("Cannot start CIB... terminating");
crm_exit(CRM_EX_NOINPUT);
}
if (stand_alone == FALSE) {
if (is_corosync_cluster()) {
crm_set_status_callback(&cib_peer_update_callback);
}
if (crm_cluster_connect(&crm_cluster) == FALSE) {
crm_crit("Cannot sign in to the cluster... terminating");
crm_exit(CRM_EX_FATAL);
}
cib_our_uname = crm_cluster.uname;
} else {
cib_our_uname = strdup("localhost");
}
cib_ipc_servers_init(&ipcs_ro,
&ipcs_rw,
&ipcs_shm,
&ipc_ro_callbacks,
&ipc_rw_callbacks);
if (stand_alone) {
cib_is_master = TRUE;
}
-
- /* Create the mainloop and run it... */
- mainloop = g_main_loop_new(NULL, FALSE);
- crm_info("Starting %s mainloop", crm_system_name);
- g_main_loop_run(mainloop);
-
- /* If main loop returned, clean up and exit. We disconnect in case
- * terminate_cib() was called with fast=-1.
- */
- crm_cluster_disconnect(&crm_cluster);
- cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm);
-
- crm_exit(CRM_EX_OK);
}
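/* Read and activate the on-disk CIB, and start any configured remote
 * listeners; returns false if the CIB could not be activated
 */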
static bool
startCib(const char *filename)
{
gboolean active = FALSE;
xmlNode *cib = readCibXmlFile(cib_root, filename, !preserve_status);
if (activateCibXml(cib, TRUE, "start") == 0) {
int port = 0;
const char *port_s = NULL;
active = TRUE;
cib_read_config(config_hash, cib);
port_s = crm_element_value(cib, "remote-tls-port");
if (port_s) {
port = crm_parse_int(port_s, "0");
remote_tls_fd = init_remote_listener(port, TRUE);
}
port_s = crm_element_value(cib, "remote-clear-port");
if (port_s) {
port = crm_parse_int(port_s, "0");
remote_fd = init_remote_listener(port, FALSE);
}
-
- crm_info("CIB Initialization completed successfully");
}
-
return active;
}
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index 0ac358cbeb..03a4fb7b63 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -1,899 +1,899 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
qb_ipcs_service_t *ipcs = NULL;
#if SUPPORT_COROSYNC
extern gboolean crm_connect_corosync(crm_cluster_t * cluster);
#endif
void crm_shutdown(int nsig);
gboolean crm_read_options(gpointer user_data);
gboolean fsa_has_quorum = FALSE;
crm_trigger_t *fsa_source = NULL;
crm_trigger_t *config_read = NULL;
bool no_quorum_suicide_escalation = FALSE;
/* A_HA_CONNECT */
void
do_ha_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
gboolean registered = FALSE;
static crm_cluster_t *cluster = NULL;
if (cluster == NULL) {
cluster = calloc(1, sizeof(crm_cluster_t));
}
if (action & A_HA_DISCONNECT) {
crm_cluster_disconnect(cluster);
crm_info("Disconnected from the cluster");
set_bit(fsa_input_register, R_HA_DISCONNECTED);
}
if (action & A_HA_CONNECT) {
crm_set_status_callback(&peer_update_callback);
crm_set_autoreap(FALSE);
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
registered = crm_connect_corosync(cluster);
#endif
}
if (registered == TRUE) {
controld_election_init(cluster->uname);
fsa_our_uname = cluster->uname;
fsa_our_uuid = cluster->uuid;
if(cluster->uuid == NULL) {
crm_err("Could not obtain local uuid");
registered = FALSE;
}
}
if (registered == FALSE) {
set_bit(fsa_input_register, R_HA_DISCONNECTED);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
populate_cib_nodes(node_update_none, __FUNCTION__);
clear_bit(fsa_input_register, R_HA_DISCONNECTED);
crm_info("Connected to the cluster");
}
if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__);
}
}
/* A_SHUTDOWN */
void
do_shutdown(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* just in case */
set_bit(fsa_input_register, R_SHUTDOWN);
if (stonith_api) {
/* Prevent it from coming up again */
clear_bit(fsa_input_register, R_ST_REQUIRED);
crm_info("Disconnecting from fencer");
stonith_api->cmds->disconnect(stonith_api);
}
}
/* A_SHUTDOWN_REQ */
void
do_shutdown_req(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *msg = NULL;
set_bit(fsa_input_register, R_SHUTDOWN);
crm_info("Sending shutdown request to all peers (DC is %s)",
(fsa_our_dc? fsa_our_dc : "not set"));
msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
/* set_bit(fsa_input_register, R_STAYDOWN); */
if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
free_xml(msg);
}
extern char *max_generation_from;
extern xmlNode *max_generation_xml;
extern GHashTable *resource_history;
extern GHashTable *voted;
extern char *te_client_id;
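/* Exit immediately, remapping the exit code if respawning must be inhibited
 * (R_STAYDOWN) or if we are "succeeding" while still in recovery
 */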
crm_exit_t
crmd_fast_exit(crm_exit_t exit_code)
{
if (is_set(fsa_input_register, R_STAYDOWN)) {
crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d",
exit_code, CRM_EX_FATAL);
exit_code = CRM_EX_FATAL;
} else if ((exit_code == CRM_EX_OK)
&& is_set(fsa_input_register, R_IN_RECOVERY)) {
crm_err("Could not recover from internal error");
exit_code = CRM_EX_ERROR;
}
crm_exit(exit_code);
}
crm_exit_t
crmd_exit(crm_exit_t exit_code)
{
GListPtr gIter = NULL;
GMainLoop *mloop = crmd_mainloop;
static bool in_progress = FALSE;
if (in_progress && (exit_code == CRM_EX_OK)) {
crm_debug("Exit is already in progress");
return exit_code;
} else if(in_progress) {
crm_notice("Error during shutdown process, exiting now with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
in_progress = TRUE;
crm_trace("Preparing to exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
/* Suppress secondary errors resulting from us disconnecting everything */
set_bit(fsa_input_register, R_HA_DISCONNECTED);
/* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */
if(ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
controld_close_attrd_ipc();
pe_subsystem_free();
if(stonith_api) {
crm_trace("Disconnecting fencing API");
clear_bit(fsa_input_register, R_ST_REQUIRED);
stonith_api->cmds->free(stonith_api); stonith_api = NULL;
}
if ((exit_code == CRM_EX_OK) && (crmd_mainloop == NULL)) {
crm_debug("No mainloop detected");
exit_code = CRM_EX_ERROR;
}
/* On an error, just get out.
*
* Otherwise, make the effort to have mainloop exit gracefully so
* that it (mostly) cleans up after itself and valgrind has less
* to report on, allowing real errors to stand out
*/
if (exit_code != CRM_EX_OK) {
crm_notice("Forcing immediate exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
return crmd_fast_exit(exit_code);
}
/* Clean up as much memory as possible for valgrind */
for (gIter = fsa_message_queue; gIter != NULL; gIter = gIter->next) {
fsa_data_t *fsa_data = gIter->data;
crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]",
fsa_input2string(fsa_data->fsa_input),
fsa_state2string(fsa_state),
fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
delete_fsa_input(fsa_data);
}
clear_bit(fsa_input_register, R_MEMBERSHIP);
g_list_free(fsa_message_queue); fsa_message_queue = NULL;
metadata_cache_fini();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
* be used when we drain the mainloop later.
*/
cib_free_callbacks(fsa_cib_conn);
fsa_cib_conn->cmds->signoff(fsa_cib_conn);
verify_stopped(fsa_state, LOG_WARNING);
clear_bit(fsa_input_register, R_LRM_CONNECTED);
lrm_state_destroy_all();
/* This basically will not work, since mainloop has a reference to it */
mainloop_destroy_trigger(fsa_source); fsa_source = NULL;
mainloop_destroy_trigger(config_read); config_read = NULL;
mainloop_destroy_trigger(stonith_reconnect); stonith_reconnect = NULL;
mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL;
crm_client_cleanup();
crm_peer_destroy();
crm_timer_stop(transition_timer);
crm_timer_stop(integration_timer);
crm_timer_stop(finalization_timer);
crm_timer_stop(election_trigger);
crm_timer_stop(shutdown_escalation_timer);
crm_timer_stop(wait_timer);
crm_timer_stop(recheck_timer);
free(transition_timer); transition_timer = NULL;
free(integration_timer); integration_timer = NULL;
free(finalization_timer); finalization_timer = NULL;
free(election_trigger); election_trigger = NULL;
free(shutdown_escalation_timer); shutdown_escalation_timer = NULL;
free(wait_timer); wait_timer = NULL;
free(recheck_timer); recheck_timer = NULL;
free(fsa_our_dc_version); fsa_our_dc_version = NULL;
free(fsa_our_uname); fsa_our_uname = NULL;
free(fsa_our_uuid); fsa_our_uuid = NULL;
free(fsa_our_dc); fsa_our_dc = NULL;
free(fsa_cluster_name); fsa_cluster_name = NULL;
free(te_uuid); te_uuid = NULL;
free(te_client_id); te_client_id = NULL;
free(fsa_pe_ref); fsa_pe_ref = NULL;
free(failed_stop_offset); failed_stop_offset = NULL;
free(failed_start_offset); failed_start_offset = NULL;
free(max_generation_from); max_generation_from = NULL;
free_xml(max_generation_xml); max_generation_xml = NULL;
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGTRAP);
/* leave SIGCHLD engaged as we might still want to drain some service-actions */
if (mloop) {
GMainContext *ctx = g_main_loop_get_context(crmd_mainloop);
/* Don't re-enter this block */
crmd_mainloop = NULL;
/* no signals on final draining anymore */
mainloop_destroy_signal(SIGCHLD);
crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
{
int lpc = 0;
while((g_main_context_pending(ctx) && lpc < 10)) {
lpc++;
crm_trace("Iteration %d", lpc);
g_main_context_dispatch(ctx);
}
}
crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
g_main_loop_quit(mloop);
/* Won't do anything yet, since we're inside it now */
g_main_loop_unref(mloop);
} else {
mainloop_destroy_signal(SIGCHLD);
}
cib_delete(fsa_cib_conn);
fsa_cib_conn = NULL;
throttle_fini();
/* Graceful */
crm_trace("Done preparing for exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
return exit_code;
}
/* A_EXIT_0, A_EXIT_1 */
void
do_exit(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
int log_level = LOG_INFO;
const char *exit_type = "gracefully";
if (action & A_EXIT_1) {
log_level = LOG_ERR;
exit_type = "forcefully";
exit_code = CRM_EX_ERROR;
}
verify_stopped(cur_state, LOG_ERR);
do_crm_log(log_level, "Performing %s - %s exiting the controller",
fsa_action2string(action), exit_type);
crm_info("[%s] stopped (%d)", crm_system_name, exit_code);
crmd_exit(exit_code);
}
static void sigpipe_ignore(int nsig) { return; }
/* A_STARTUP */
void
do_startup(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
int was_error = 0;
crm_debug("Registering Signal Handlers");
mainloop_add_signal(SIGTERM, crm_shutdown);
mainloop_add_signal(SIGPIPE, sigpipe_ignore);
fsa_source = mainloop_add_trigger(G_PRIORITY_HIGH, crm_fsa_trigger, NULL);
config_read = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL);
transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL);
crm_debug("Creating CIB manager and executor objects");
fsa_cib_conn = cib_new();
lrm_state_init_local();
/* set up the timers */
transition_timer = calloc(1, sizeof(fsa_timer_t));
integration_timer = calloc(1, sizeof(fsa_timer_t));
finalization_timer = calloc(1, sizeof(fsa_timer_t));
election_trigger = calloc(1, sizeof(fsa_timer_t));
shutdown_escalation_timer = calloc(1, sizeof(fsa_timer_t));
wait_timer = calloc(1, sizeof(fsa_timer_t));
recheck_timer = calloc(1, sizeof(fsa_timer_t));
if (election_trigger != NULL) {
election_trigger->source_id = 0;
election_trigger->period_ms = -1;
election_trigger->fsa_input = I_DC_TIMEOUT;
election_trigger->callback = crm_timer_popped;
election_trigger->repeat = FALSE;
} else {
was_error = TRUE;
}
if (transition_timer != NULL) {
transition_timer->source_id = 0;
transition_timer->period_ms = -1;
transition_timer->fsa_input = I_PE_CALC;
transition_timer->callback = crm_timer_popped;
transition_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (integration_timer != NULL) {
integration_timer->source_id = 0;
integration_timer->period_ms = -1;
integration_timer->fsa_input = I_INTEGRATED;
integration_timer->callback = crm_timer_popped;
integration_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (finalization_timer != NULL) {
finalization_timer->source_id = 0;
finalization_timer->period_ms = -1;
finalization_timer->fsa_input = I_FINALIZED;
finalization_timer->callback = crm_timer_popped;
finalization_timer->repeat = FALSE;
/* A bug in the join protocol could leave a slave in S_PENDING while we
 * think it is in S_NOT_DC.
 *
 * Raising I_FINALIZED then put us into a transition loop that never
 * resolved: we continually sent probes, which the node NACK'd because it
 * was still in S_PENDING. Raising I_ELECTION instead avoids the loop.
 *
 * Nodes where the cluster layer is active but the CRM is not are handled
 * in the integration phase.
 */
finalization_timer->fsa_input = I_ELECTION;
} else {
was_error = TRUE;
}
if (shutdown_escalation_timer != NULL) {
shutdown_escalation_timer->source_id = 0;
shutdown_escalation_timer->period_ms = -1;
shutdown_escalation_timer->fsa_input = I_STOP;
shutdown_escalation_timer->callback = crm_timer_popped;
shutdown_escalation_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (wait_timer != NULL) {
wait_timer->source_id = 0;
wait_timer->period_ms = 2000;
wait_timer->fsa_input = I_NULL;
wait_timer->callback = crm_timer_popped;
wait_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (recheck_timer != NULL) {
recheck_timer->source_id = 0;
recheck_timer->period_ms = -1;
recheck_timer->fsa_input = I_PE_CALC;
recheck_timer->callback = crm_timer_popped;
recheck_timer->repeat = FALSE;
} else {
was_error = TRUE;
}
if (was_error) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
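/* libqb callbacks for the controller's IPC server */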
static int32_t
crmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
crmd_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
static int32_t
crmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *msg = crm_ipcs_recv(client, data, size, &id, &flags);
crm_trace("Invoked: %s", crm_client_name(client));
crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg == NULL) {
return 0;
}
#if ENABLE_ACL
CRM_ASSERT(client->user != NULL);
crm_acl_get_set_user(msg, F_CRM_USER, client->user);
#endif
crm_trace("Processing msg from %s", crm_client_name(client));
crm_log_xml_trace(msg, "controller[inbound]");
crm_xml_add(msg, F_CRM_SYS_FROM, client->id);
if (crmd_authorize_message(msg, client, NULL)) {
route_message(C_IPC_MESSAGE, msg);
}
trigger_fsa(fsa_source);
free_xml(msg);
return 0;
}
static int32_t
crmd_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client) {
crm_trace("Disconnecting %sregistered client %s (%p/%p)",
(client->userdata? "" : "un"), crm_client_name(client),
c, client);
free(client->userdata);
crm_client_destroy(client);
trigger_fsa(fsa_source);
}
return 0;
}
static void
crmd_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
crmd_ipc_closed(c);
}
/* A_STOP */
void
do_stop(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs); ipcs = NULL;
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* A_STARTED */
void
do_started(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static struct qb_ipcs_service_handlers crmd_callbacks = {
.connection_accept = crmd_ipc_accept,
.connection_created = crmd_ipc_created,
.msg_process = crmd_ipc_dispatch,
.connection_closed = crmd_ipc_closed,
.connection_destroyed = crmd_ipc_destroy
};
if (cur_state != S_STARTING) {
crm_err("Start cancelled... %s", fsa_state2string(cur_state));
return;
} else if (is_set(fsa_input_register, R_MEMBERSHIP) == FALSE) {
crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_LRM_CONNECTED) == FALSE) {
crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) {
crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_READ_CONFIG) == FALSE) {
crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG);
crmd_fsa_stall(TRUE);
return;
} else if (is_set(fsa_input_register, R_PEER_DATA) == FALSE) {
crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA);
crmd_fsa_stall(TRUE);
return;
}
crm_debug("Init server comms");
ipcs = crmd_ipc_server_init(&crmd_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
if (stonith_reconnect == NULL) {
int dummy;
stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW, te_connect_stonith, &dummy);
}
set_bit(fsa_input_register, R_ST_REQUIRED);
mainloop_set_trigger(stonith_reconnect);
- crm_notice("The local CRM is operational");
+ crm_notice("Pacemaker controller successfully started and accepting connections");
clear_bit(fsa_input_register, R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
}
/* A_RECOVER */
void
do_recover(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
set_bit(fsa_input_register, R_IN_RECOVERY);
crm_warn("Fast-tracking shutdown in response to errors");
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* *INDENT-OFF* */
static pe_cluster_option crmd_opts[] = {
/* name, old-name, validate, values, default, short description, long description */
{ "dc-version", NULL, "string", NULL, "none", NULL,
"Version of Pacemaker on the cluster's DC.",
"Includes the hash which identifies the exact changeset it was built from. Used for diagnostic purposes."
},
{ "cluster-infrastructure", NULL, "string", NULL, "corosync", NULL,
"The messaging stack on which Pacemaker is currently running.",
"Used for informational and diagnostic purposes." },
{ XML_CONFIG_ATTR_DC_DEADTIME, NULL, "time", NULL, "20s", &check_time,
"How long to wait for a response from other nodes during startup.",
"The \"correct\" value will depend on the speed/load of your network and the type of switches used."
},
{ XML_CONFIG_ATTR_RECHECK, NULL, "time",
"Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified. eg. 5min)",
"15min", &check_timer,
"Polling interval for time based changes to options, resource parameters and constraints.",
"The Cluster is primarily event driven, however the configuration can have elements that change based on time."
" To ensure these changes take effect, we can optionally poll the cluster's status for changes."
},
{ "load-threshold", NULL, "percentage", NULL, "80%", &check_utilization,
"The maximum amount of system resources that should be used by nodes in the cluster",
"The cluster will slow down its recovery process when the amount of system resources used"
" (currently CPU) approaches this limit",
},
{ "node-action-limit", NULL, "integer", NULL, "0", &check_number,
"The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"},
{ XML_CONFIG_ATTR_ELECTION_FAIL, NULL, "time", NULL, "2min", &check_timer,
"*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug."
},
{ XML_CONFIG_ATTR_FORCE_QUIT, NULL, "time", NULL, "20min", &check_timer,
"*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug."
},
{
"join-integration-timeout", "crmd-integration-timeout",
"time", NULL, "3min", &check_timer,
"*** Advanced Use Only ***",
"If need to adjust this value, it probably indicates the presence of a bug"
},
{
"join-finalization-timeout", "crmd-finalization-timeout",
"time", NULL, "30min", &check_timer,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates the presence of a bug"
},
{
"transition-delay", "crmd-transition-delay",
"time", NULL, "0s", &check_timer,
"*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions",
"Delay cluster recovery for the configured interval to allow for additional/related events to occur.\n"
"Useful if your configuration is sensitive to the order in which ping updates arrive."
},
{ "stonith-watchdog-timeout", NULL, "time", NULL, NULL, &check_sbd_timeout,
"How long to wait before we can assume nodes are safely down", NULL
},
{ "stonith-max-attempts",NULL,"integer",NULL,"10",&check_positive_number,
"How many times stonith can fail before it will no longer be attempted on a target"
},
{ "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL },
};
/* *INDENT-ON* */
void
crmd_metadata(void)
{
config_metadata("pacemaker-controld", "1.0",
"controller properties",
"Cluster properties used by Pacemaker's controller,"
" formerly known as crmd",
crmd_opts, DIMOF(crmd_opts));
}
static void
verify_crmd_options(GHashTable * options)
{
verify_all_options(options, crmd_opts, DIMOF(crmd_opts));
}
static const char *
crmd_pref(GHashTable * options, const char *name)
{
return get_cluster_pref(options, crmd_opts, DIMOF(crmd_opts), name);
}
static void
config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
const char *value = NULL;
GHashTable *config_hash = NULL;
crm_time_t *now = crm_time_new(NULL);
xmlNode *crmconfig = NULL;
xmlNode *alerts = NULL;
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
crm_err("The cluster is mis-configured - shutting down and staying down");
set_bit(fsa_input_register, R_STAYDOWN);
}
goto bail;
}
crmconfig = output;
if ((crmconfig) &&
(crm_element_name(crmconfig)) &&
(strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) {
crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG);
}
if (!crmconfig) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
goto bail;
}
crm_debug("Call %d : Parsing CIB options", call_id);
config_hash = crm_str_table_new();
unpack_instance_attributes(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL, config_hash,
CIB_OPTIONS_FIRST, FALSE, now);
verify_crmd_options(config_hash);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME);
election_trigger->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "node-action-limit"); /* Also checks migration-limit */
throttle_update_job_max(value);
value = crmd_pref(config_hash, "load-threshold");
if(value) {
throttle_set_load_target(strtof(value, NULL) / 100.0);
}
value = crmd_pref(config_hash, "no-quorum-policy");
if (safe_str_eq(value, "suicide") && pcmk_locate_sbd()) {
no_quorum_suicide_escalation = TRUE;
}
value = crmd_pref(config_hash,"stonith-max-attempts");
update_stonith_max_attempts(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_FORCE_QUIT);
shutdown_escalation_timer->period_ms = crm_get_msec(value);
crm_debug("Shutdown escalation occurs after: %dms", shutdown_escalation_timer->period_ms);
/* How long to declare an election over - even if not everyone voted */
value = crmd_pref(config_hash, XML_CONFIG_ATTR_ELECTION_FAIL);
controld_set_election_period(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_RECHECK);
recheck_timer->period_ms = crm_get_msec(value);
crm_debug("Checking for expired actions every %dms", recheck_timer->period_ms);
value = crmd_pref(config_hash, "transition-delay");
transition_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-integration-timeout");
integration_timer->period_ms = crm_get_msec(value);
value = crmd_pref(config_hash, "join-finalization-timeout");
finalization_timer->period_ms = crm_get_msec(value);
free(fsa_cluster_name);
fsa_cluster_name = NULL;
value = g_hash_table_lookup(config_hash, "cluster-name");
if (value) {
fsa_cluster_name = strdup(value);
}
alerts = first_named_child(output, XML_CIB_TAG_ALERTS);
crmd_unpack_alerts(alerts);
set_bit(fsa_input_register, R_READ_CONFIG);
crm_trace("Triggering FSA: %s", __FUNCTION__);
mainloop_set_trigger(fsa_source);
g_hash_table_destroy(config_hash);
bail:
crm_time_free(now);
}
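/*!
* \internal
* \brief Asynchronously query the CIB for the cluster options and alerts sections
*
* \param[in] user_data Ignored
*
* \return Always TRUE
*/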
gboolean
crm_read_options(gpointer user_data)
{
int call_id =
fsa_cib_conn->cmds->query(fsa_cib_conn,
"//" XML_CIB_TAG_CRMCONFIG " | //" XML_CIB_TAG_ALERTS,
NULL, cib_xpath | cib_scope_local);
fsa_register_cib_callback(call_id, FALSE, NULL, config_query_callback);
crm_trace("Querying the CIB... call %d", call_id);
return TRUE;
}
/* A_READCONFIG */
void
do_read_config(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
throttle_init();
mainloop_set_trigger(config_read);
}
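/*!
* \internal
* \brief Handle a shutdown request, escalating if a shutdown is already underway
*
* \param[in] nsig Signal that caused invocation (ignored)
*/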
void
crm_shutdown(int nsig)
{
if (crmd_mainloop != NULL && g_main_loop_is_running(crmd_mainloop)) {
if (is_set(fsa_input_register, R_SHUTDOWN)) {
crm_err("Escalating the shutdown");
register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL);
} else {
set_bit(fsa_input_register, R_SHUTDOWN);
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
if (shutdown_escalation_timer->period_ms < 1) {
const char *value = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT);
int msec = crm_get_msec(value);
crm_debug("Using default shutdown escalation: %dms", msec);
shutdown_escalation_timer->period_ms = msec;
}
/* can't rely on this... */
crm_notice("Shutting down cluster resource manager " CRM_XS
" limit=%dms", shutdown_escalation_timer->period_ms);
crm_timer_start(shutdown_escalation_timer);
}
} else {
crm_info("exit from shutdown");
crmd_exit(CRM_EX_OK);
}
}
diff --git a/daemons/controld/pacemaker-controld.c b/daemons/controld/pacemaker-controld.c
index 38842fbcda..148737f05a 100644
--- a/daemons/controld/pacemaker-controld.c
+++ b/daemons/controld/pacemaker-controld.c
@@ -1,173 +1,173 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define OPTARGS "hV"
void usage(const char *cmd, int exit_status);
int crmd_init(void);
void crmd_hamsg_callback(const xmlNode * msg, void *private_data);
extern void init_dotfile(void);
GMainLoop *crmd_mainloop = NULL;
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
int flag;
int index = 0;
int argerr = 0;
crm_ipc_t *old_instance = NULL;
crmd_mainloop = g_main_loop_new(NULL, FALSE);
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Daemon for aggregating resource and node failures as well as co-ordinating the cluster's response");
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'h': /* Help message */
crm_help(flag, CRM_EX_OK);
break;
default:
++argerr;
break;
}
}
if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
crmd_metadata();
return CRM_EX_OK;
} else if (argc - optind == 1 && safe_str_eq("version", argv[optind])) {
fprintf(stdout, "CRM Version: %s (%s)\n", PACEMAKER_VERSION, BUILD_VERSION);
return CRM_EX_OK;
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
- crm_info("CRM Git Version: %s (%s)", PACEMAKER_VERSION, BUILD_VERSION);
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
+ crm_notice("Starting Pacemaker controller");
+
old_instance = crm_ipc_new(CRM_SYSTEM_CRMD, 0);
if (crm_ipc_connect(old_instance)) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("pacemaker-controld is already active, aborting startup");
crm_exit(CRM_EX_OK);
} else {
/* not up or not authentic, we'll proceed either way */
crm_ipc_destroy(old_instance);
old_instance = NULL;
}
if (pcmk__daemon_can_write(PE_STATE_DIR, NULL) == FALSE) {
crm_err("Terminating due to bad permissions on " PE_STATE_DIR);
fprintf(stderr,
"ERROR: Bad permissions on " PE_STATE_DIR " (see logs for details)\n");
fflush(stderr);
return CRM_EX_FATAL;
} else if (pcmk__daemon_can_write(CRM_CONFIG_DIR, NULL) == FALSE) {
crm_err("Terminating due to bad permissions on " CRM_CONFIG_DIR);
fprintf(stderr,
"ERROR: Bad permissions on " CRM_CONFIG_DIR " (see logs for details)\n");
fflush(stderr);
return CRM_EX_FATAL;
}
return crmd_init();
}
static void
log_deprecation_warnings()
{
// Add deprecations here as needed
}
int
crmd_init(void)
{
crm_exit_t exit_code = CRM_EX_OK;
enum crmd_fsa_state state;
log_deprecation_warnings();
fsa_state = S_STARTING;
fsa_input_register = 0; /* zero out the register */
init_dotfile();
- crm_debug("Starting %s", crm_system_name);
register_fsa_input(C_STARTUP, I_STARTUP, NULL);
crm_peer_init();
state = s_crmd_fsa(C_STARTUP);
if (state == S_PENDING || state == S_STARTING) {
/* Create the mainloop and run it... */
crm_trace("Starting %s's mainloop", crm_system_name);
g_main_loop_run(crmd_mainloop);
if (is_set(fsa_input_register, R_STAYDOWN)) {
crm_info("Inhibiting automated respawn");
exit_code = CRM_EX_FATAL;
}
} else {
crm_err("Startup of %s failed. Current state: %s",
crm_system_name, fsa_state2string(state));
exit_code = CRM_EX_ERROR;
}
crm_info("%s[%lu] exiting with status %d (%s)",
crm_system_name, (unsigned long) getpid(), exit_code,
crm_exit_str(exit_code));
return crmd_fast_exit(exit_code);
}
diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c
index 3cec288e75..21bb0edde8 100644
--- a/daemons/execd/pacemaker-execd.c
+++ b/daemons/execd/pacemaker-execd.c
@@ -1,622 +1,630 @@
/*
* Copyright 2012-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-execd.h"
#if defined(HAVE_GNUTLS_GNUTLS_H) && defined(SUPPORT_REMOTE)
# define ENABLE_PCMK_REMOTE
// Hidden in liblrmd
extern int lrmd_tls_send_msg(crm_remote_t *session, xmlNode *msg, uint32_t id,
const char *msg_type);
#endif
static GMainLoop *mainloop = NULL;
static qb_ipcs_service_t *ipcs = NULL;
static stonith_t *stonith_api = NULL;
int lrmd_call_id = 0;
#ifdef ENABLE_PCMK_REMOTE
/* whether shutdown request has been sent */
static sig_atomic_t shutting_down = FALSE;
/* timer for waiting for acknowledgment of shutdown request */
static guint shutdown_ack_timer = 0;
static gboolean lrmd_exit(gpointer data);
#endif
static void
stonith_connection_destroy_cb(stonith_t * st, stonith_event_t * e)
{
stonith_api->state = stonith_disconnected;
crm_err("Connection to fencer lost");
stonith_connection_failed();
}
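/*!
* \internal
* \brief Get a connection to the fencer, creating a new one if needed
*
* \return Connected fencer API object, or NULL if a connection could not be
*         established within 10 attempts
*/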
stonith_t *
get_stonith_connection(void)
{
if (stonith_api && stonith_api->state == stonith_disconnected) {
stonith_api_delete(stonith_api);
stonith_api = NULL;
}
if (!stonith_api) {
int rc = 0;
int tries = 10;
stonith_api = stonith_api_new();
do {
rc = stonith_api->cmds->connect(stonith_api, "pacemaker-execd", NULL);
if (rc == pcmk_ok) {
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_DISCONNECT,
stonith_connection_destroy_cb);
break;
}
sleep(1);
tries--;
} while (tries);
if (rc) {
crm_err("Unable to connect to stonith daemon to execute command. error: %s",
pcmk_strerror(rc));
stonith_api_delete(stonith_api);
stonith_api = NULL;
}
}
return stonith_api;
}
static int32_t
lrmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
lrmd_ipc_created(qb_ipcs_connection_t * c)
{
crm_client_t *new_client = crm_client_get(c);
crm_trace("Connection %p", c);
CRM_ASSERT(new_client != NULL);
/* Now that the connection is officially established, alert
* the other clients that a new connection exists. */
notify_of_new_client(new_client);
}
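/*!
* \internal
* \brief Handle a request from a local IPC client
*
* \param[in] c    IPC connection carrying the request
* \param[in] data Raw message data
* \param[in] size Size of raw message data in bytes
*
* \return Always 0
*/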
static int32_t
lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *client = crm_client_get(c);
xmlNode *request = crm_ipcs_recv(client, data, size, &id, &flags);
CRM_CHECK(client != NULL, crm_err("Invalid client");
return FALSE);
CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client);
return FALSE);
CRM_CHECK(flags & crm_ipc_client_response, crm_err("Invalid client request: %p", client);
return FALSE);
if (!request) {
return 0;
}
if (!client->name) {
const char *value = crm_element_value(request, F_LRMD_CLIENTNAME);
if (value == NULL) {
client->name = crm_itoa(crm_ipcs_client_pid(c));
} else {
client->name = strdup(value);
}
}
lrmd_call_id++;
if (lrmd_call_id < 1) {
lrmd_call_id = 1;
}
crm_xml_add(request, F_LRMD_CLIENTID, client->id);
crm_xml_add(request, F_LRMD_CLIENTNAME, client->name);
crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id);
process_lrmd_message(client, id, request);
free_xml(request);
return 0;
}
/*!
* \internal
* \brief Free a client connection, and exit if appropriate
*
* \param[in] client Client connection to free
*/
void
lrmd_client_destroy(crm_client_t *client)
{
crm_client_destroy(client);
#ifdef ENABLE_PCMK_REMOTE
/* If we were waiting to shut down, we can now safely do so
* if there are no more proxied IPC providers
*/
if (shutting_down && (ipc_proxy_get_provider() == NULL)) {
lrmd_exit(NULL);
}
#endif
}
static int32_t
lrmd_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
client_disconnect_cleanup(client->id);
#ifdef ENABLE_PCMK_REMOTE
ipc_proxy_remove_provider(client);
#endif
lrmd_client_destroy(client);
return 0;
}
static void
lrmd_ipc_destroy(qb_ipcs_connection_t * c)
{
lrmd_ipc_closed(c);
crm_trace("Connection %p", c);
}
static struct qb_ipcs_service_handlers lrmd_ipc_callbacks = {
.connection_accept = lrmd_ipc_accept,
.connection_created = lrmd_ipc_created,
.msg_process = lrmd_ipc_dispatch,
.connection_closed = lrmd_ipc_closed,
.connection_destroyed = lrmd_ipc_destroy
};
int
lrmd_server_send_reply(crm_client_t * client, uint32_t id, xmlNode * reply)
{
crm_trace("Sending reply (%d) to client (%s)", id, client->id);
switch (client->kind) {
case CRM_CLIENT_IPC:
return crm_ipcs_send(client, id, reply, FALSE);
#ifdef ENABLE_PCMK_REMOTE
case CRM_CLIENT_TLS:
return lrmd_tls_send_msg(client->remote, reply, id, "reply");
#endif
default:
crm_err("Could not send reply: unknown client type %d",
client->kind);
}
return -ENOTCONN;
}
int
lrmd_server_send_notify(crm_client_t * client, xmlNode * msg)
{
crm_trace("Sending notification to client (%s)", client->id);
switch (client->kind) {
case CRM_CLIENT_IPC:
if (client->ipcs == NULL) {
crm_trace("Could not notify local client: disconnected");
return -ENOTCONN;
}
return crm_ipcs_send(client, 0, msg, crm_ipc_server_event);
#ifdef ENABLE_PCMK_REMOTE
case CRM_CLIENT_TLS:
if (client->remote == NULL) {
crm_trace("Could not notify remote client: disconnected");
return -ENOTCONN;
}
return lrmd_tls_send_msg(client->remote, msg, 0, "notify");
#endif
default:
crm_err("Could not notify client: unknown type %d", client->kind);
}
return -ENOTCONN;
}
/*!
* \internal
* \brief Clean up and exit immediately
*
* \param[in] data Ignored
*
* \return Doesn't return
* \note This can be used as a timer callback.
*/
static gboolean
lrmd_exit(gpointer data)
{
crm_info("Terminating with %d clients",
crm_hash_table_size(client_connections));
if (stonith_api) {
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
stonith_api->cmds->disconnect(stonith_api);
stonith_api_delete(stonith_api);
}
if (ipcs) {
mainloop_del_ipc_server(ipcs);
}
#ifdef ENABLE_PCMK_REMOTE
lrmd_tls_server_destroy();
ipc_proxy_cleanup();
#endif
crm_client_cleanup();
g_hash_table_destroy(rsc_list);
if (mainloop) {
lrmd_drain_alerts(mainloop);
}
crm_exit(CRM_EX_OK);
return FALSE;
}
/*!
* \internal
* \brief Request cluster shutdown if appropriate, otherwise exit immediately
*
* \param[in] nsig Signal that caused invocation (ignored)
*/
static void
lrmd_shutdown(int nsig)
{
#ifdef ENABLE_PCMK_REMOTE
crm_client_t *ipc_proxy = ipc_proxy_get_provider();
/* If there are active proxied IPC providers, then we may be running
* resources, so notify the cluster that we wish to shut down.
*/
if (ipc_proxy) {
if (shutting_down) {
crm_notice("Waiting for cluster to stop resources before exiting");
return;
}
crm_info("Sending shutdown request to cluster");
if (ipc_proxy_shutdown_req(ipc_proxy) < 0) {
crm_crit("Shutdown request failed, exiting immediately");
} else {
/* We requested a shutdown. Now, we need to wait for an
* acknowledgement from the proxy host (which ensures the proxy host
* supports shutdown requests), then wait for all proxy hosts to
* disconnect (which ensures that all resources have been stopped).
*/
shutting_down = TRUE;
/* Stop accepting new proxy connections */
lrmd_tls_server_destroy();
/* Older controller versions will never acknowledge our request, so
* set a fairly short timeout to exit quickly in that case. If we
* get the ack, we'll defuse this timer.
*/
shutdown_ack_timer = g_timeout_add_seconds(20, lrmd_exit, NULL);
/* Currently, we let the OS kill us if the clients don't disconnect
* in a reasonable time. We could instead set a long timer here
* (shorter than what the OS is likely to use) and exit immediately
* if it pops.
*/
return;
}
}
#endif
lrmd_exit(NULL);
}
/*!
* \internal
* \brief Defuse short exit timer if shutting down
*/
void handle_shutdown_ack()
{
#ifdef ENABLE_PCMK_REMOTE
if (shutting_down) {
crm_info("Received shutdown ack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = 0;
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown ack");
}
/*!
* \internal
* \brief Make short exit timer fire immediately
*/
void handle_shutdown_nack()
{
#ifdef ENABLE_PCMK_REMOTE
if (shutting_down) {
crm_info("Received shutdown nack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = g_timeout_add(0, lrmd_exit, NULL);
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown nack");
}
static pid_t main_pid = 0;
static void
sigdone(void)
{
exit(CRM_EX_OK);
}
static void
sigreap(void)
{
pid_t pid = 0;
int status;
do {
/*
* Opinions seem to differ as to what to put here:
* -1, any child process
* 0, any child process whose process group ID is equal to that of the calling process
*/
pid = waitpid(-1, &status, WNOHANG);
if(pid == main_pid) {
/* Exit when pacemaker-remote exits and use the same return code */
if (WIFEXITED(status)) {
exit(WEXITSTATUS(status));
}
exit(CRM_EX_ERROR);
}
} while (pid > 0);
}
static struct {
int sig;
void (*handler)(void);
} sigmap[] = {
{ SIGCHLD, sigreap },
{ SIGINT, sigdone },
};
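/*!
* \internal
* \brief If running as PID 1 (e.g. in a container), fork the real daemon and
*        make this process reap zombie processes
*
* \param[in] argc Number of command-line arguments
* \param[in] argv Command-line arguments
* \param[in] envp Process environment
*/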
static void spawn_pidone(int argc, char **argv, char **envp)
{
sigset_t set;
if (getpid() != 1) {
return;
}
sigfillset(&set);
sigprocmask(SIG_BLOCK, &set, 0);
main_pid = fork();
switch (main_pid) {
case 0:
sigprocmask(SIG_UNBLOCK, &set, NULL);
setsid();
setpgid(0, 0);
/* Child remains as pacemaker-remoted */
return;
case -1:
perror("fork");
}
/* Parent becomes the reaper of zombie processes */
/* Safe to initialize logging now if needed */
#ifdef HAVE___PROGNAME
/* Differentiate ourselves in the 'ps' output */
{
char *p;
int i, maxlen;
char *LastArgv = NULL;
const char *name = "pcmk-init";
for(i = 0; i < argc; i++) {
if(!i || (LastArgv + 1 == argv[i]))
LastArgv = argv[i] + strlen(argv[i]);
}
for(i = 0; envp[i] != NULL; i++) {
if((LastArgv + 1) == envp[i]) {
LastArgv = envp[i] + strlen(envp[i]);
}
}
maxlen = (LastArgv - argv[0]) - 2;
i = strlen(name);
/* We can overwrite individual argv[] arguments */
snprintf(argv[0], maxlen, "%s", name);
/* Now zero out everything else */
p = &argv[0][i];
while(p < LastArgv)
*p++ = '\0';
argv[1] = NULL;
}
#endif /* HAVE___PROGNAME */
while (1) {
int sig;
size_t i;
sigwait(&set, &sig);
for (i = 0; i < DIMOF(sigmap); i++) {
if (sigmap[i].sig == sig) {
sigmap[i].handler();
break;
}
}
}
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"logfile", 1, 0, 'l', "\tSend logs to the additional named logfile"},
#ifdef ENABLE_PCMK_REMOTE
{"port", 1, 0, 'p', "\tPort to listen on"},
#endif
{0, 0, 0, 0}
};
/* *INDENT-ON* */
+#ifdef ENABLE_PCMK_REMOTE
+# define EXECD_TYPE "remote"
+#else
+# define EXECD_TYPE "local"
+#endif
+
int
main(int argc, char **argv, char **envp)
{
int flag = 0;
int index = 0;
int bump_log_num = 0;
const char *option = NULL;
/* If necessary, create PID1 now before any FDs are opened */
spawn_pidone(argc, argv, envp);
#ifndef ENABLE_PCMK_REMOTE
crm_log_preinit("pacemaker-execd", argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Resource agent executor daemon for cluster nodes");
#else
crm_log_preinit("pacemaker-remoted", argc, argv);
crm_set_options(NULL, "[options]", long_options,
"Resource agent executor daemon for Pacemaker Remote nodes");
#endif
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1) {
break;
}
switch (flag) {
case 'l':
crm_add_logfile(optarg);
break;
case 'p':
setenv("PCMK_remote_port", optarg, 1);
break;
case 'V':
bump_log_num++;
break;
case '?':
case '$':
crm_help(flag, CRM_EX_OK);
break;
default:
crm_help('?', CRM_EX_USAGE);
break;
}
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
while (bump_log_num > 0) {
crm_bump_log_level(argc, argv);
bump_log_num--;
}
option = daemon_option("logfacility");
if(option && safe_str_neq(option, "none")) {
setenv("HA_LOGFACILITY", option, 1); /* Used by the ocf_log/ha_log OCF macro */
}
option = daemon_option("logfile");
if(option && safe_str_neq(option, "none")) {
setenv("HA_LOGFILE", option, 1); /* Used by the ocf_log/ha_log OCF macro */
if (daemon_option_enabled(crm_system_name, "debug")) {
setenv("HA_DEBUGLOG", option, 1); /* Used by the ocf_log/ha_debug OCF macro */
}
}
+ crm_notice("Starting Pacemaker " EXECD_TYPE " executor");
+
/* The presence of this variable allegedly controls whether child
* processes like httpd will try to use systemd's sd_notify API
*/
unsetenv("NOTIFY_SOCKET");
/* Used by RAs - Leave owned by root */
crm_build_path(CRM_RSCTMP_DIR, 0755);
rsc_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_rsc);
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_LRMD, QB_IPC_SHM, &lrmd_ipc_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
crm_exit(CRM_EX_FATAL);
}
#ifdef ENABLE_PCMK_REMOTE
if (lrmd_init_remote_tls_server() < 0) {
crm_err("Failed to create TLS listener: shutting down and staying down");
crm_exit(CRM_EX_FATAL);
}
ipc_proxy_init();
#endif
mainloop_add_signal(SIGTERM, lrmd_shutdown);
mainloop = g_main_loop_new(NULL, FALSE);
- crm_info("Starting");
+ crm_notice("Pacemaker " EXECD_TYPE " executor successfully started and accepting connections");
g_main_loop_run(mainloop);
/* should never get here */
lrmd_exit(NULL);
return CRM_EX_OK;
}
diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c
index f3ab3c7994..8681419421 100644
--- a/daemons/execd/remoted_tls.c
+++ b/daemons/execd/remoted_tls.c
@@ -1,406 +1,408 @@
/*
- * Copyright 2012-2018 David Vossel
+ * Copyright 2012-2019 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-execd.h"
#ifdef HAVE_GNUTLS_GNUTLS_H
# include <gnutls/gnutls.h>
// Hidden in liblrmd
extern int lrmd_tls_set_key(gnutls_datum_t *key);
# define LRMD_REMOTE_AUTH_TIMEOUT 10000
gnutls_psk_server_credentials_t psk_cred_s;
gnutls_dh_params_t dh_params;
static int ssock = -1;
extern int lrmd_call_id;
static void
debug_log(int level, const char *str)
{
fputs(str, stderr);
}
/*!
* \internal
* \brief Read (more) TLS handshake data from client
*/
static int
remoted__read_handshake_data(crm_client_t *client)
{
int rc = pcmk__read_handshake_data(client);
if (rc == 0) {
/* No more data is available at the moment. Just return for now;
* we'll get invoked again once the client sends more.
*/
return 0;
} else if (rc < 0) {
crm_err("TLS handshake with remote client failed: %s "
CRM_XS " rc=%d", gnutls_strerror(rc), rc);
return -1;
}
if (client->remote->auth_timeout) {
g_source_remove(client->remote->auth_timeout);
}
client->remote->auth_timeout = 0;
client->remote->tls_handshake_complete = TRUE;
crm_notice("Remote client connection accepted");
// Alert other clients of the new connection
notify_of_new_client(client);
return 0;
}
static int
lrmd_remote_client_msg(gpointer data)
{
int id = 0;
int rc = 0;
int disconnected = 0;
xmlNode *request = NULL;
crm_client_t *client = data;
if (client->remote->tls_handshake_complete == FALSE) {
return remoted__read_handshake_data(client);
}
rc = crm_remote_ready(client->remote, 0);
if (rc == 0) {
/* no msg to read */
return 0;
} else if (rc < 0) {
crm_info("Remote client disconnected while polling it");
return -1;
}
crm_remote_recv(client->remote, -1, &disconnected);
request = crm_remote_parse_buffer(client->remote);
while (request) {
crm_element_value_int(request, F_LRMD_REMOTE_MSG_ID, &id);
crm_trace("Processing remote client request %d", id);
if (!client->name) {
const char *value = crm_element_value(request, F_LRMD_CLIENTNAME);
if (value) {
client->name = strdup(value);
}
}
lrmd_call_id++;
if (lrmd_call_id < 1) {
lrmd_call_id = 1;
}
crm_xml_add(request, F_LRMD_CLIENTID, client->id);
crm_xml_add(request, F_LRMD_CLIENTNAME, client->name);
crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id);
process_lrmd_message(client, id, request);
free_xml(request);
/* process all the messages in the current buffer */
request = crm_remote_parse_buffer(client->remote);
}
if (disconnected) {
crm_info("Remote client disconnected while reading from it");
return -1;
}
return 0;
}
static void
lrmd_remote_client_destroy(gpointer user_data)
{
crm_client_t *client = user_data;
if (client == NULL) {
return;
}
crm_notice("Cleaning up after remote client %s disconnected "
CRM_XS " id=%s",
(client->name? client->name : ""), client->id);
ipc_proxy_remove_provider(client);
/* if this is the last remote connection, stop recurring
* operations */
if (crm_hash_table_size(client_connections) == 1) {
client_disconnect_cleanup(NULL);
}
if (client->remote->tls_session) {
void *sock_ptr;
int csock;
sock_ptr = gnutls_transport_get_ptr(*client->remote->tls_session);
csock = GPOINTER_TO_INT(sock_ptr);
gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_RDWR);
gnutls_deinit(*client->remote->tls_session);
gnutls_free(client->remote->tls_session);
close(csock);
}
lrmd_client_destroy(client);
return;
}
static gboolean
lrmd_auth_timeout_cb(gpointer data)
{
crm_client_t *client = data;
client->remote->auth_timeout = 0;
if (client->remote->tls_handshake_complete == TRUE) {
return FALSE;
}
mainloop_del_fd(client->remote->source);
client->remote->source = NULL;
crm_err("Remote client authentication timed out");
return FALSE;
}
static int
lrmd_remote_listen(gpointer data)
{
int csock = 0;
gnutls_session_t *session = NULL;
crm_client_t *new_client = NULL;
static struct mainloop_fd_callbacks lrmd_remote_fd_cb = {
.dispatch = lrmd_remote_client_msg,
.destroy = lrmd_remote_client_destroy,
};
csock = crm_remote_accept(ssock);
if (csock < 0) {
return TRUE;
}
session = pcmk__new_tls_session(csock, GNUTLS_SERVER, GNUTLS_CRD_PSK,
psk_cred_s);
if (session == NULL) {
close(csock);
return TRUE;
}
new_client = crm_client_alloc(NULL);
new_client->remote = calloc(1, sizeof(crm_remote_t));
new_client->kind = CRM_CLIENT_TLS;
new_client->remote->tls_session = session;
// Require the client to authenticate within this time
new_client->remote->auth_timeout = g_timeout_add(LRMD_REMOTE_AUTH_TIMEOUT,
lrmd_auth_timeout_cb,
new_client);
crm_info("Remote client pending authentication "
CRM_XS " %p id: %s", new_client, new_client->id);
new_client->remote->source =
mainloop_add_fd("pacemaker-remote-client", G_PRIORITY_DEFAULT, csock,
new_client, &lrmd_remote_fd_cb);
return TRUE;
}
static void
lrmd_remote_connection_destroy(gpointer user_data)
{
crm_notice("TLS server session ended");
return;
}
static int
lrmd_tls_server_key_cb(gnutls_session_t session, const char *username, gnutls_datum_t * key)
{
return lrmd_tls_set_key(key);
}
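/*!
* \internal
* \brief Create a server socket for an address, then bind and listen on it
*
* \param[in] addr Address to listen on
*
* \return File descriptor of listening socket on success, -1 on failure
*/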
static int
bind_and_listen(struct addrinfo *addr)
{
int optval;
int fd;
int rc;
char buffer[INET6_ADDRSTRLEN] = { 0, };
crm_sockaddr2str(addr->ai_addr, buffer);
crm_trace("Attempting to bind to address %s", buffer);
fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
if (fd < 0) {
crm_perror(LOG_ERR, "Listener socket creation failed");
return -1;
}
/* reuse address */
optval = 1;
rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
if (rc < 0) {
crm_perror(LOG_ERR, "Local address reuse not allowed on %s", buffer);
close(fd);
return -1;
}
if (addr->ai_family == AF_INET6) {
optval = 0;
rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval));
if (rc < 0) {
crm_perror(LOG_INFO, "Couldn't disable IPV6-only on %s", buffer);
close(fd);
return -1;
}
}
if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) {
crm_perror(LOG_ERR, "Cannot bind to %s", buffer);
close(fd);
return -1;
}
if (listen(fd, 10) == -1) {
crm_perror(LOG_ERR, "Cannot listen on %s", buffer);
close(fd);
return -1;
}
return fd;
}
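/*!
* \internal
* \brief Set up a TLS listener for Pacemaker Remote connections
*
* \return File descriptor of listening socket on success, -1 on failure
*/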
int
lrmd_init_remote_tls_server()
{
int rc;
int filter;
int port = crm_default_remote_port();
struct addrinfo hints, *res = NULL, *iter;
char port_str[6]; // at most "65535"
gnutls_datum_t psk_key = { NULL, 0 };
static struct mainloop_fd_callbacks remote_listen_fd_callbacks = {
.dispatch = lrmd_remote_listen,
.destroy = lrmd_remote_connection_destroy,
};
- crm_notice("Starting TLS listener on port %d", port);
+ crm_debug("Starting TLS listener on port %d", port);
crm_gnutls_global_init();
gnutls_global_set_log_function(debug_log);
if (pcmk__init_tls_dh(&dh_params) != GNUTLS_E_SUCCESS) {
return -1;
}
gnutls_psk_allocate_server_credentials(&psk_cred_s);
gnutls_psk_set_server_credentials_function(psk_cred_s, lrmd_tls_server_key_cb);
gnutls_psk_set_server_dh_params(psk_cred_s, dh_params);
/* The key callback won't get called until the first client connection
* attempt. Do it once here, so we can warn the user at start-up if we can't
* read the key. We don't error out, though, because it's fine if the key is
* going to be added later.
*/
rc = lrmd_tls_set_key(&psk_key);
if (rc != 0) {
crm_warn("A cluster connection will not be possible until the key is available");
}
gnutls_free(psk_key.data);
memset(&hints, 0, sizeof(struct addrinfo));
/* Bind to the wildcard address (INADDR_ANY or IN6ADDR_ANY_INIT).
* @TODO allow user to specify a specific address
*/
hints.ai_flags = AI_PASSIVE;
hints.ai_family = AF_UNSPEC; /* Return IPv6 or IPv4 */
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
snprintf(port_str, sizeof(port_str), "%d", port);
rc = getaddrinfo(NULL, port_str, &hints, &res);
if (rc) {
crm_err("Unable to get IP address info for local node: %s",
gai_strerror(rc));
return -1;
}
iter = res;
filter = AF_INET6;
/* Try IPv6 addresses first, then IPv4 */
while (iter) {
if (iter->ai_family == filter) {
ssock = bind_and_listen(iter);
}
if (ssock != -1) {
break;
}
iter = iter->ai_next;
if (iter == NULL && filter == AF_INET6) {
iter = res;
filter = AF_INET;
}
}
if (ssock < 0) {
rc = -1; // no address could be bound, so report failure to the caller
goto init_remote_cleanup;
}
mainloop_add_fd("pacemaker-remote-server", G_PRIORITY_DEFAULT, ssock, NULL,
&remote_listen_fd_callbacks);
rc = ssock;
init_remote_cleanup:
if (rc < 0) {
close(ssock);
ssock = 0;
} else {
crm_debug("Started TLS listener on port %d", port);
}
freeaddrinfo(res);
return rc;
}
void
lrmd_tls_server_destroy(void)
{
if (psk_cred_s) {
gnutls_psk_free_server_credentials(psk_cred_s);
psk_cred_s = 0;
}
if (ssock > 0) {
close(ssock);
ssock = 0;
}
}
#endif
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index da0dce981e..7e9bb07666 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,1516 +1,1518 @@
/*
* Copyright 2009-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include /* U32T ~ PRIu32, X32T ~ PRIx32 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
char *stonith_our_uname = NULL;
char *stonith_our_uuid = NULL;
long stonith_watchdog_timeout_ms = 0;
static GMainLoop *mainloop = NULL;
gboolean stand_alone = FALSE;
static gboolean no_cib_connect = FALSE;
static gboolean stonith_shutdown_flag = FALSE;
static qb_ipcs_service_t *ipcs = NULL;
static xmlNode *local_cib = NULL;
static pe_working_set_t *fenced_data_set = NULL;
static cib_t *cib_api = NULL;
static void *cib_library = NULL;
static void stonith_shutdown(int nsig);
static void stonith_cleanup(void);
static int32_t
st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
if (stonith_shutdown_flag) {
crm_info("Ignoring new client [%d] during shutdown", crm_ipcs_client_pid(c));
return -EPERM;
}
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
st_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection created for %p", c);
}
/* Exit code means? */
static int32_t
st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
int call_options = 0;
xmlNode *request = NULL;
crm_client_t *c = crm_client_get(qbc);
const char *op = NULL;
if (c == NULL) {
crm_info("Invalid client: %p", qbc);
return 0;
}
request = crm_ipcs_recv(c, data, size, &id, &flags);
if (request == NULL) {
crm_ipcs_send_ack(c, id, flags, "nack", __FUNCTION__, __LINE__);
return 0;
}
op = crm_element_value(request, F_CRM_TASK);
if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
crm_xml_add(request, F_TYPE, T_STONITH_NG);
crm_xml_add(request, F_STONITH_OPERATION, op);
crm_xml_add(request, F_STONITH_CLIENTID, c->id);
crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
free_xml(request);
return 0;
}
if (c->name == NULL) {
const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
if (value == NULL) {
value = "unknown";
}
c->name = crm_strdup_printf("%s.%u", value, c->pid);
}
crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
crm_trace("Flags %" X32T "/%u for command %" U32T " from %s",
flags, call_options, id, crm_client_name(c));
if (is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(flags & crm_ipc_client_response);
CRM_LOG_ASSERT(c->request_id == 0); /* This means the client has two synchronous events in-flight */
c->request_id = id; /* Reply only to the last one */
}
crm_xml_add(request, F_STONITH_CLIENTID, c->id);
crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
crm_log_xml_trace(request, "Client[inbound]");
stonith_command(c, id, flags, request, NULL);
free_xml(request);
return 0;
}
/* Error code means? */
static int32_t
st_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p closed", c);
crm_client_destroy(client);
/* 0 means: yes, go ahead and destroy the connection */
return 0;
}
static void
st_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p destroyed", c);
st_ipc_closed(c);
}
static void
stonith_peer_callback(xmlNode * msg, void *private_data)
{
const char *remote_peer = crm_element_value(msg, F_ORIG);
const char *op = crm_element_value(msg, F_STONITH_OPERATION);
if (crm_str_eq(op, "poke", TRUE)) {
return;
}
crm_log_xml_trace(msg, "Peer[inbound]");
stonith_command(NULL, 0, 0, msg, remote_peer);
}
#if SUPPORT_COROSYNC
static void
stonith_peer_ais_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
uint32_t kind = 0;
xmlNode *xml = NULL;
const char *from = NULL;
char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
if(data == NULL) {
return;
}
if (kind == crm_class_cluster) {
xml = string2xml(data);
if (xml == NULL) {
crm_err("Invalid XML: '%.120s'", data);
free(data);
return;
}
crm_xml_add(xml, F_ORIG, from);
/* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
stonith_peer_callback(xml, NULL);
}
free_xml(xml);
free(data);
return;
}
static void
stonith_peer_cs_destroy(gpointer user_data)
{
crm_crit("Lost connection to cluster layer, shutting down");
stonith_shutdown(0);
}
#endif
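/*!
* \internal
* \brief Send a reply or event to the client that originated a request
*
* \param[in] notify_src XML to send
* \param[in] client_id  Identifier of the originating client
* \param[in] sync_reply Whether to send a synchronous reply or an event
* \param[in] from_peer  Whether the request was relayed from a peer
*/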
void
do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer)
{
/* send callback to originating child */
crm_client_t *client_obj = NULL;
int local_rc = pcmk_ok;
crm_trace("Sending response");
client_obj = crm_client_get_by_id(client_id);
crm_trace("Sending callback to request originator");
if (client_obj == NULL) {
local_rc = -1;
crm_trace("No client to sent the response to. F_STONITH_CLIENTID not set.");
} else {
int rid = 0;
if (sync_reply) {
CRM_LOG_ASSERT(client_obj->request_id);
rid = client_obj->request_id;
client_obj->request_id = 0;
crm_trace("Sending response %d to %s %s",
rid, client_obj->name, from_peer ? "(originator of delegated request)" : "");
} else {
crm_trace("Sending an event to %s %s",
client_obj->name, from_peer ? "(originator of delegated request)" : "");
}
local_rc = crm_ipcs_send(client_obj, rid, notify_src, sync_reply?crm_ipc_flags_none:crm_ipc_server_event);
}
if (local_rc < pcmk_ok && client_obj != NULL) {
crm_warn("%sSync reply to %s failed: %s",
sync_reply ? "" : "A-",
client_obj ? client_obj->name : "", pcmk_strerror(local_rc));
}
}
long long
get_stonith_flag(const char *name)
{
if (safe_str_eq(name, T_STONITH_NOTIFY_FENCE)) {
return st_callback_notify_fence;
} else if (safe_str_eq(name, STONITH_OP_DEVICE_ADD)) {
return st_callback_device_add;
} else if (safe_str_eq(name, STONITH_OP_DEVICE_DEL)) {
return st_callback_device_del;
} else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY)) {
return st_callback_notify_history;
}
return st_callback_unknown;
}
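/*!
* \internal
* \brief Send a notification to one client, if it subscribed to that type
*
* \param[in] key       Ignored
* \param[in] value     Client to notify
* \param[in] user_data Notification XML to send
*
* \note This is a GHashTable iteration callback for client_connections
*/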
static void
stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
{
xmlNode *update_msg = user_data;
crm_client_t *client = value;
const char *type = NULL;
CRM_CHECK(client != NULL, return);
CRM_CHECK(update_msg != NULL, return);
type = crm_element_value(update_msg, F_SUBTYPE);
CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
if (client->ipcs == NULL) {
crm_trace("Skipping client with NULL channel");
return;
}
if (client->options & get_stonith_flag(type)) {
int rc = crm_ipcs_send(client, 0, update_msg, crm_ipc_server_event | crm_ipc_server_error);
if (rc <= 0) {
crm_warn("%s notification of client %s.%.6s failed: %s (%d)",
type, crm_client_name(client), client->id, pcmk_strerror(rc), rc);
} else {
crm_trace("Sent %s notification to client %s.%.6s", type, crm_client_name(client),
client->id);
}
}
}
void
do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
{
crm_client_t *client = NULL;
xmlNode *notify_data = NULL;
if (!timeout || !call_id || !client_id) {
return;
}
client = crm_client_get_by_id(client_id);
if (!client) {
return;
}
notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
if (client) {
crm_ipcs_send(client, 0, notify_data, crm_ipc_server_event);
}
free_xml(notify_data);
}
void
do_stonith_notify(int options, const char *type, int result, xmlNode * data)
{
/* TODO: Standardize the contents of data */
xmlNode *update_msg = create_xml_node(NULL, "notify");
CRM_CHECK(type != NULL,;);
crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY);
crm_xml_add(update_msg, F_SUBTYPE, type);
crm_xml_add(update_msg, F_STONITH_OPERATION, type);
crm_xml_add_int(update_msg, F_STONITH_RC, result);
if (data != NULL) {
add_message_xml(update_msg, F_STONITH_CALLDATA, data);
}
crm_trace("Notifying clients");
g_hash_table_foreach(client_connections, stonith_notify_client, update_msg);
free_xml(update_msg);
crm_trace("Notify complete");
}
static void
do_stonith_notify_config(int options, const char *op, int rc,
const char *desc, int active)
{
xmlNode *notify_data = create_xml_node(NULL, op);
CRM_CHECK(notify_data != NULL, return);
crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active);
do_stonith_notify(options, op, rc, notify_data);
free_xml(notify_data);
}
void
do_stonith_notify_device(int options, const char *op, int rc, const char *desc)
{
do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(device_list));
}
void
do_stonith_notify_level(int options, const char *op, int rc, const char *desc)
{
do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(topology));
}
static void
topology_remove_helper(const char *node, int level)
{
int rc;
char *desc = NULL;
xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
rc = stonith_level_remove(data, &desc);
do_stonith_notify_level(0, STONITH_OP_LEVEL_DEL, rc, desc);
free_xml(data);
free(desc);
}
static void
remove_cib_device(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
const char *rsc_id = NULL;
const char *standard = NULL;
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match != NULL) {
standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
}
if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
continue;
}
rsc_id = crm_element_value(match, XML_ATTR_ID);
stonith_device_remove(rsc_id, TRUE);
}
}
static void
handle_topology_change(xmlNode *match, bool remove)
{
int rc;
char *desc = NULL;
CRM_CHECK(match != NULL, return);
crm_trace("Updating %s", ID(match));
if(remove) {
int index = 0;
char *key = stonith_level_key(match, -1);
crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
topology_remove_helper(key, index);
free(key);
}
rc = stonith_level_register(match, &desc);
do_stonith_notify_level(0, STONITH_OP_LEVEL_ADD, rc, desc);
free(desc);
}
static void
remove_fencing_topology(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
CRM_LOG_ASSERT(match != NULL);
if (match && crm_element_value(match, XML_DIFF_MARKER)) {
/* Deletion */
int index = 0;
char *target = stonith_level_key(match, -1);
crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
if (target == NULL) {
crm_err("Invalid fencing target in element %s", ID(match));
} else if (index <= 0) {
crm_err("Invalid level for %s in element %s", target, ID(match));
} else {
topology_remove_helper(target, index);
}
/* } else { Deal with modifications during the 'addition' stage */
}
}
}
static void
register_fencing_topology(xmlXPathObjectPtr xpathObj)
{
int max = numXpathResults(xpathObj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
handle_topology_change(match, TRUE);
}
}
/* Fencing */
static void
fencing_topology_init()
{
xmlXPathObjectPtr xpathObj = NULL;
const char *xpath = "//" XML_TAG_FENCING_LEVEL;
crm_trace("Full topology refresh");
free_topology_list();
init_topology_list();
/* Grab everything */
xpathObj = xpath_search(local_cib, xpath);
register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
}
#define rsc_name(x) x->clone_name?x->clone_name:x->id
/*!
* \internal
* \brief Check whether our uname is in a resource's allowed node list
*
* \param[in] rsc Resource to check
*
* \return Pointer to node object if found, NULL otherwise
*/
static node_t *
our_node_allowed_for(resource_t *rsc)
{
GHashTableIter iter;
node_t *node = NULL;
if (rsc && stonith_our_uname) {
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
break;
}
node = NULL;
}
}
return node;
}
/*!
* \internal
* \brief If a resource or any of its children are STONITH devices, update their
* definitions given a cluster working set.
*
* \param[in] rsc Resource to check
* \param[in] data_set Cluster working set with device information
*/
static void cib_device_update(resource_t *rsc, pe_working_set_t *data_set)
{
node_t *node = NULL;
const char *value = NULL;
const char *rclass = NULL;
node_t *parent = NULL;
gboolean remove = TRUE;
/* If this is a complex resource, check children rather than this resource itself.
* TODO: Mark each installed device and remove if untouched when this process finishes.
*/
if(rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
cib_device_update(gIter->data, data_set);
if(pe_rsc_is_clone(rsc)) {
crm_trace("Only processing one copy of the clone %s", rsc->id);
break;
}
}
return;
}
/* We only care about STONITH resources. */
rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (safe_str_neq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
return;
}
/* If this STONITH resource is disabled, just remove it. */
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
if (safe_str_eq(value, RSC_STOPPED)) {
crm_info("Device %s has been disabled", rsc->id);
goto update_done;
}
/* Check whether our node is allowed for this resource (and its parent if in a group) */
node = our_node_allowed_for(rsc);
if (rsc->parent && (rsc->parent->variant == pe_group)) {
parent = our_node_allowed_for(rsc->parent);
}
if(node == NULL) {
/* Our node is disallowed, so remove the device */
GHashTableIter iter;
crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
crm_trace("Available: %s = %d", node->details->uname, node->weight);
}
goto update_done;
} else if(node->weight < 0 || (parent && parent->weight < 0)) {
/* Our node (or its group) is disallowed by score, so remove the device */
char *score = score2char((node->weight < 0) ? node->weight : parent->weight);
crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score);
free(score);
goto update_done;
} else {
/* Our node is allowed, so update the device information */
int rc;
xmlNode *data;
GHashTableIter gIter;
stonith_key_value_t *params = NULL;
const char *name = NULL;
const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
const char *rsc_provides = NULL;
crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
get_rsc_attributes(rsc->parameters, rsc, node, data_set);
get_meta_attributes(rsc->meta, rsc, node, data_set);
rsc_provides = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROVIDES);
g_hash_table_iter_init(&gIter, rsc->parameters);
while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
if (!name || !value) {
continue;
}
params = stonith_key_value_add(params, name, value);
crm_trace(" %s=%s", name, value);
}
remove = FALSE;
data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
agent, params, rsc_provides);
stonith_key_value_freeall(params, 1, 1);
rc = stonith_device_register(data, NULL, TRUE);
CRM_ASSERT(rc == pcmk_ok);
free_xml(data);
}
update_done:
if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) {
stonith_device_remove(rsc_name(rsc), TRUE);
}
}
/*!
* \internal
* \brief Update all STONITH device definitions based on current CIB
*/
static void
cib_devices_update(void)
{
GListPtr gIter = NULL;
crm_info("Updating devices to version %s.%s.%s",
crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
crm_element_value(local_cib, XML_ATTR_GENERATION),
crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
CRM_ASSERT(fenced_data_set != NULL);
fenced_data_set->input = local_cib;
fenced_data_set->now = crm_time_new(NULL);
fenced_data_set->flags |= pe_flag_quick_location;
fenced_data_set->localhost = stonith_our_uname;
cluster_status(fenced_data_set);
pcmk__schedule_actions(fenced_data_set, NULL, NULL);
for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) {
cib_device_update(gIter->data, fenced_data_set);
}
fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
pe_reset_working_set(fenced_data_set);
}
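/*!
* \internal
* \brief Update device registrations based on a v2 (patchset) CIB notification
*
* \param[in] event CIB notification event type
* \param[in] msg   CIB notification
*/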
static void
update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
{
xmlNode *change = NULL;
char *reason = NULL;
bool needs_update = FALSE;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
const char *shortpath = NULL;
if ((op == NULL) ||
(strcmp(op, "move") == 0) ||
strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
continue;
} else if (safe_str_eq(op, "delete") && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
const char *rsc_id = NULL;
char *search = NULL;
char *mutable = NULL;
if (strstr(xpath, XML_TAG_ATTR_SETS) ||
strstr(xpath, XML_TAG_META_SETS)) {
needs_update = TRUE;
reason = strdup("(meta) attribute deleted from resource");
break;
}
mutable = strdup(xpath);
rsc_id = strstr(mutable, "primitive[@id=\'");
if (rsc_id != NULL) {
rsc_id += strlen("primitive[@id=\'");
search = strchr(rsc_id, '\'');
}
if (search != NULL) {
*search = 0;
stonith_device_remove(rsc_id, TRUE);
} else {
crm_warn("Ignoring malformed CIB update (resource deletion)");
}
free(mutable);
} else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
reason = crm_strdup_printf("%s %s", op, shortpath+1);
needs_update = TRUE;
break;
}
}
if(needs_update) {
crm_info("Updating device list from the cib: %s", reason);
cib_devices_update();
} else {
crm_trace("No updates for device list found in cib");
}
free(reason);
}
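/*!
* \internal
* \brief Update device registrations based on a v1 (full diff) CIB notification
*
* \param[in] event CIB notification event type
* \param[in] msg   CIB notification
*/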
static void
update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
{
const char *reason = "none";
gboolean needs_update = FALSE;
xmlXPathObjectPtr xpath_obj = NULL;
/* process new constraints */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
if (numXpathResults(xpath_obj) > 0) {
int max = numXpathResults(xpath_obj), lpc = 0;
/* Safest and simplest to always recompute */
needs_update = TRUE;
reason = "new location constraint";
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpath_obj, lpc);
crm_log_xml_trace(match, "new constraint");
}
}
freeXpathObject(xpath_obj);
/* process deletions */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
if (numXpathResults(xpath_obj) > 0) {
remove_cib_device(xpath_obj);
}
freeXpathObject(xpath_obj);
/* process additions */
xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
if (numXpathResults(xpath_obj) > 0) {
int max = numXpathResults(xpath_obj), lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
const char *rsc_id = NULL;
const char *standard = NULL;
xmlNode *match = getXpathResult(xpath_obj, lpc);
rsc_id = crm_element_value(match, XML_ATTR_ID);
standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) {
continue;
}
crm_trace("Fencing resource %s was added or modified", rsc_id);
reason = "new resource";
needs_update = TRUE;
}
}
freeXpathObject(xpath_obj);
if(needs_update) {
crm_info("Updating device list from the cib: %s", reason);
cib_devices_update();
}
}
static void
update_cib_stonith_devices(const char *event, xmlNode * msg)
{
int format = 1;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
CRM_ASSERT(patchset);
crm_element_value_int(patchset, "format", &format);
switch(format) {
case 1:
update_cib_stonith_devices_v1(event, msg);
break;
case 2:
update_cib_stonith_devices_v2(event, msg);
break;
default:
crm_warn("Unknown patch format: %d", format);
}
}
/* Needs to hold node name + attribute name + attribute value + 75 */
#define XPATH_MAX 512
/*!
* \internal
* \brief Check whether a node has a specific attribute name/value
*
* \param[in] node Name of node to check
* \param[in] name Name of an attribute to look for
* \param[in] value The value the named attribute needs to be set to in order to be considered a match
*
* \return TRUE if the locally cached CIB has the specified node attribute
*/
gboolean
node_has_attr(const char *node, const char *name, const char *value)
{
char xpath[XPATH_MAX];
xmlNode *match;
int n;
CRM_CHECK(local_cib != NULL, return FALSE);
/* Search for the node's attributes in the CIB. While the schema allows
* multiple sets of instance attributes, and allows instance attributes to
* use id-ref to reference values elsewhere, that is intended for resources,
* so we ignore that here.
*/
n = snprintf(xpath, XPATH_MAX, "//" XML_CIB_TAG_NODES
"/" XML_CIB_TAG_NODE "[@uname='%s']/" XML_TAG_ATTR_SETS
"/" XML_CIB_TAG_NVPAIR "[@name='%s' and @value='%s']",
node, name, value);
CRM_CHECK(n < XPATH_MAX, return FALSE); // check for truncation before using xpath
match = get_xpath_object(xpath, local_cib, LOG_TRACE);
return (match != NULL);
}
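/*!
* \internal
* \brief Update the fencing topology based on a CIB diff notification
*
* \param[in] event CIB notification event type
* \param[in] msg   CIB notification
*/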
static void
update_fencing_topology(const char *event, xmlNode * msg)
{
int format = 1;
const char *xpath;
xmlXPathObjectPtr xpathObj = NULL;
xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
CRM_ASSERT(patchset);
crm_element_value_int(patchset, "format", &format);
if(format == 1) {
/* Process deletions (only) */
xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
xpathObj = xpath_search(msg, xpath);
remove_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
/* Process additions and changes */
xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
xpathObj = xpath_search(msg, xpath);
register_fencing_topology(xpathObj);
freeXpathObject(xpathObj);
} else if(format == 2) {
xmlNode *change = NULL;
int add[] = { 0, 0, 0 };
int del[] = { 0, 0, 0 };
xml_patch_versions(patchset, add, del);
for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
if(op == NULL) {
continue;
} else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
/* Change to a specific entry */
crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
if(strcmp(op, "move") == 0) {
continue;
} else if(strcmp(op, "create") == 0) {
handle_topology_change(change->children, FALSE);
} else if(strcmp(op, "modify") == 0) {
xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
if(match) {
handle_topology_change(match->children, TRUE);
}
} else if(strcmp(op, "delete") == 0) {
/* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
}
} else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
/* Change to the topology in general */
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
} else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
/* Changes to the whole config section, possibly including the topology as a whole */
if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
op, add[0], add[1], add[2], xpath);
} else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
op, add[0], add[1], add[2], xpath);
fencing_topology_init();
return;
}
} else {
crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
op, add[0], add[1], add[2], xpath);
}
}
} else {
crm_warn("Unknown patch format: %d", format);
}
}
static bool have_cib_devices = FALSE;
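/*!
* \internal
* \brief Update the local CIB copy from a diff notification and react to any
*        device, topology, or option changes
*
* \param[in] event CIB notification event type
* \param[in] msg   CIB notification
*/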
static void
update_cib_cache_cb(const char *event, xmlNode * msg)
{
int rc = pcmk_ok;
xmlNode *stonith_enabled_xml = NULL;
xmlNode *stonith_watchdog_xml = NULL;
const char *stonith_enabled_s = NULL;
static gboolean stonith_enabled_saved = TRUE;
if(!have_cib_devices) {
crm_trace("Skipping updates until we get a full dump");
return;
} else if(msg == NULL) {
crm_trace("Missing %s update", event);
return;
}
/* Maintain a local copy of the CIB so that we have full access
* to device definitions, location constraints, and node attributes
*/
if (local_cib != NULL) {
int rc = pcmk_ok;
xmlNode *patchset = NULL;
crm_element_value_int(msg, F_CIB_RC, &rc);
if (rc != pcmk_ok) {
return;
}
patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
xml_log_patchset(LOG_TRACE, "Config update", patchset);
rc = xml_apply_patchset(local_cib, patchset, TRUE);
switch (rc) {
case pcmk_ok:
case -pcmk_err_old_data:
break;
case -pcmk_err_diff_resync:
case -pcmk_err_diff_failed:
crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(local_cib);
local_cib = NULL;
break;
default:
crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(local_cib);
local_cib = NULL;
}
}
if (local_cib == NULL) {
crm_trace("Re-requesting the full cib");
rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
if(rc != pcmk_ok) {
crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
return;
}
CRM_ASSERT(local_cib != NULL);
stonith_enabled_saved = FALSE; /* Trigger a full refresh below */
}
crm_peer_caches_refresh(local_cib);
stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", local_cib, LOG_TRACE);
if (stonith_enabled_xml) {
stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE);
}
if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) {
long timeout_ms = 0;
const char *value = NULL;
stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", local_cib, LOG_TRACE);
if (stonith_watchdog_xml) {
value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
}
if(value) {
timeout_ms = crm_get_msec(value);
}
if (timeout_ms < 0) {
timeout_ms = crm_auto_watchdog_timeout();
}
if(timeout_ms != stonith_watchdog_timeout_ms) {
crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000);
stonith_watchdog_timeout_ms = timeout_ms;
}
} else {
stonith_watchdog_timeout_ms = 0;
}
if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) {
crm_trace("Ignoring cib updates while stonith is disabled");
stonith_enabled_saved = FALSE;
return;
} else if (stonith_enabled_saved == FALSE) {
crm_info("Updating stonith device and topology lists now that stonith is enabled");
stonith_enabled_saved = TRUE;
fencing_topology_init();
cib_devices_update();
} else {
update_fencing_topology(event, msg);
update_cib_stonith_devices(event, msg);
}
}
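/*!
 * \internal
 * \brief Callback for the initial full-CIB query: seed the local CIB copy,
 *        then build the device and topology lists from it
 */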
static void
init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
crm_info("Updating device list from the cib: init");
have_cib_devices = TRUE;
local_cib = copy_xml(output);
crm_peer_caches_refresh(local_cib);
fencing_topology_init();
cib_devices_update();
}
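/*!
 * \internal
 * \brief Signal handler for graceful shutdown: quit the mainloop if it is
 *        running, otherwise clean up and exit directly
 */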
static void
stonith_shutdown(int nsig)
{
stonith_shutdown_flag = TRUE;
crm_info("Terminating with %d clients",
crm_hash_table_size(client_connections));
if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
g_main_loop_quit(mainloop);
} else {
stonith_cleanup();
crm_exit(CRM_EX_OK);
}
}
static void
cib_connection_destroy(gpointer user_data)
{
if (stonith_shutdown_flag) {
crm_info("Connection to the CIB manager closed");
return;
} else {
crm_crit("Lost connection to the CIB manager, shutting down");
}
if (cib_api) {
cib_api->cmds->signoff(cib_api);
}
stonith_shutdown(0);
}
static void
stonith_cleanup(void)
{
if (cib_api) {
cib_api->cmds->signoff(cib_api);
}
if (ipcs) {
qb_ipcs_destroy(ipcs);
}
crm_peer_destroy();
crm_client_cleanup();
free_stonith_remote_op_list();
free_topology_list();
free_device_list();
free_metadata_cache();
free(stonith_our_uname);
stonith_our_uname = NULL;
free_xml(local_cib);
local_cib = NULL;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
{"stand-alone", 0, 0, 's'},
{"stand-alone-w-cpg", 0, 0, 'c'},
{"logfile", 1, 0, 'l'},
{"verbose", 0, 0, 'V'},
{"version", 0, 0, '$'},
{"help", 0, 0, '?'},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
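/*!
 * \internal
 * \brief Connect to the CIB manager (retrying up to five times), register
 *        for diff notifications, and kick off the initial full-CIB query
 */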
static void
setup_cib(void)
{
int rc, retries = 0;
static cib_t *(*cib_new_fn) (void) = NULL;
if (cib_new_fn == NULL) {
cib_new_fn = find_library_function(&cib_library, CIB_LIBRARY, "cib_new", TRUE);
}
if (cib_new_fn != NULL) {
cib_api = (*cib_new_fn) ();
}
if (cib_api == NULL) {
crm_err("No connection to the CIB manager");
return;
}
do {
sleep(retries);
rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
} while (rc == -ENOTCONN && ++retries < 5);
if (rc != pcmk_ok) {
crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
} else if (pcmk_ok !=
cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
crm_err("Could not set CIB notification callback");
} else {
rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
init_cib_cache_cb);
cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
crm_info("Watching for stonith topology changes");
}
}
struct qb_ipcs_service_handlers ipc_callbacks = {
.connection_accept = st_ipc_accept,
.connection_created = st_ipc_created,
.msg_process = st_ipc_dispatch,
.connection_closed = st_ipc_closed,
.connection_destroyed = st_ipc_destroy
};
/*!
* \internal
* \brief Callback for peer status changes
*
* \param[in] type What changed
* \param[in] node What peer had the change
* \param[in] data Previous value of what changed
*/
static void
st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
{
if ((type != crm_status_processes) && !is_set(node->flags, crm_remote_node)) {
/*
* This is a hack until we can send to a nodeid and/or we fix node name lookups
* These messages are ignored in stonith_peer_callback()
*/
xmlNode *query = create_xml_node(NULL, "stonith_command");
crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
crm_xml_add(query, F_TYPE, T_STONITH_NG);
crm_xml_add(query, F_STONITH_OPERATION, "poke");
crm_debug("Broadcasting our uname because of node %u", node->id);
send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
free_xml(query);
}
}
int
main(int argc, char **argv)
{
int flag;
int lpc = 0;
int argerr = 0;
int option_index = 0;
crm_cluster_t cluster;
const char *actions[] = { "reboot", "off", "on", "list", "monitor", "status" };
crm_ipc_t *old_instance = NULL;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "mode [options]", long_options,
"Daemon for executing fencing requests on behalf of the cluster\n");
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1) {
break;
}
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'l':
crm_add_logfile(optarg);
break;
case 's':
stand_alone = TRUE;
break;
case 'c':
stand_alone = FALSE;
no_cib_connect = TRUE;
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
default:
++argerr;
break;
}
}
if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
printf("\n");
printf("\n");
printf(" 1.0\n");
printf(" Instance attributes available for all \"stonith\"-class resources"
" and used by Pacemaker's fence daemon, formerly known as stonithd\n");
printf(" Instance attributes available for all \"stonith\"-class resources\n");
printf(" \n");
#if 0
// priority is not implemented yet
printf(" \n");
printf(" Devices that are not in a topology "
"are tried in order of highest to lowest integer priority\n");
printf(" \n");
printf(" \n");
#endif
printf(" \n", STONITH_ATTR_HOSTARG);
printf
(" Advanced use only: An alternate parameter to supply instead of 'port'\n");
printf
(" Some devices do not support the standard 'port' parameter or may provide additional ones.\n"
"Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n"
"A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n"
" \n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTMAP);
printf
(" A mapping of host names to ports numbers for devices that do not support host names.\n");
printf
(" Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTLIST);
printf
(" A list of machines controlled by this device (Optional unless %s=static-list).\n",
STONITH_ATTR_HOSTCHECK);
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_HOSTCHECK);
printf
(" How to determine which machines are controlled by the device.\n");
printf(" Allowed values: dynamic-list "
"(query the device via the 'list' command), static-list "
"(check the " STONITH_ATTR_HOSTLIST " attribute), status "
"(query the device via the 'status' command), none (assume "
"every device can fence every machine)\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_DELAY_MAX);
printf
(" Enable a random delay for stonith actions and specify the maximum of random delay.\n");
printf
(" This prevents double fencing when using slow devices such as sbd.\n"
"Use this to enable a random delay for stonith actions.\n"
"The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay.\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_DELAY_BASE);
printf
(" Enable a base delay for stonith actions and specify base delay value.\n");
printf
(" This prevents double fencing when different delays are configured on the nodes.\n"
"Use this to enable a static delay for stonith actions.\n"
"The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay.\n");
printf(" \n");
printf(" \n");
printf(" \n", STONITH_ATTR_ACTION_LIMIT);
printf
(" The maximum number of actions can be performed in parallel on this device\n");
printf
(" Cluster property concurrent-fencing=true needs to be configured first.\n"
"Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.\n");
printf(" \n");
printf(" \n");
for (lpc = 0; lpc < DIMOF(actions); lpc++) {
printf(" \n", actions[lpc]);
printf
(" Advanced use only: An alternate command to run instead of '%s'\n",
actions[lpc]);
printf
(" Some devices do not support the standard commands or may provide additional ones.\n"
"Use this to specify an alternate, device-specific, command that implements the '%s' action.\n",
actions[lpc]);
printf(" \n", actions[lpc]);
printf(" \n");
printf(" \n", actions[lpc]);
printf
(" Advanced use only: Specify an alternate timeout to use for %s actions instead of stonith-timeout\n",
actions[lpc]);
printf
(" Some devices need much more/less time to complete than normal.\n"
"Use this to specify an alternate, device-specific, timeout for '%s' actions.\n",
actions[lpc]);
printf(" \n");
printf(" \n");
printf(" \n", actions[lpc]);
printf
(" Advanced use only: The maximum number of times to retry the '%s' command within the timeout period\n",
actions[lpc]);
printf(" Some devices do not support multiple connections."
" Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
" Use this option to alter the number of times Pacemaker retries '%s' actions before giving up."
"\n", actions[lpc]);
printf(" \n");
printf(" \n");
}
printf(" \n");
printf("\n");
return CRM_EX_OK;
}
if (optind != argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
+ crm_notice("Starting Pacemaker fencer");
+
old_instance = crm_ipc_new("stonith-ng", 0);
if (crm_ipc_connect(old_instance)) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("pacemaker-fenced is already active, aborting startup");
crm_exit(CRM_EX_OK);
} else {
/* not up or not authentic, we'll proceed either way */
crm_ipc_destroy(old_instance);
old_instance = NULL;
}
mainloop_add_signal(SIGTERM, stonith_shutdown);
crm_peer_init();
fenced_data_set = pe_new_working_set();
CRM_ASSERT(fenced_data_set != NULL);
if (stand_alone == FALSE) {
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
cluster.destroy = stonith_peer_cs_destroy;
cluster.cpg.cpg_deliver_fn = stonith_peer_ais_callback;
cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership;
#endif
}
crm_set_status_callback(&st_peer_update_callback);
if (crm_cluster_connect(&cluster) == FALSE) {
crm_crit("Cannot sign in to the cluster... terminating");
crm_exit(CRM_EX_FATAL);
}
stonith_our_uname = cluster.uname;
stonith_our_uuid = cluster.uuid;
if (no_cib_connect == FALSE) {
setup_cib();
}
} else {
stonith_our_uname = strdup("localhost");
}
init_device_list();
init_topology_list();
if(stonith_watchdog_timeout_ms > 0) {
int rc;
xmlNode *xml;
stonith_key_value_t *params = NULL;
params = stonith_key_value_add(params, STONITH_ATTR_HOSTLIST, stonith_our_uname);
xml = create_device_registration_xml("watchdog", st_namespace_internal,
STONITH_WATCHDOG_AGENT, params,
NULL);
stonith_key_value_freeall(params, 1, 1);
rc = stonith_device_register(xml, NULL, FALSE);
free_xml(xml);
if (rc != pcmk_ok) {
crm_crit("Cannot register watchdog pseudo fence agent");
crm_exit(CRM_EX_FATAL);
}
}
stonith_ipc_server_init(&ipcs, &ipc_callbacks);
/* Create the mainloop and run it... */
mainloop = g_main_loop_new(NULL, FALSE);
- crm_info("Starting %s mainloop", crm_system_name);
+ crm_notice("Pacemaker fencer successfully started and accepting connections");
g_main_loop_run(mainloop);
stonith_cleanup();
pe_free_working_set(fenced_data_set);
crm_exit(CRM_EX_OK);
}
diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c
index bebf938804..18830d3464 100644
--- a/daemons/pacemakerd/pacemakerd.c
+++ b/daemons/pacemakerd/pacemakerd.c
@@ -1,1422 +1,1421 @@
/*
* Copyright 2010-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include "pacemakerd.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include /* indirectly: CRM_EX_* */
#include /* cib_channel_ro */
#include
#include
#include
#include
#include
#include /* PCMK__SPECIAL_PID*, ... */
#ifdef SUPPORT_COROSYNC
#include
#endif
#include
#include
static gboolean pcmk_quorate = FALSE;
static gboolean fatal_error = FALSE;
static GMainLoop *mainloop = NULL;
static bool global_keep_tracking = false;
#define PCMK_PROCESS_CHECK_INTERVAL 5
static const char *local_name = NULL;
static uint32_t local_nodeid = 0;
static crm_trigger_t *shutdown_trigger = NULL;
static const char *pid_file = "/var/run/pacemaker.pid";
typedef struct pcmk_child_s {
int pid;
long flag;
int start_seq;
int respawn_count;
gboolean respawn;
const char *name;
const char *uid;
const char *command;
const char *endpoint; /* IPC server name */
gboolean active_before_startup;
} pcmk_child_t;
/* Index into the array below */
#define PCMK_CHILD_CONTROLD 3
static pcmk_child_t pcmk_children[] = {
{
0, crm_proc_none, 0, 0, FALSE, "none",
NULL, NULL
},
{
0, crm_proc_execd, 3, 0, TRUE, "pacemaker-execd",
NULL, CRM_DAEMON_DIR "/pacemaker-execd",
CRM_SYSTEM_LRMD
},
{
0, crm_proc_based, 1, 0, TRUE, "pacemaker-based",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-based",
CIB_CHANNEL_RO
},
{
0, crm_proc_controld, 6, 0, TRUE, "pacemaker-controld",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-controld",
CRM_SYSTEM_CRMD
},
{
0, crm_proc_attrd, 4, 0, TRUE, "pacemaker-attrd",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-attrd",
T_ATTRD
},
{
0, crm_proc_schedulerd, 5, 0, TRUE, "pacemaker-schedulerd",
CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-schedulerd",
CRM_SYSTEM_PENGINE
},
{
0, crm_proc_fenced, 2, 0, TRUE, "pacemaker-fenced",
NULL, CRM_DAEMON_DIR "/pacemaker-fenced",
"stonith-ng"
},
};
static gboolean check_active_before_startup_processes(gpointer user_data);
static int pcmk_child_active(pcmk_child_t *child);
static gboolean start_child(pcmk_child_t * child);
static gboolean update_node_processes(uint32_t id, const char *uname,
uint32_t procs);
void update_process_clients(crm_client_t *client);
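/*!
 * \internal
 * \brief Return a bitmask of the cluster layer plus all locally running
 *        subdaemons
 */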
static uint32_t
get_process_list(void)
{
int lpc = 0;
uint32_t procs = crm_get_cluster_proc();
for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) {
if (pcmk_children[lpc].pid != 0) {
procs |= pcmk_children[lpc].flag;
}
}
return procs;
}
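/*!
 * \internal
 * \brief Handle a subdaemon that is no longer running: broadcast the new
 *        process list, then either resume shutdown, panic (PCMK_fail_fast),
 *        keep tracking a surviving IPC end-point, or respawn the child
 */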
static void
pcmk_process_exit(pcmk_child_t * child)
{
child->pid = 0;
child->active_before_startup = FALSE;
/* Broadcast the fact that one of our processes died ASAP
*
* Try to get some logging of the cause out first though
* because we're probably about to get fenced
*
* Potentially do this only if respawn_count > N
* to allow for local recovery
*/
update_node_processes(local_nodeid, NULL, get_process_list());
child->respawn_count += 1;
if (child->respawn_count > MAX_RESPAWN) {
crm_err("Child respawn count exceeded by %s", child->name);
child->respawn = FALSE;
}
if (shutdown_trigger) {
/* resume step-wise shutdown (returned TRUE yields no parallelizing) */
mainloop_set_trigger(shutdown_trigger);
/* intended to speed up propagating expected lay-off of the daemons? */
update_node_processes(local_nodeid, NULL, get_process_list());
} else if (!child->respawn) {
/* nothing to do */
} else if (crm_is_true(getenv("PCMK_fail_fast"))) {
crm_err("Rebooting system because of %s", child->name);
pcmk_panic(__FUNCTION__);
} else if (pcmk_child_active(child) == 1) {
crm_warn("One-off suppressing strict respawning of a child process %s,"
" appears alright per %s IPC end-point",
child->name, child->endpoint);
/* need to monitor how it evolves, and start new process if badly */
child->active_before_startup = TRUE;
if (!global_keep_tracking) {
global_keep_tracking = true;
g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL,
check_active_before_startup_processes, NULL);
}
} else {
crm_notice("Respawning failed child process: %s", child->name);
start_child(child);
}
}
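/*!
 * \internal
 * \brief Exit pacemakerd, asking Corosync to shut down as well when the
 *        exit is due to a fatal error
 */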
static void pcmk_exit_with_cluster(int exitcode)
{
#ifdef SUPPORT_COROSYNC
corosync_cfg_handle_t cfg_handle;
cs_error_t err;
if (exitcode == CRM_EX_FATAL) {
crm_info("Asking Corosync to shut down");
err = corosync_cfg_initialize(&cfg_handle, NULL);
if (err != CS_OK) {
crm_warn("Unable to open handle to Corosync to shut it down: err=%d", err);
} else {
err = corosync_cfg_try_shutdown(cfg_handle, COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE);
if (err != CS_OK) {
crm_warn("Corosync shutdown failed: err=%d", err);
}
corosync_cfg_finalize(cfg_handle);
}
}
#endif
crm_exit(exitcode);
}
static void
pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
{
pcmk_child_t *child = mainloop_child_userdata(p);
const char *name = mainloop_child_name(p);
if (signo) {
do_crm_log(((signo == SIGKILL)? LOG_WARNING : LOG_ERR),
"%s[%d] terminated with signal %d (core=%d)",
name, pid, signo, core);
} else {
switch(exitcode) {
case CRM_EX_OK:
crm_info("%s[%d] exited with status %d (%s)",
name, pid, exitcode, crm_exit_str(exitcode));
break;
case CRM_EX_FATAL:
crm_warn("Shutting cluster down because %s[%d] had fatal failure",
name, pid);
child->respawn = FALSE;
fatal_error = TRUE;
pcmk_shutdown(SIGTERM);
break;
case CRM_EX_PANIC:
do_crm_log_always(LOG_EMERG,
"%s[%d] instructed the machine to reset",
name, pid);
child->respawn = FALSE;
fatal_error = TRUE;
pcmk_panic(__FUNCTION__);
pcmk_shutdown(SIGTERM);
break;
default:
crm_err("%s[%d] exited with status %d (%s)",
name, pid, exitcode, crm_exit_str(exitcode));
break;
}
}
pcmk_process_exit(child);
}
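/*!
 * \internal
 * \brief Ask a tracked child to stop by sending it a signal (SIGTERM if
 *        none is specified); untrackable children are skipped
 */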
static gboolean
stop_child(pcmk_child_t * child, int signal)
{
if (signal == 0) {
signal = SIGTERM;
}
/* Why skip a PID of 1?
- on FreeBSD, that is how an untrackable process behind an IPC end-point
is masqueraded
- elsewhere, it designates the "init" task; in particular, with systemd's
socket-based activation, this is a realistic possibility */
if (child->command == NULL || child->pid == PCMK__SPECIAL_PID) {
crm_debug("Nothing to do for child \"%s\" (process %lld)",
child->name, (long long) PCMK__SPECIAL_PID_AS_0(child->pid));
return TRUE;
}
if (child->pid <= 0) {
crm_trace("Client %s not running", child->name);
return TRUE;
}
errno = 0;
if (kill(child->pid, signal) == 0) {
crm_notice("Stopping %s "CRM_XS" sent signal %d to process %d",
child->name, signal, child->pid);
} else {
crm_perror(LOG_ERR, "Could not stop %s (process %d) with signal %d",
child->name, child->pid, signal);
}
return TRUE;
}
static char *opts_default[] = { NULL, NULL };
static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL };
/* TODO once libqb is taught to juggle with IPC end-points carried over as
bare file descriptor (https://github.com/ClusterLabs/libqb/issues/325)
it shall hand over these descriptors here if/once they are successfully
pre-opened in (presumably) pcmk_child_active, to avoid any remaining
room for races */
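/*!
 * \internal
 * \brief Fork and exec a subdaemon, optionally under valgrind/callgrind,
 *        dropping privileges to the configured user in the child first
 */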
static gboolean
start_child(pcmk_child_t * child)
{
int lpc = 0;
uid_t uid = 0;
gid_t gid = 0;
struct rlimit oflimits;
gboolean use_valgrind = FALSE;
gboolean use_callgrind = FALSE;
const char *devnull = "/dev/null";
const char *env_valgrind = getenv("PCMK_valgrind_enabled");
const char *env_callgrind = getenv("PCMK_callgrind_enabled");
child->active_before_startup = FALSE;
if (child->command == NULL) {
crm_info("Nothing to do for child \"%s\"", child->name);
return TRUE;
}
if (env_callgrind != NULL && crm_is_true(env_callgrind)) {
use_callgrind = TRUE;
use_valgrind = TRUE;
} else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) {
use_callgrind = TRUE;
use_valgrind = TRUE;
} else if (env_valgrind != NULL && crm_is_true(env_valgrind)) {
use_valgrind = TRUE;
} else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) {
use_valgrind = TRUE;
}
if (use_valgrind && strlen(VALGRIND_BIN) == 0) {
crm_warn("Cannot enable valgrind for %s:"
" The location of the valgrind binary is unknown", child->name);
use_valgrind = FALSE;
}
if (child->uid) {
if (crm_user_lookup(child->uid, &uid, &gid) < 0) {
crm_err("Invalid user (%s) for %s: not found", child->uid, child->name);
return FALSE;
}
crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name);
}
child->pid = fork();
CRM_ASSERT(child->pid != -1);
if (child->pid > 0) {
/* parent */
mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit);
crm_info("Forked child %d for process %s%s", child->pid, child->name,
use_valgrind ? " (valgrind enabled: " VALGRIND_BIN ")" : "");
update_node_processes(local_nodeid, NULL, get_process_list());
return TRUE;
} else {
/* Start a new session */
(void)setsid();
/* Setup the two alternate arg arrays */
opts_vgrind[0] = strdup(VALGRIND_BIN);
if (use_callgrind) {
opts_vgrind[1] = strdup("--tool=callgrind");
opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p");
opts_vgrind[3] = strdup(child->command);
opts_vgrind[4] = NULL;
} else {
opts_vgrind[1] = strdup(child->command);
opts_vgrind[2] = NULL;
opts_vgrind[3] = NULL;
opts_vgrind[4] = NULL;
}
opts_default[0] = strdup(child->command);
if(gid) {
// Whether we need root group access to talk to cluster layer
bool need_root_group = TRUE;
if (is_corosync_cluster()) {
/* Corosync clusters can drop root group access, because we set
* uidgid.gid.${gid}=1 via CMAP, which allows these processes to
* connect to corosync.
*/
need_root_group = FALSE;
}
// Drop root group access if not needed
if (!need_root_group && (setgid(gid) < 0)) {
crm_perror(LOG_ERR, "Could not set group to %d", gid);
}
/* Initialize supplementary groups to only those always granted to
* the user, plus haclient (so we can access IPC).
*/
if (initgroups(child->uid, gid) < 0) {
crm_err("Cannot initialize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno);
}
}
if (uid && setuid(uid) < 0) {
crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid);
}
/* Close all open file descriptors */
getrlimit(RLIMIT_NOFILE, &oflimits);
for (lpc = 0; lpc < oflimits.rlim_cur; lpc++) {
close(lpc);
}
(void)open(devnull, O_RDONLY); /* Stdin: fd 0 */
(void)open(devnull, O_WRONLY); /* Stdout: fd 1 */
(void)open(devnull, O_WRONLY); /* Stderr: fd 2 */
if (use_valgrind) {
(void)execvp(VALGRIND_BIN, opts_vgrind);
} else {
(void)execvp(child->command, opts_default);
}
crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command);
crm_exit(CRM_EX_FATAL);
}
return TRUE; /* never reached */
}
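/*!
 * \internal
 * \brief Timer callback: force a child that ignored SIGTERM to stop, using
 *        SIGSEGV so that a core is produced for later analysis
 */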
static gboolean
escalate_shutdown(gpointer data)
{
pcmk_child_t *child = data;
if (child->pid == PCMK__SPECIAL_PID) {
pcmk_process_exit(child);
} else if (child->pid) {
/* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */
crm_err("Child %s not terminating in a timely manner, forcing", child->name);
stop_child(child, SIGSEGV);
}
return FALSE;
}
#define SHUTDOWN_ESCALATION_PERIOD 180000 /* 3m */
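/*!
 * \internal
 * \brief Mainloop trigger that stops children in reverse start_seq order,
 *        one phase per invocation, escalating if a child will not terminate
 */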
static gboolean
pcmk_shutdown_worker(gpointer user_data)
{
static int phase = 0;
static time_t next_log = 0;
static int max = SIZEOF(pcmk_children);
int lpc = 0;
if (phase == 0) {
crm_notice("Shutting down Pacemaker");
phase = max;
}
for (; phase > 0; phase--) {
/* Don't stop anything with start_seq < 1 */
for (lpc = max - 1; lpc >= 0; lpc--) {
pcmk_child_t *child = &(pcmk_children[lpc]);
if (phase != child->start_seq) {
continue;
}
if (child->pid) {
time_t now = time(NULL);
if (child->respawn) {
if (child->pid == PCMK__SPECIAL_PID) {
crm_warn("The process behind %s IPC cannot be"
" terminated, so either wait the graceful"
" period of %ld s for its native termination"
" if it vitally depends on some other daemons"
" going down in a controlled way already,"
" or locate and kill the correct %s process"
" on your own; set PCMK_fail_fast=1 to avoid"
" this altogether next time around",
child->name, (long) SHUTDOWN_ESCALATION_PERIOD,
child->command);
}
next_log = now + 30;
child->respawn = FALSE;
stop_child(child, SIGTERM);
if (phase < pcmk_children[PCMK_CHILD_CONTROLD].start_seq) {
g_timeout_add(SHUTDOWN_ESCALATION_PERIOD,
escalate_shutdown, child);
}
} else if (now >= next_log) {
next_log = now + 30;
crm_notice("Still waiting for %s to terminate "
CRM_XS " pid=%d seq=%d",
child->name, child->pid, child->start_seq);
}
return TRUE;
}
/* cleanup */
crm_debug("%s confirmed stopped", child->name);
child->pid = 0;
}
}
/* send_cluster_id(); */
crm_notice("Shutdown complete");
{
const char *delay = daemon_option("shutdown_delay");
if(delay) {
sync();
sleep(crm_get_msec(delay) / 1000);
}
}
g_main_loop_quit(mainloop);
if (fatal_error) {
crm_notice("Shutting down and staying down after fatal error");
pcmk_exit_with_cluster(CRM_EX_FATAL);
}
return TRUE;
}
static void
pcmk_ignore(int nsig)
{
crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig);
}
static void
pcmk_sigquit(int nsig)
{
pcmk_panic(__FUNCTION__);
}
void
pcmk_shutdown(int nsig)
{
if (shutdown_trigger == NULL) {
shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL);
}
mainloop_set_trigger(shutdown_trigger);
}
static int32_t
pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
pcmk_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
/* Exit code means? */
static int32_t
pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
const char *task = NULL;
crm_client_t *c = crm_client_get(qbc);
xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags);
crm_ipcs_send_ack(c, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg == NULL) {
return 0;
}
task = crm_element_value(msg, F_CRM_TASK);
if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) {
/* Time to quit */
crm_notice("Shutting down in response to ticket %s (%s)",
crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN));
pcmk_shutdown(15);
} else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
/* Send to everyone */
struct iovec *iov;
int id = 0;
const char *name = NULL;
crm_element_value_int(msg, XML_ATTR_ID, &id);
name = crm_element_value(msg, XML_ATTR_UNAME);
crm_notice("Instructing peers to remove references to node %s/%u", name, id);
iov = calloc(1, sizeof(struct iovec));
iov->iov_base = dump_xml_unformatted(msg);
iov->iov_len = 1 + strlen(iov->iov_base);
send_cpg_iov(iov);
} else {
update_process_clients(c);
}
free_xml(msg);
return 0;
}
/* Error code means? */
static int32_t
pcmk_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
crm_client_destroy(client);
return 0;
}
static void
pcmk_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
pcmk_ipc_closed(c);
}
struct qb_ipcs_service_handlers mcp_ipc_callbacks = {
.connection_accept = pcmk_ipc_accept,
.connection_created = pcmk_ipc_created,
.msg_process = pcmk_ipc_dispatch,
.connection_closed = pcmk_ipc_closed,
.connection_destroyed = pcmk_ipc_destroy
};
/*!
* \internal
* \brief Send an XML message with process list of all known peers to client(s)
*
* \param[in] client Send message to this client, or all clients if NULL
*/
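/* For illustration (attribute values here are examples only), the update
 * sent to clients looks roughly like:
 *   <nodes quorate="1">
 *     <node id="1" uname="node1" state="member" processes="282"/>
 *   </nodes>
 */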
void
update_process_clients(crm_client_t *client)
{
GHashTableIter iter;
crm_node_t *node = NULL;
xmlNode *update = create_xml_node(NULL, "nodes");
if (is_corosync_cluster()) {
crm_xml_add_int(update, "quorate", pcmk_quorate);
}
g_hash_table_iter_init(&iter, crm_peer_cache);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
xmlNode *xml = create_xml_node(update, "node");
crm_xml_add_int(xml, "id", node->id);
crm_xml_add(xml, "uname", node->uname);
crm_xml_add(xml, "state", node->state);
crm_xml_add_int(xml, "processes", node->processes);
}
if(client) {
crm_trace("Sending process list to client %s", client->id);
crm_ipcs_send(client, 0, update, crm_ipc_server_event);
} else {
crm_trace("Sending process list to %d clients", crm_hash_table_size(client_connections));
g_hash_table_iter_init(&iter, client_connections);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & client)) {
crm_ipcs_send(client, 0, update, crm_ipc_server_event);
}
}
free_xml(update);
}
/*!
* \internal
* \brief Send a CPG message with local node's process list to all peers
*/
static void
update_process_peers(void)
{
/* Do nothing for corosync-2 based clusters */
struct iovec *iov = calloc(1, sizeof(struct iovec));
CRM_ASSERT(iov);
if (local_name) {
iov->iov_base = crm_strdup_printf("<node uname=\"%s\" proclist=\"%u\"/>",
local_name, get_process_list());
} else {
iov->iov_base = crm_strdup_printf("<node proclist=\"%u\"/>",
get_process_list());
}
iov->iov_len = strlen(iov->iov_base) + 1;
crm_trace("Sending %s", (char*) iov->iov_base);
send_cpg_iov(iov);
}
/*!
* \internal
* \brief Update a node's process list, notifying clients and peers if needed
*
* \param[in] id Node ID of affected node
* \param[in] uname Uname of affected node
* \param[in] procs Affected node's process list mask
*
* \return TRUE if the process list changed, FALSE otherwise
*/
static gboolean
update_node_processes(uint32_t id, const char *uname, uint32_t procs)
{
gboolean changed = FALSE;
crm_node_t *node = crm_get_peer(id, uname);
if (procs != 0) {
if (procs != node->processes) {
crm_debug("Node %s now has process list: %.32x (was %.32x)",
node->uname, procs, node->processes);
node->processes = procs;
changed = TRUE;
/* If local node's processes have changed, notify clients/peers */
if (id == local_nodeid) {
update_process_clients(NULL);
update_process_peers();
}
} else {
crm_trace("Node %s still has process list: %.32x", node->uname, procs);
}
}
return changed;
}
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"version", 0, 0, '$', "\tVersion information" },
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{"shutdown", 0, 0, 'S', "\tInstruct Pacemaker to shutdown on this machine"},
{"features", 0, 0, 'F', "\tDisplay the full version and list of features Pacemaker was built with"},
{"-spacer-", 1, 0, '-', "\nAdditional Options:"},
{"foreground", 0, 0, 'f', "\t(Ignored) Pacemaker always runs in the foreground"},
{"pid-file", 1, 0, 'p', "\t(Ignored) Daemon pid file location"},
{"standby", 0, 0, 's', "\tStart node in standby state"},
{NULL, 0, 0, 0}
};
/* *INDENT-ON* */
static void
mcp_chown(const char *path, uid_t uid, gid_t gid)
{
int rc = chown(path, uid, gid);
if (rc < 0) {
crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s",
path, CRM_DAEMON_USER, gid, pcmk_strerror(errno));
}
}
/*!
* \internal
* \brief Check the liveness of the child based on IPC name and PID if tracked
*
* \param[inout] child Child tracked data
*
* \return 0 if no trace of child's liveness detected (while it is detectable
* to begin with, at least according to one of the two properties),
* 1 if everything is fine, 2 if it's up per PID, but not per IPC
* end-point (still starting?), -1 on error, and -2 when the child
* (its IPC) blocked with an unauthorized process (log message
* emitted in both latter cases)
*
* \note This function doesn't modify any of \p child members but \c pid,
* and is not actively toying with processes as such but invoking
* \c stop_child in one particular case (there's for some reason
* a different authentic holder of the IPC end-point).
*/
static int
pcmk_child_active(pcmk_child_t *child) {
static uid_t cl_uid = 0;
static gid_t cl_gid = 0;
const uid_t root_uid = 0;
const gid_t root_gid = 0;
const uid_t *ref_uid;
const gid_t *ref_gid;
int ret = 0;
pid_t ipc_pid = 0;
if (child->endpoint == NULL
&& (child->pid <= 0 || child->pid == PCMK__SPECIAL_PID)) {
crm_err("Cannot track child %s for missing both API end-point and PID",
child->name);
ret = -1; /* misuse of the function when child is not trackable */
} else if (child->endpoint != NULL) {
ref_uid = (child->uid != NULL) ? &cl_uid : &root_uid;
ref_gid = (child->uid != NULL) ? &cl_gid : &root_gid;
if (child->uid != NULL && !cl_uid && !cl_gid
&& crm_user_lookup(CRM_DAEMON_USER, &cl_uid, &cl_gid) < 0) {
crm_err("Could not find user and group IDs for user %s",
CRM_DAEMON_USER);
ret = -1;
} else if ((ret = pcmk__ipc_is_authentic_process_active(child->endpoint,
*ref_uid, *ref_gid,
&ipc_pid)) < 0) {
/* game over */
} else if (child->pid <= 0) {
/* hit new child to be initialized, or reset to zero
and investigate further for ret == 0 */
child->pid = ipc_pid;
} else if (ipc_pid && child->pid != ipc_pid) {
/* ultimately strange for ret == 1; either way, investigate */
ret = 0;
}
}
if (!ret) {
/* when no IPC based liveness detected (incl. if ever a child without
IPC is tracked), or detected for a different _authentic_ process;
safe on FreeBSD since the only change possible from a proper child's
PID into "special" PID of 1 behind more loosely related process */
ret = crm_pid_active(child->pid, child->name);
if (ipc_pid && (ret != 1
|| ipc_pid == PCMK__SPECIAL_PID
|| crm_pid_active(ipc_pid, child->name) == 1)) {
if (ret == 1) {
/* Assume there's no forking-while-retaining-IPC-socket
involved in the "children's" lifecycle, hence the
tracking got out of sync purely because of some external
(esoteric?) forces (a user-initiated forced process
"refresh"? or even an intentional race on start-up?),
and that switching over to this other detected, authentic
instance already in possession of the IPC is a better
trade-off than "neutralizing" it first, which would
otherwise be needed to give either the original or a
newly spawned daemon process leeway to operate */
/* the tracked process does not possess the IPC after all
(what about corosync CPG?), so get rid of it */
stop_child(child, SIGKILL);
} else {
ret = 1;
}
child->pid = ipc_pid;
} else if (ret == 1) {
ret = 2; /* up per PID, but not per IPC (still starting?) */
} else if (!child->pid && ret == -1) {
ret = 0; /* correct -1 on FreeBSD from above back to 0 */
}
}
return ret;
}
static gboolean
check_active_before_startup_processes(gpointer user_data)
{
int start_seq = 1, lpc = 0;
static int max = SIZEOF(pcmk_children);
gboolean keep_tracking = FALSE;
for (start_seq = 1; start_seq < max; start_seq++) {
for (lpc = 0; lpc < max; lpc++) {
if (pcmk_children[lpc].active_before_startup == FALSE) {
/* we are already tracking it as a child process. */
continue;
} else if (start_seq != pcmk_children[lpc].start_seq) {
continue;
} else {
const char *name = pcmk_children[lpc].name;
int ret;
switch ((ret = pcmk_child_active(&pcmk_children[lpc]))) {
case 1:
break;
case 0:
case 2: /* this very case: it was OK once already */
if (pcmk_children[lpc].respawn == TRUE) {
/* presumably after crash, hence critical */
crm_crit("Process %s terminated (pid=%lld)%s", \
name, (long long)
PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
ret ? ", at least per IPC end-point that went AWOL"
: "");
} else {
/* orderly shutdown */
crm_notice("Process %s terminated (pid=%lld)%s", \
name, (long long)
PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
ret ? ", at least per IPC end-point that went AWOL"
: "");
}
pcmk_process_exit(&(pcmk_children[lpc]));
continue;
default:
crm_crit("Unexpected value from pcmk_child_active:"
" %d (pid=%lld)", ret,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[lpc].pid));
/* fall through */
case -1:
case -2:
/* message(s) already emitted */
crm_exit(CRM_EX_FATAL);
break; /* static analysis/noreturn */
}
}
/* at least one of the processes found at startup
* is still going, so keep this recurring timer around */
keep_tracking = TRUE;
}
}
global_keep_tracking = keep_tracking;
return keep_tracking;
}
/*!
* \internal
* \brief Initial one-off check of the pre-existing "child" processes
*
* With "child" process, we mean the subdaemon that defines an API end-point
* (all of them do as of the comment) -- the possible complement is skipped
* as it is deemed it has no such shared resources to cause conflicts about,
* hence it can presumably be started anew without hesitation.
* If that won't hold true in the future, the concept of a shared resource
* will have to be generalized beyond the API end-point.
*
* For boundary cases that the "child" is still starting (IPC end-point is yet
* to be witnessed), or more rarely (practically FreeBSD only), when there's
* a pre-existing "untrackable" authentic process, we give the situation some
* time to possibly unfold in the right direction, meaning that said socket
* will appear or the unattainable process will disappear per the observable
* IPC, respectively.
*
* \return 0 if no such "child" process found, positive number X when X
* "children" detected, -1 on an internal error, -2 when any
* would-be-used IPC is blocked with an unauthorized process
*
* \note Since this gets run at the very start, \c respawn_count fields
* for particular children get temporarily overloaded with "rounds
* of waiting" tracking, restored once we are about to finish with
* success (i.e. returning value >=0) and will remain unrestored
* otherwise. One way to suppress liveness detection logic for
* particular child is to set the said value to a negative number.
*/
#define WAIT_TRIES 4 /* together with interleaved sleeps, worst case ~ 1s */
static int
find_and_track_existing_processes(void)
{
unsigned tracking = 0U;
bool wait_in_progress;
int cur;
size_t i, rounds;
for (rounds = 1; rounds <= WAIT_TRIES; rounds++) {
wait_in_progress = false;
for (i = 0; i < SIZEOF(pcmk_children); i++) {
if (!pcmk_children[i].endpoint
|| pcmk_children[i].respawn_count < 0
|| !(cur = pcmk_child_active(&pcmk_children[i]))) {
/* as a speculation, don't give up in the context of
pcmk_child_active check if there are more rounds to
come for other reasons, but don't artificially wait just
because of this, since we would preferably start ASAP */
continue;
}
pcmk_children[i].respawn_count = rounds;
switch (cur) {
case 1:
if (pcmk_children[i].pid == PCMK__SPECIAL_PID) {
if (crm_is_true(getenv("PCMK_fail_fast"))) {
crm_crit("Cannot reliably track pre-existing"
" authentic process behind %s IPC on this"
" platform and PCMK_fail_fast requested",
pcmk_children[i].endpoint);
return -1;
} else if (pcmk_children[i].respawn_count == WAIT_TRIES) {
crm_notice("Assuming pre-existing authentic, though"
" on this platform untrackable, process"
" behind %s IPC is stable (was in %d"
" previous samples) so rather than"
" bailing out (PCMK_fail_fast not"
" requested), we just switch to a less"
" optimal IPC liveness monitoring"
" (not very suitable for heavy load)",
pcmk_children[i].name, WAIT_TRIES - 1);
crm_warn("The process behind %s IPC cannot be"
" terminated, so the overall shutdown"
" will get delayed implicitly (%ld s),"
" which serves as a graceful period for"
" its native termination if it vitally"
" depends on some other daemons going"
" down in a controlled way already",
pcmk_children[i].name,
(long) SHUTDOWN_ESCALATION_PERIOD);
} else {
wait_in_progress = true;
crm_warn("Cannot reliably track pre-existing"
" authentic process behind %s IPC on this"
" platform, can still disappear in %d"
" attempt(s)", pcmk_children[i].endpoint,
WAIT_TRIES - pcmk_children[i].respawn_count);
continue;
}
}
crm_notice("Tracking existing %s process (pid=%lld)",
pcmk_children[i].name,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid));
pcmk_children[i].respawn_count = -1; /* 0~keep watching */
pcmk_children[i].active_before_startup = TRUE;
tracking++;
break;
case 2:
if (pcmk_children[i].respawn_count == WAIT_TRIES) {
crm_crit("%s IPC end-point for existing authentic"
" process %lld did not (re)appear",
pcmk_children[i].endpoint,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid));
return -1;
}
wait_in_progress = true;
crm_warn("Cannot find %s IPC end-point for existing"
" authentic process %lld, can still (re)appear"
" in %d attempts (?)",
pcmk_children[i].endpoint,
(long long) PCMK__SPECIAL_PID_AS_0(
pcmk_children[i].pid),
WAIT_TRIES - pcmk_children[i].respawn_count);
continue;
case -1:
case -2:
return cur; /* messages already emitted */
default:
crm_crit("Unexpected condition"CRM_XS"cur=%d", cur);
return -1; /* unexpected condition */
}
}
if (!wait_in_progress) {
break;
}
(void) poll(NULL, 0, 250); /* a bit for changes to possibly happen */
}
for (i = 0; i < SIZEOF(pcmk_children); i++) {
pcmk_children[i].respawn_count = 0; /* restore pristine state */
}
if (tracking) {
g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL,
check_active_before_startup_processes, NULL);
}
return (tracking > INT_MAX) ? INT_MAX : tracking;
}
static void
init_children_processes(void)
{
int start_seq = 1, lpc = 0;
static int max = SIZEOF(pcmk_children);
/* start any children that have not been detected */
for (start_seq = 1; start_seq < max; start_seq++) {
/* don't start anything with start_seq < 1 */
for (lpc = 0; lpc < max; lpc++) {
if (pcmk_children[lpc].pid) {
/* we are already tracking it */
continue;
}
if (start_seq == pcmk_children[lpc].start_seq) {
start_child(&(pcmk_children[lpc]));
}
}
}
/* From this point on, any daemons being started will be due to
* respawning rather than node start.
*
* This may be useful for the daemons to know
*/
setenv("PCMK_respawned", "true", 1);
}
static void
mcp_cpg_destroy(gpointer user_data)
{
crm_crit("Lost connection to cluster layer, shutting down");
crm_exit(CRM_EX_DISCONNECT);
}
/*!
* \internal
* \brief Process a CPG message (process list or manual peer cache removal)
*
* \param[in] handle CPG connection (ignored)
* \param[in] groupName CPG group name (ignored)
* \param[in] nodeid ID of affected node
* \param[in] pid Process ID (ignored)
* \param[in] msg CPG XML message
* \param[in] msg_len Length of msg in bytes (ignored)
*/
static void
mcp_cpg_deliver(cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
{
xmlNode *xml = string2xml(msg);
const char *task = crm_element_value(xml, F_CRM_TASK);
crm_trace("Received CPG message (%s): %.200s",
(task? task : "process list"), (char*)msg);
if (task == NULL) {
if (nodeid == local_nodeid) {
crm_debug("Ignoring message with local node's process list");
} else {
uint32_t procs = 0;
const char *uname = crm_element_value(xml, "uname");
crm_element_value_int(xml, "proclist", (int *)&procs);
if (update_node_processes(nodeid, uname, procs)) {
update_process_clients(NULL);
}
}
} else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
int id = 0;
const char *name = NULL;
crm_element_value_int(xml, XML_ATTR_ID, &id);
name = crm_element_value(xml, XML_ATTR_UNAME);
reap_crm_member(id, name);
}
if (xml != NULL) {
free_xml(xml);
}
}
static void
mcp_cpg_membership(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries)
{
/* Update peer cache if needed */
pcmk_cpg_membership(handle, groupName, member_list, member_list_entries,
left_list, left_list_entries,
joined_list, joined_list_entries);
/* Always broadcast our own presence after any membership change */
update_process_peers();
}
static gboolean
mcp_quorum_callback(unsigned long long seq, gboolean quorate)
{
pcmk_quorate = quorate;
return TRUE;
}
static void
mcp_quorum_destroy(gpointer user_data)
{
crm_info("connection lost");
}
int
main(int argc, char **argv)
{
int rc;
int flag;
int argerr = 0;
int option_index = 0;
gboolean shutdown = FALSE;
uid_t pcmk_uid = 0;
gid_t pcmk_gid = 0;
struct rlimit cores;
crm_ipc_t *old_instance = NULL;
qb_ipcs_service_t *ipcs = NULL;
static crm_cluster_t cluster;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "mode [options]", long_options, "Start/Stop Pacemaker\n");
mainloop_add_signal(SIGHUP, pcmk_ignore);
mainloop_add_signal(SIGQUIT, pcmk_sigquit);
while (1) {
flag = crm_get_option(argc, argv, &option_index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'f':
/* Legacy */
break;
case 'p':
pid_file = optarg;
break;
case 's':
set_daemon_option("node_start_state", "standby");
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
case 'S':
shutdown = TRUE;
break;
case 'F':
printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION,
CRM_FEATURE_SET, CRM_FEATURES);
crm_exit(CRM_EX_OK);
default:
printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
++argerr;
break;
}
}
if (optind < argc) {
printf("non-option ARGV-elements: ");
while (optind < argc)
printf("%s ", argv[optind++]);
printf("\n");
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
setenv("LC_ALL", "C", 1);
set_daemon_option("mcp", "true");
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
crm_debug("Checking for old instances of %s", CRM_SYSTEM_MCP);
old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0);
crm_ipc_connect(old_instance);
if (shutdown) {
crm_debug("Terminating previous instance");
while (crm_ipc_connected(old_instance)) {
xmlNode *cmd =
create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL);
crm_debug(".");
crm_ipc_send(old_instance, cmd, 0, 0, NULL);
free_xml(cmd);
sleep(2);
}
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_exit(CRM_EX_OK);
} else if (crm_ipc_connected(old_instance)) {
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
crm_err("Pacemaker is already active, aborting startup");
crm_exit(CRM_EX_FATAL);
}
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
if (mcp_read_config() == FALSE) {
crm_notice("Could not obtain corosync config data, exiting");
crm_exit(CRM_EX_UNAVAILABLE);
}
// OCF shell functions and cluster-glue need facility under different name
{
const char *facility = daemon_option("logfacility");
if (facility && safe_str_neq(facility, "none")) {
setenv("HA_LOGFACILITY", facility, 1);
}
}
crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s",
PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
mainloop = g_main_loop_new(NULL, FALSE);
rc = getrlimit(RLIMIT_CORE, &cores);
if (rc < 0) {
crm_perror(LOG_ERR, "Cannot determine current maximum core size.");
} else {
if (cores.rlim_max == 0 && geteuid() == 0) {
cores.rlim_max = RLIM_INFINITY;
} else {
crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max);
}
cores.rlim_cur = cores.rlim_max;
rc = setrlimit(RLIMIT_CORE, &cores);
if (rc < 0) {
crm_perror(LOG_ERR,
"Core file generation will remain disabled."
" Core files are an important diagnostic tool, so"
" please consider enabling them by default.");
}
}
if (crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) {
crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER);
crm_exit(CRM_EX_NOUSER);
}
mkdir(CRM_STATE_DIR, 0750);
mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
/* Used to store core/blackbox/scheduler/cib files in */
crm_build_path(CRM_PACEMAKER_DIR, 0750);
mcp_chown(CRM_PACEMAKER_DIR, pcmk_uid, pcmk_gid);
/* Used to store core files in */
crm_build_path(CRM_CORE_DIR, 0750);
mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid);
/* Used to store blackbox dumps in */
crm_build_path(CRM_BLACKBOX_DIR, 0750);
mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid);
// Used to store scheduler inputs in
crm_build_path(PE_STATE_DIR, 0750);
mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid);
/* Used to store the cluster configuration */
crm_build_path(CRM_CONFIG_DIR, 0750);
mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid);
// Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks);
if (ipcs == NULL) {
crm_err("Couldn't start IPC server");
crm_exit(CRM_EX_OSERR);
}
/* Allows us to block shutdown */
if (cluster_connect_cfg(&local_nodeid) == FALSE) {
crm_err("Couldn't connect to Corosync's CFG service");
crm_exit(CRM_EX_PROTOCOL);
}
if(pcmk_locate_sbd() > 0) {
setenv("PCMK_watchdog", "true", 1);
} else {
setenv("PCMK_watchdog", "false", 1);
}
switch (find_and_track_existing_processes()) {
case -1:
crm_crit("Internal fatality, see the log");
crm_exit(CRM_EX_FATAL);
case -2:
crm_crit("Blocked by foreign process, kill the offender");
crm_exit(CRM_EX_CANTCREAT);
default:
break;
};
cluster.destroy = mcp_cpg_destroy;
cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver;
cluster.cpg.cpg_confchg_fn = mcp_cpg_membership;
crm_set_autoreap(FALSE);
rc = pcmk_ok;
if (cluster_connect_cpg(&cluster) == FALSE) {
crm_err("Couldn't connect to Corosync's CPG service");
rc = -ENOPROTOOPT;
} else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy)
== FALSE) {
rc = -ENOTCONN;
} else {
local_name = get_local_node_name();
update_node_processes(local_nodeid, local_name, get_process_list());
mainloop_add_signal(SIGTERM, pcmk_shutdown);
mainloop_add_signal(SIGINT, pcmk_shutdown);
init_children_processes();
- crm_info("Starting mainloop");
-
+ crm_notice("Pacemaker daemon successfully started and accepting connections");
g_main_loop_run(mainloop);
}
if (ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
g_main_loop_unref(mainloop);
cluster_disconnect_cpg(&cluster);
cluster_disconnect_cfg();
crm_exit(crm_errno2exit(rc));
}
diff --git a/daemons/schedulerd/pacemaker-schedulerd.c b/daemons/schedulerd/pacemaker-schedulerd.c
index e349bdc099..0eca09fd7b 100644
--- a/daemons/schedulerd/pacemaker-schedulerd.c
+++ b/daemons/schedulerd/pacemaker-schedulerd.c
@@ -1,350 +1,350 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define OPTARGS "hVc"
static GMainLoop *mainloop = NULL;
static qb_ipcs_service_t *ipcs = NULL;
static pe_working_set_t *sched_data_set = NULL;
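/* Pick which input file series to archive the scheduler input under, based
 * on whether processing produced errors, warnings, or neither (the value
 * indexes into series[] below)
 */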
#define get_series() was_processing_error?1:was_processing_warning?2:3
typedef struct series_s {
const char *name;
const char *param;
int wrap;
} series_t;
series_t series[] = {
{"pe-unknown", "_dont_match_anything_", -1},
{"pe-error", "pe-error-series-max", -1},
{"pe-warn", "pe-warn-series-max", 200},
{"pe-input", "pe-input-series-max", 400},
};
void pengine_shutdown(int nsig);
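/*!
 * \internal
 * \brief Handle a scheduler IPC request: for CRM_OP_PECALC, run the
 *        scheduler on the supplied CIB, reply with the transition graph,
 *        and archive the input under the appropriate file series
 */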
static gboolean
process_pe_message(xmlNode * msg, xmlNode * xml_data, crm_client_t * sender)
{
static char *last_digest = NULL;
static char *filename = NULL;
time_t execution_date = time(NULL);
const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO);
const char *op = crm_element_value(msg, F_CRM_TASK);
const char *ref = crm_element_value(msg, F_CRM_REFERENCE);
crm_trace("Processing %s op (ref=%s)...", op, ref);
if (op == NULL) {
/* error */
} else if (strcasecmp(op, CRM_OP_HELLO) == 0) {
/* ignore */
} else if (safe_str_eq(crm_element_value(msg, F_CRM_MSG_TYPE), XML_ATTR_RESPONSE)) {
/* ignore */
} else if (sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_PENGINE) != 0) {
crm_trace("Bad sys-to %s", crm_str(sys_to));
return FALSE;
} else if (strcasecmp(op, CRM_OP_PECALC) == 0) {
int seq = -1;
int series_id = 0;
int series_wrap = 0;
char *digest = NULL;
const char *value = NULL;
xmlNode *converted = NULL;
xmlNode *reply = NULL;
gboolean is_repoke = FALSE;
gboolean process = TRUE;
crm_config_error = FALSE;
crm_config_warning = FALSE;
was_processing_error = FALSE;
was_processing_warning = FALSE;
if (sched_data_set == NULL) {
sched_data_set = pe_new_working_set();
CRM_ASSERT(sched_data_set != NULL);
}
digest = calculate_xml_versioned_digest(xml_data, FALSE, FALSE, CRM_FEATURE_SET);
converted = copy_xml(xml_data);
if (cli_config_update(&converted, NULL, TRUE) == FALSE) {
sched_data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
crm_xml_add_int(sched_data_set->graph, "transition_id", 0);
crm_xml_add_int(sched_data_set->graph, "cluster-delay", 0);
process = FALSE;
free(digest);
} else if (safe_str_eq(digest, last_digest)) {
crm_info("Input has not changed since last time, not saving to disk");
is_repoke = TRUE;
free(digest);
} else {
free(last_digest);
last_digest = digest;
}
if (process) {
pcmk__schedule_actions(sched_data_set, converted, NULL);
}
series_id = get_series();
series_wrap = series[series_id].wrap;
value = pe_pref(sched_data_set->config_hash, series[series_id].param);
if (value != NULL) {
series_wrap = crm_int_helper(value, NULL);
if (errno != 0) {
series_wrap = series[series_id].wrap;
}
} else {
crm_config_warn("No value specified for cluster"
" preference: %s", series[series_id].param);
}
seq = get_last_sequence(PE_STATE_DIR, series[series_id].name);
crm_trace("Series %s: wrap=%d, seq=%d, pref=%s",
series[series_id].name, series_wrap, seq, value);
sched_data_set->input = NULL;
reply = create_reply(msg, sched_data_set->graph);
CRM_ASSERT(reply != NULL);
if (is_repoke == FALSE) {
free(filename);
filename =
generate_series_filename(PE_STATE_DIR, series[series_id].name, seq, HAVE_BZLIB_H);
}
crm_xml_add(reply, F_CRM_TGRAPH_INPUT, filename);
crm_xml_add_int(reply, "graph-errors", was_processing_error);
crm_xml_add_int(reply, "graph-warnings", was_processing_warning);
crm_xml_add_int(reply, "config-errors", crm_config_error);
crm_xml_add_int(reply, "config-warnings", crm_config_warning);
if (crm_ipcs_send(sender, 0, reply, crm_ipc_server_event) == FALSE) {
int graph_file_fd = 0;
char *graph_file = NULL;
umask(S_IWGRP | S_IWOTH | S_IROTH);
graph_file = crm_strdup_printf("%s/pengine.graph.XXXXXX",
PE_STATE_DIR);
graph_file_fd = mkstemp(graph_file);
crm_err("Couldn't send transition graph to peer, writing to %s instead",
graph_file);
crm_xml_add(reply, F_CRM_TGRAPH, graph_file);
write_xml_fd(sched_data_set->graph, graph_file, graph_file_fd, FALSE);
free(graph_file);
free_xml(first_named_child(reply, F_CRM_DATA));
CRM_ASSERT(crm_ipcs_send(sender, 0, reply, crm_ipc_server_event));
}
free_xml(reply);
pe_reset_working_set(sched_data_set);
pcmk__log_transition_summary(filename);
if (is_repoke == FALSE && series_wrap != 0) {
unlink(filename);
crm_xml_add_int(xml_data, "execution-date", execution_date);
write_xml_file(xml_data, filename, HAVE_BZLIB_H);
write_last_sequence(PE_STATE_DIR, series[series_id].name, seq + 1, series_wrap);
} else {
crm_trace("Not writing out %s: %d & %d", filename, is_repoke, series_wrap);
}
free_xml(converted);
}
return TRUE;
}
static int32_t
pe_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (crm_client_new(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
pe_ipc_created(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
}
gboolean process_pe_message(xmlNode * msg, xmlNode * xml_data, crm_client_t * sender);
static int32_t
pe_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
crm_client_t *c = crm_client_get(qbc);
xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags);
crm_ipcs_send_ack(c, id, flags, "ack", __FUNCTION__, __LINE__);
if (msg != NULL) {
xmlNode *data_xml = get_message_xml(msg, F_CRM_DATA);
process_pe_message(msg, data_xml, c);
free_xml(msg);
}
return 0;
}
/* Error code means? */
static int32_t
pe_ipc_closed(qb_ipcs_connection_t * c)
{
crm_client_t *client = crm_client_get(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
crm_client_destroy(client);
return 0;
}
static void
pe_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
pe_ipc_closed(c);
}
struct qb_ipcs_service_handlers ipc_callbacks = {
.connection_accept = pe_ipc_accept,
.connection_created = pe_ipc_created,
.msg_process = pe_ipc_dispatch,
.connection_closed = pe_ipc_closed,
.connection_destroyed = pe_ipc_destroy
};
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
/* Top-level Options */
{"help", 0, 0, '?', "\tThis text"},
{"verbose", 0, 0, 'V', "\tIncrease debug output"},
{0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
int flag;
int index = 0;
int argerr = 0;
crm_log_preinit(NULL, argc, argv);
crm_set_options(NULL, "[options]",
long_options, "Daemon for calculating the cluster's response to events");
mainloop_add_signal(SIGTERM, pengine_shutdown);
while (1) {
flag = crm_get_option(argc, argv, &index);
if (flag == -1)
break;
switch (flag) {
case 'V':
crm_bump_log_level(argc, argv);
break;
case 'h': /* Help message */
crm_help('?', CRM_EX_OK);
break;
default:
++argerr;
break;
}
}
if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
pe_metadata();
return CRM_EX_OK;
}
if (optind > argc) {
++argerr;
}
if (argerr) {
crm_help('?', CRM_EX_USAGE);
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
+ crm_notice("Starting Pacemaker scheduler");
+
if (pcmk__daemon_can_write(PE_STATE_DIR, NULL) == FALSE) {
crm_err("Terminating due to bad permissions on " PE_STATE_DIR);
fprintf(stderr,
"ERROR: Bad permissions on " PE_STATE_DIR " (see logs for details)\n");
fflush(stderr);
return CRM_EX_FATAL;
}
- crm_debug("Init server comms");
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_PENGINE, QB_IPC_SHM, &ipc_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
crm_exit(CRM_EX_FATAL);
}
/* Create the mainloop and run it... */
- crm_info("Starting %s", crm_system_name);
-
mainloop = g_main_loop_new(NULL, FALSE);
+ crm_notice("Pacemaker scheduler successfully started and accepting connections");
g_main_loop_run(mainloop);
pe_free_working_set(sched_data_set);
crm_info("Exiting %s", crm_system_name);
crm_exit(CRM_EX_OK);
}
void
pengine_shutdown(int nsig)
{
mainloop_del_ipc_server(ipcs);
pe_free_working_set(sched_data_set);
crm_exit(CRM_EX_OK);
}