diff --git a/cts/patterns.py b/cts/patterns.py
index d030b21954..0ea8ab826d 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,410 +1,410 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2008-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
# pcs can log this when node is fenced, but fencing is OK in some
# tests (and we will catch it in pacemaker logs when not OK)
r"pcs.daemon:No response from: .* request: get_configs, error:",
]
self.BadNews = []
self.components = {}
self.commands = {
- "StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null",
+ "StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-maintenance-mode-setting\" name=\"maintenance-mode\" value=\"true\"/></cluster_property_set>'",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover %s",
"Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)",
"Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
"Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* targeting %s using .* returned 0",
}
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
# "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
"Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* using FencingFail returned ",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"[^(]error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
r"pacemaker-schedulerd.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
]
self.components["corosync-ignore"] = [
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling Node .* for STONITH",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning failed child process: pacemaker-attrd",
r"pacemakerd.* Respawning failed child process: pacemaker-based",
r"pacemakerd.* Respawning failed child process: pacemaker-controld",
r"pacemakerd.* Respawning failed child process: pacemaker-fenced",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.*Respawning failed child process: pacemaker-execd",
r"pacemakerd.*Respawning failed child process: pacemaker-controld",
]
self.components["pacemaker-execd-ignore"] = [
r"pacemaker-attrd.*Connection to lrmd (failed|closed)",
r"pacemaker-(attrd|controld).*Could not execute alert",
]
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"Respawning failed child process: pacemaker-controld",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
"Connection to pengine failed",
"Connection to pengine.* closed",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = []
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
r"pacemaker-controld.*Fencer successfully connected",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Fencer connection failed \(will retry\)",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
# python cts/CTSpatt.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
diff --git a/include/pcmki/pcmki_cluster_queries.h b/include/pcmki/pcmki_cluster_queries.h
index 955eea35a6..e5fbb92c46 100644
--- a/include/pcmki/pcmki_cluster_queries.h
+++ b/include/pcmki/pcmki_cluster_queries.h
@@ -1,15 +1,11 @@
#include <glib.h> // gboolean, GMainLoop, etc.
#include <crm/crm.h>
#include <crm/common/output_internal.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/ipc_pacemakerd.h>
int pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_timeout_ms);
int pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms);
int pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms);
int pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT);
-
-// remove when parameters removed from tools/crmadmin.c
-int pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node);
-int pcmk__start_election(pcmk__output_t *out);
diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c
index 6e6acdac74..0bb399edae 100644
--- a/lib/pacemaker/pcmk_cluster_queries.c
+++ b/lib/pacemaker/pcmk_cluster_queries.c
@@ -1,545 +1,499 @@
#include <glib.h> // gboolean, GMainLoop, etc.
#include <libxml/tree.h> // xmlNode
#include <pacemaker.h>
#include <pacemaker-internal.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/output_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/iso8601.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/ipc_pacemakerd.h>
#include <crm/common/mainloop.h>
#define DEFAULT_MESSAGE_TIMEOUT_MS 30000
typedef struct {
pcmk__output_t *out;
GMainLoop *mainloop;
int rc;
guint message_timer_id;
guint message_timeout_ms;
} data_t;
static void
quit_main_loop(data_t *data)
{
if (data->mainloop != NULL) {
GMainLoop *mloop = data->mainloop;
data->mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
admin_message_timeout(gpointer user_data)
{
data_t *data = user_data;
pcmk__output_t *out = data->out;
out->err(out, "error: No reply received from controller before timeout (%dms)",
data->message_timeout_ms);
data->message_timer_id = 0;
data->rc = ETIMEDOUT;
quit_main_loop(data);
return FALSE; // Tells glib to remove source
}
static void
start_main_loop(data_t *data)
{
if (data->message_timeout_ms < 1) {
data->message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS;
}
data->rc = ECONNRESET; // For unexpected disconnects
data->mainloop = g_main_loop_new(NULL, FALSE);
data->message_timer_id = g_timeout_add(data->message_timeout_ms,
admin_message_timeout,
data);
g_main_loop_run(data->mainloop);
}
static void
event_done(data_t *data, pcmk_ipc_api_t *api)
{
pcmk_disconnect_ipc(api);
quit_main_loop(data);
}
static pcmk_controld_api_reply_t *
controld_event_reply(data_t *data, pcmk_ipc_api_t *controld_api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data)
{
pcmk__output_t *out = data->out;
pcmk_controld_api_reply_t *reply = event_data;
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (data->rc == ECONNRESET) { // Unexpected
out->err(out, "error: Lost connection to controller");
}
event_done(data, controld_api);
return NULL;
case pcmk_ipc_event_reply:
break;
default:
return NULL;
}
if (data->message_timer_id != 0) {
g_source_remove(data->message_timer_id);
data->message_timer_id = 0;
}
if (status != CRM_EX_OK) {
out->err(out, "error: Bad reply from controller: %s",
crm_exit_str(status));
data->rc = EBADMSG;
event_done(data, controld_api);
return NULL;
}
if (reply->reply_type != pcmk_controld_reply_ping) {
out->err(out, "error: Unknown reply type %d from controller",
reply->reply_type);
data->rc = EBADMSG;
event_done(data, controld_api);
return NULL;
}
return reply;
}
static void
controller_status_event_cb(pcmk_ipc_api_t *controld_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
data_t *data = user_data;
pcmk__output_t *out = data->out;
pcmk_controld_api_reply_t *reply = controld_event_reply(data, controld_api,
event_type, status, event_data);
if (reply != NULL) {
out->message(out, "health",
reply->data.ping.sys_from,
reply->host_from,
reply->data.ping.fsa_state,
reply->data.ping.result);
data->rc = pcmk_rc_ok;
}
event_done(data, controld_api);
}
static void
designated_controller_event_cb(pcmk_ipc_api_t *controld_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
data_t *data = user_data;
pcmk__output_t *out = data->out;
pcmk_controld_api_reply_t *reply = controld_event_reply(data, controld_api,
event_type, status, event_data);
if (reply != NULL) {
out->message(out, "dc", reply->host_from);
data->rc = pcmk_rc_ok;
}
event_done(data, controld_api);
}
static void
pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
data_t *data = user_data;
pcmk__output_t *out = data->out;
pcmk_pacemakerd_api_reply_t *reply = event_data;
crm_time_t *crm_when;
char *pinged_buf = NULL;
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (data->rc == ECONNRESET) { // Unexpected
out->err(out, "error: Lost connection to pacemakerd");
}
event_done(data, pacemakerd_api);
return;
case pcmk_ipc_event_reply:
break;
default:
return;
}
if (data->message_timer_id != 0) {
g_source_remove(data->message_timer_id);
data->message_timer_id = 0;
}
if (status != CRM_EX_OK) {
out->err(out, "error: Bad reply from pacemakerd: %s",
crm_exit_str(status));
event_done(data, pacemakerd_api);
return;
}
if (reply->reply_type != pcmk_pacemakerd_reply_ping) {
out->err(out, "error: Unknown reply type %d from pacemakerd",
reply->reply_type);
event_done(data, pacemakerd_api);
return;
}
// Parse desired information from reply
crm_when = crm_time_new(NULL);
crm_time_set_timet(crm_when, &reply->data.ping.last_good);
pinged_buf = crm_time_as_string(crm_when,
crm_time_log_date | crm_time_log_timeofday |
crm_time_log_with_timezone);
out->message(out, "pacemakerd-health",
reply->data.ping.sys_from,
(reply->data.ping.status == pcmk_rc_ok)?
pcmk_pacemakerd_api_daemon_state_enum2text(
reply->data.ping.state):"query failed",
(reply->data.ping.status == pcmk_rc_ok)?pinged_buf:"");
data->rc = pcmk_rc_ok;
crm_time_free(crm_when);
free(pinged_buf);
event_done(data, pacemakerd_api);
}
static pcmk_ipc_api_t *
ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb)
{
int rc;
pcmk__output_t *out = data->out;
pcmk_ipc_api_t *api = NULL;
rc = pcmk_new_ipc_api(&api, server);
if (api == NULL) {
out->err(out, "error: Could not connect to %s: %s",
pcmk_ipc_name(api, true),
pcmk_rc_str(rc));
data->rc = rc;
return NULL;
}
if (cb != NULL) {
pcmk_register_ipc_callback(api, cb, data);
}
rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_main);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Could not connect to %s: %s",
pcmk_ipc_name(api, true),
pcmk_rc_str(rc));
data->rc = rc;
return NULL;
}
return api;
}
int
pcmk__controller_status(pcmk__output_t *out, char *dest_node, guint message_timeout_ms)
{
data_t data = {
.out = out,
.mainloop = NULL,
.rc = pcmk_rc_ok,
.message_timer_id = 0,
.message_timeout_ms = message_timeout_ms
};
pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, controller_status_event_cb);
if (controld_api != NULL) {
int rc = pcmk_controld_api_ping(controld_api, dest_node);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
data.rc = rc;
}
start_main_loop(&data);
pcmk_free_ipc_api(controld_api);
}
return data.rc;
}
int
pcmk_controller_status(xmlNodePtr *xml, char *dest_node, unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__out_prologue(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__controller_status(out, dest_node, (guint) message_timeout_ms);
pcmk__out_epilogue(out, xml, rc);
return rc;
}
int
pcmk__designated_controller(pcmk__output_t *out, guint message_timeout_ms)
{
data_t data = {
.out = out,
.mainloop = NULL,
.rc = pcmk_rc_ok,
.message_timer_id = 0,
.message_timeout_ms = message_timeout_ms
};
pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, designated_controller_event_cb);
if (controld_api != NULL) {
int rc = pcmk_controld_api_ping(controld_api, NULL);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
data.rc = rc;
}
start_main_loop(&data);
pcmk_free_ipc_api(controld_api);
}
return data.rc;
}
int
pcmk_designated_controller(xmlNodePtr *xml, unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__out_prologue(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__designated_controller(out, (guint) message_timeout_ms);
pcmk__out_epilogue(out, xml, rc);
return rc;
}
int
pcmk__pacemakerd_status(pcmk__output_t *out, char *ipc_name, guint message_timeout_ms)
{
data_t data = {
.out = out,
.mainloop = NULL,
.rc = pcmk_rc_ok,
.message_timer_id = 0,
.message_timeout_ms = message_timeout_ms
};
pcmk_ipc_api_t *pacemakerd_api = ipc_connect(&data, pcmk_ipc_pacemakerd, pacemakerd_event_cb);
if (pacemakerd_api != NULL) {
int rc = pcmk_pacemakerd_api_ping(pacemakerd_api, ipc_name);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
data.rc = rc;
}
start_main_loop(&data);
pcmk_free_ipc_api(pacemakerd_api);
}
return data.rc;
}
int
pcmk_pacemakerd_status(xmlNodePtr *xml, char *ipc_name, unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__out_prologue(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__pacemakerd_status(out, ipc_name, (guint) message_timeout_ms);
pcmk__out_epilogue(out, xml, rc);
return rc;
}
/* user data for looping through remote node xpath searches */
struct node_data {
pcmk__output_t *out;
int found;
const char *field; /* XML attribute to check for node name */
const char *type;
gboolean BASH_EXPORT;
};
static void
remote_node_print_helper(xmlNode *result, void *user_data)
{
struct node_data *data = user_data;
pcmk__output_t *out = data->out;
const char *name = crm_element_value(result, XML_ATTR_UNAME);
const char *id = crm_element_value(result, data->field);
// node name and node id are the same for remote/guest nodes
out->message(out, "crmadmin-node", data->type,
name ? name : id,
id,
data->BASH_EXPORT);
data->found++;
}
// \return Standard Pacemaker return code
int
pcmk__list_nodes(pcmk__output_t *out, char *node_types, gboolean BASH_EXPORT)
{
cib_t *the_cib = cib_new();
xmlNode *xml_node = NULL;
int rc;
if (the_cib == NULL) {
return ENOMEM;
}
rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command);
if (rc != pcmk_ok) {
return pcmk_legacy2rc(rc);
}
rc = the_cib->cmds->query(the_cib, NULL, &xml_node,
cib_scope_local | cib_sync_call);
if (rc == pcmk_ok) {
struct node_data data = {
.out = out,
.found = 0,
.BASH_EXPORT = BASH_EXPORT
};
out->begin_list(out, NULL, NULL, "nodes");
if (!pcmk__str_empty(node_types) && strstr(node_types, "all")) {
node_types = NULL;
}
if (pcmk__str_empty(node_types) || strstr(node_types, "cluster")) {
data.field = "id";
data.type = "cluster";
crm_foreach_xpath_result(xml_node, PCMK__XP_MEMBER_NODE_CONFIG,
remote_node_print_helper, &data);
}
if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) {
data.field = "value";
data.type = "guest";
crm_foreach_xpath_result(xml_node, PCMK__XP_GUEST_NODE_CONFIG,
remote_node_print_helper, &data);
}
if (pcmk__str_empty(node_types) || !pcmk__strcmp(node_types, ",|^remote", pcmk__str_regex)) {
data.field = "id";
data.type = "remote";
crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_CONFIG,
remote_node_print_helper, &data);
}
out->end_list(out);
if (data.found == 0) {
out->info(out, "No nodes configured");
}
free_xml(xml_node);
}
the_cib->cmds->signoff(the_cib);
return pcmk_legacy2rc(rc);
}
int
pcmk_list_nodes(xmlNodePtr *xml, char *node_types)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__out_prologue(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__list_nodes(out, node_types, FALSE);
pcmk__out_epilogue(out, xml, rc);
return rc;
}
-
-// remove when parameters removed from tools/crmadmin.c
-int
-pcmk__shutdown_controller(pcmk__output_t *out, char *dest_node)
-{
- data_t data = {
- .out = out,
- .mainloop = NULL,
- .rc = pcmk_rc_ok,
- };
- pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, NULL);
-
- if (controld_api != NULL) {
- int rc = pcmk_controld_api_shutdown(controld_api, dest_node);
- if (rc != pcmk_rc_ok) {
- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
- data.rc = rc;
- }
- pcmk_free_ipc_api(controld_api);
- }
-
- return data.rc;
-}
-
-int
-pcmk__start_election(pcmk__output_t *out)
-{
- data_t data = {
- .out = out,
- .mainloop = NULL,
- .rc = pcmk_rc_ok,
- };
- pcmk_ipc_api_t *controld_api = ipc_connect(&data, pcmk_ipc_controld, NULL);
-
- if (controld_api != NULL) {
- int rc = pcmk_controld_api_start_election(controld_api);
- if (rc != pcmk_rc_ok) {
- out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
- data.rc = rc;
- }
-
- pcmk_free_ipc_api(controld_api);
- }
-
- return data.rc;
-}
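
The functions above all follow the same asynchronous pattern used throughout this file: connect to the daemon's IPC, send a ping, then run a GMainLoop until either the reply callback or the message timeout fires. For callers outside libpacemaker, the pcmk_*() wrappers (such as pcmk_pacemakerd_status() above) hide that machinery behind one call that hands back the result as XML. The sketch below shows what such a caller might look like; it assumes these wrappers are exported through <pacemaker.h> (in some builds they are gated behind a public-API option), so treat it as illustrative rather than canonical.

#include <stdio.h>
#include <libxml/tree.h>

#include <crm/crm.h>
#include <crm/common/xml.h>     // free_xml()
#include <pacemaker.h>          // pcmk_pacemakerd_status() (assumed export)

int
main(void)
{
    xmlNodePtr status = NULL;

    /* NULL keeps the default IPC name; the timeout is in milliseconds, and
     * 0 would fall back to DEFAULT_MESSAGE_TIMEOUT_MS (30s) as defined above.
     */
    int rc = pcmk_pacemakerd_status(&status, NULL, 30000);

    if ((rc == pcmk_rc_ok) && (status != NULL)) {
        xmlElemDump(stdout, NULL, status);  // print the pacemakerd-health XML
        printf("\n");
    } else {
        fprintf(stderr, "pacemakerd status failed: %s\n", pcmk_rc_str(rc));
    }

    free_xml(status);
    return (rc == pcmk_rc_ok) ? 0 : 1;
}
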
diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
index 655b723927..db407b920a 100644
--- a/lib/pacemaker/pcmk_output.c
+++ b/lib/pacemaker/pcmk_output.c
@@ -1,637 +1,637 @@
/*
* Copyright 2019-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/results.h>
#include <crm/common/output_internal.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/pengine/internal.h>
#include <libxml/tree.h>
#include <pacemaker-internal.h>
pcmk__supported_format_t pcmk__out_formats[] = {
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
int
pcmk__out_prologue(pcmk__output_t **out, xmlNodePtr *xml) {
int rc = pcmk_rc_ok;
if (*xml != NULL) {
xmlFreeNode(*xml);
}
pcmk__register_formats(NULL, pcmk__out_formats);
rc = pcmk__output_new(out, "xml", NULL, NULL);
if (rc != pcmk_rc_ok) {
return rc;
}
return rc;
}
void
pcmk__out_epilogue(pcmk__output_t *out, xmlNodePtr *xml, int retval) {
if (retval == pcmk_rc_ok) {
out->finish(out, 0, FALSE, (void **) xml);
}
pcmk__output_free(out);
}
static char *
colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons,
gboolean dependents) {
char *score = NULL;
char *retval = NULL;
score = score2char(cons->score);
if (cons->role_rh > RSC_ROLE_STARTED) {
retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)",
rsc->id, score, dependents ? "needs" : "with",
role2text(cons->role_rh), cons->id);
} else {
retval = crm_strdup_printf("%s (score=%s, id=%s)",
rsc->id, score, cons->id);
}
free(score);
return retval;
}
static void
colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc,
pcmk__colocation_t *cons) {
char *score = NULL;
xmlNodePtr node = NULL;
score = score2char(cons->score);
node = pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_DEPEND,
"id", cons->id,
"rsc", cons->rsc_lh->id,
"with-rsc", cons->rsc_rh->id,
"score", score,
NULL);
if (cons->node_attribute) {
xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute);
}
if (cons->role_lh != RSC_ROLE_UNKNOWN) {
xmlSetProp(node, (pcmkXmlStr) "rsc-role", (pcmkXmlStr) role2text(cons->role_lh));
}
if (cons->role_rh != RSC_ROLE_UNKNOWN) {
xmlSetProp(node, (pcmkXmlStr) "with-rsc-role", (pcmkXmlStr) role2text(cons->role_rh));
}
free(score);
}
static int
do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
{
GList *lpc = NULL;
GList *list = rsc->rsc_location;
int rc = pcmk_rc_no_output;
for (lpc = list; lpc != NULL; lpc = lpc->next) {
pe__location_t *cons = lpc->data;
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
pe_node_t *node = (pe_node_t *) lpc2->data;
char *score = score2char(node->weight);
if (add_header) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "locations");
}
pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION,
"node", node->details->uname,
"rsc", rsc->id,
"id", cons->id,
"score", score,
NULL);
free(score);
}
}
if (add_header) {
PCMK__OUTPUT_LIST_FOOTER(out, rc);
}
return rc;
}
PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean")
static int
rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
gboolean recursive = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
return rc;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources %s is colocated with", rsc->id);
if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) {
out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_rh->id, cons->id);
continue;
}
hdr = colocations_header(cons->rsc_rh, cons, FALSE);
out->list_item(out, NULL, "%s", hdr);
free(hdr);
/* Empty list header just for indentation of information about this resource. */
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->rsc_rh);
if (recursive) {
out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive);
}
out->end_list(out);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "gboolean")
static int
rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
gboolean recursive = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
return rc;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
if (pcmk_is_set(cons->rsc_rh->flags, pe_rsc_allocating)) {
colocations_xml_node(out, cons->rsc_rh, cons);
continue;
}
colocations_xml_node(out, cons->rsc_rh, cons);
do_locations_list_xml(out, cons->rsc_rh, false);
if (recursive) {
out->message(out, "rsc-is-colocated-with-list", cons->rsc_rh, recursive);
}
}
return rc;
}
PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean")
static int
rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
gboolean recursive = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
return rc;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resources colocated with %s", rsc->id);
if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) {
out->list_item(out, NULL, "%s (id=%s - loop)", cons->rsc_lh->id, cons->id);
continue;
}
hdr = colocations_header(cons->rsc_lh, cons, TRUE);
out->list_item(out, NULL, "%s", hdr);
free(hdr);
/* Empty list header just for indentation of information about this resource. */
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->rsc_lh);
if (recursive) {
out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive);
}
out->end_list(out);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "gboolean")
static int
rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
gboolean recursive = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
return rc;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
if (pcmk_is_set(cons->rsc_lh->flags, pe_rsc_allocating)) {
colocations_xml_node(out, cons->rsc_lh, cons);
continue;
}
colocations_xml_node(out, cons->rsc_lh, cons);
do_locations_list_xml(out, cons->rsc_lh, false);
if (recursive) {
out->message(out, "rscs-colocated-with-list", cons->rsc_lh, recursive);
}
}
return rc;
}
PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
static int
locations_list(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GList *lpc = NULL;
GList *list = rsc->rsc_location;
int rc = pcmk_rc_no_output;
for (lpc = list; lpc != NULL; lpc = lpc->next) {
pe__location_t *cons = lpc->data;
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
pe_node_t *node = (pe_node_t *) lpc2->data;
char *score = score2char(node->weight);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Locations");
out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)",
node->details->uname, score, cons->id, rsc->id);
free(score);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
static int
locations_list_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
return do_locations_list_xml(out, rsc, true);
}
PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean")
static int
stacks_and_constraints(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean recursive = va_arg(args, gboolean);
xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
data_set->input);
unpack_constraints(cib_constraints, data_set);
// Constraints apply to group/clone, not member/instance
rsc = uber_parent(rsc);
out->message(out, "locations-list", rsc);
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("stacks-constraints", "pe_resource_t *", "pe_working_set_t *", "gboolean")
static int
stacks_and_constraints_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean recursive = va_arg(args, gboolean);
xmlNodePtr cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
data_set->input);
unpack_constraints(cib_constraints, data_set);
// Constraints apply to group/clone, not member/instance
rsc = uber_parent(rsc);
pcmk__output_xml_create_parent(out, "constraints", NULL);
do_locations_list_xml(out, rsc, false);
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
static int
health_text(pcmk__output_t *out, va_list args)
{
- const char *sys_from = va_arg(args, const char *);
+ const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *);
const char *host_from = va_arg(args, const char *);
const char *fsa_state = va_arg(args, const char *);
const char *result = va_arg(args, const char *);
if (!out->is_quiet(out)) {
- out->info(out, "Status of %s@%s: %s (%s)", crm_str(sys_from),
- crm_str(host_from), crm_str(fsa_state), crm_str(result));
+ out->info(out, "Controller on %s in state %s: %s", crm_str(host_from),
+ crm_str(fsa_state), crm_str(result));
} else if (fsa_state != NULL) {
out->info(out, "%s", fsa_state);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
static int
health_xml(pcmk__output_t *out, va_list args)
{
const char *sys_from = va_arg(args, const char *);
const char *host_from = va_arg(args, const char *);
const char *fsa_state = va_arg(args, const char *);
const char *result = va_arg(args, const char *);
pcmk__output_create_xml_node(out, crm_str(sys_from),
"node_name", crm_str(host_from),
"state", crm_str(fsa_state),
"result", crm_str(result),
NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *")
static int
pacemakerd_health_text(pcmk__output_t *out, va_list args)
{
const char *sys_from = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
const char *last_updated = va_arg(args, const char *);
if (!out->is_quiet(out)) {
out->info(out, "Status of %s: '%s' %s %s", crm_str(sys_from),
crm_str(state), (!pcmk__str_empty(last_updated))?
"last updated":"", crm_str(last_updated));
} else {
out->info(out, "%s", crm_str(state));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "const char *", "const char *")
static int
pacemakerd_health_xml(pcmk__output_t *out, va_list args)
{
const char *sys_from = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
const char *last_updated = va_arg(args, const char *);
pcmk__output_create_xml_node(out, crm_str(sys_from),
"state", crm_str(state),
"last_updated", crm_str(last_updated),
NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("dc", "const char *")
static int
dc_text(pcmk__output_t *out, va_list args)
{
const char *dc = va_arg(args, const char *);
if (!out->is_quiet(out)) {
out->info(out, "Designated Controller is: %s", crm_str(dc));
} else if (dc != NULL) {
out->info(out, "%s", dc);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("dc", "const char *")
static int
dc_xml(pcmk__output_t *out, va_list args)
{
const char *dc = va_arg(args, const char *);
pcmk__output_create_xml_node(out, "dc",
"node_name", crm_str(dc),
NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean")
static int
crmadmin_node_text(pcmk__output_t *out, va_list args)
{
const char *type = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *id = va_arg(args, const char *);
gboolean BASH_EXPORT = va_arg(args, gboolean);
if (out->is_quiet(out)) {
out->info(out, "%s", crm_str(name));
} else if (BASH_EXPORT) {
out->info(out, "export %s=%s", crm_str(name), crm_str(id));
} else {
out->info(out, "%s node: %s (%s)", type ? type : "cluster",
crm_str(name), crm_str(id));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "gboolean")
static int
crmadmin_node_xml(pcmk__output_t *out, va_list args)
{
const char *type = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *id = va_arg(args, const char *);
pcmk__output_create_xml_node(out, "node",
"type", type ? type : "cluster",
"name", crm_str(name),
"id", crm_str(id),
NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *",
"guint", "op_digest_cache_t *")
static int
digests_text(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *);
char *action_desc = NULL;
const char *rsc_desc = "unknown resource";
const char *node_desc = "unknown node";
if (interval_ms != 0) {
action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms,
((task == NULL)? "unknown" : task));
} else if (pcmk__str_eq(task, "monitor", pcmk__str_none)) {
action_desc = strdup("probe action");
} else {
action_desc = crm_strdup_printf("%s action",
((task == NULL)? "unknown" : task));
}
if ((rsc != NULL) && (rsc->id != NULL)) {
rsc_desc = rsc->id;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_desc = node->details->uname;
}
out->begin_list(out, NULL, NULL, "Digests for %s %s on %s",
rsc_desc, action_desc, node_desc);
free(action_desc);
if (digests == NULL) {
out->list_item(out, NULL, "none");
out->end_list(out);
return pcmk_rc_ok;
}
if (digests->digest_all_calc != NULL) {
out->list_item(out, NULL, "%s (all parameters)",
digests->digest_all_calc);
}
if (digests->digest_secure_calc != NULL) {
out->list_item(out, NULL, "%s (non-private parameters)",
digests->digest_secure_calc);
}
if (digests->digest_restart_calc != NULL) {
out->list_item(out, NULL, "%s (non-reloadable parameters)",
digests->digest_restart_calc);
}
out->end_list(out);
return pcmk_rc_ok;
}
static void
add_digest_xml(xmlNode *parent, const char *type, const char *digest,
xmlNode *digest_source)
{
if (digest != NULL) {
xmlNodePtr digest_xml = create_xml_node(parent, "digest");
crm_xml_add(digest_xml, "type", ((type == NULL)? "unspecified" : type));
crm_xml_add(digest_xml, "hash", digest);
if (digest_source != NULL) {
add_node_copy(digest_xml, digest_source);
}
}
}
PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *",
"guint", "op_digest_cache_t *")
static int
digests_xml(pcmk__output_t *out, va_list args)
{
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
pe_node_t *node = va_arg(args, pe_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *);
char *interval_s = crm_strdup_printf("%ums", interval_ms);
xmlNode *xml = NULL;
xml = pcmk__output_create_xml_node(out, "digests",
"resource", crm_str(rsc->id),
"node", crm_str(node->details->uname),
"task", crm_str(task),
"interval", interval_s,
NULL);
free(interval_s);
if (digests != NULL) {
add_digest_xml(xml, "all", digests->digest_all_calc,
digests->params_all);
add_digest_xml(xml, "nonprivate", digests->digest_secure_calc,
digests->params_secure);
add_digest_xml(xml, "nonreloadable", digests->digest_restart_calc,
digests->params_restart);
}
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list },
{ "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml },
{ "rscs-colocated-with-list", "default", rscs_colocated_with_list },
{ "rscs-colocated-with-list", "xml", rscs_colocated_with_list_xml },
{ "locations-list", "default", locations_list },
{ "locations-list", "xml", locations_list_xml },
{ "stacks-constraints", "default", stacks_and_constraints },
{ "stacks-constraints", "xml", stacks_and_constraints_xml },
{ "health", "default", health_text },
{ "health", "xml", health_xml },
{ "pacemakerd-health", "default", pacemakerd_health_text },
{ "pacemakerd-health", "xml", pacemakerd_health_xml },
{ "dc", "default", dc_text },
{ "dc", "xml", dc_xml },
{ "crmadmin-node", "default", crmadmin_node_text },
{ "crmadmin-node", "xml", crmadmin_node_xml },
{ "digests", "default", digests_text },
{ "digests", "xml", digests_xml },
{ NULL, NULL, NULL }
};
void
pcmk__register_lib_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
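
Each message name in fmt_functions is registered with both a "default" (text) handler and an "xml" handler, and out->message() dispatches on whichever format the output object was created with. The sketch below shows that dispatch in isolation, driving the "dc" message through the text formatter; it reuses the internal pcmk__output/pcmk__register_* calls exactly as they appear in this patch, but these are internal APIs, so header names and signatures may shift between Pacemaker versions.

#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/common/output_internal.h>
#include <pacemaker-internal.h>         // pcmk__register_lib_messages()

static pcmk__supported_format_t formats[] = {
    PCMK__SUPPORTED_FORMAT_TEXT,
    PCMK__SUPPORTED_FORMAT_XML,
    { NULL, NULL, NULL }
};

int
main(int argc, char **argv)
{
    pcmk__output_t *out = NULL;

    pcmk__register_formats(NULL, formats);

    /* "xml" here instead of "text" would route the message to dc_xml() */
    if (pcmk__output_new(&out, "text", NULL, argv) != pcmk_rc_ok) {
        return 1;
    }
    pcmk__register_lib_messages(out);   // registers fmt_functions from above

    out->message(out, "dc", "node1");   // dispatches to dc_text()

    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);
    return 0;
}
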
diff --git a/tools/crmadmin.c b/tools/crmadmin.c
index 04d35f41d9..e754a8acca 100644
--- a/tools/crmadmin.c
+++ b/tools/crmadmin.c
@@ -1,289 +1,277 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h> // atoi()
#include <glib.h> // gboolean, GMainLoop, etc.
#include <libxml/tree.h> // xmlNode
#include <pacemaker-internal.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/output_internal.h>
#define SUMMARY "query and manage the Pacemaker controller"
static enum {
cmd_none,
- cmd_shutdown,
cmd_health,
- cmd_elect_dc,
cmd_whois_dc,
cmd_list_nodes,
cmd_pacemakerd_health,
} command = cmd_none;
struct {
gboolean health;
gint timeout;
char *optarg;
char *ipc_name;
gboolean BASH_EXPORT;
} options = {
.optarg = NULL,
.ipc_name = NULL,
.BASH_EXPORT = FALSE
};
gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static GOptionEntry command_options[] = {
{ "status", 'S', 0, G_OPTION_ARG_CALLBACK, command_cb,
"Display the status of the specified node."
- "\n Result is state of node's internal finite state"
- "\n machine, which can be useful for debugging",
- NULL
+ "\n Result is state of node's internal finite state"
+ "\n machine, which can be useful for debugging",
+ "NODE"
},
{ "pacemakerd", 'P', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the status of local pacemakerd."
- "\n Result is the state of the sub-daemons watched"
- "\n by pacemakerd.",
+ "\n Result is the state of the sub-daemons watched"
+ "\n by pacemakerd.",
NULL
},
{ "dc_lookup", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the uname of the node co-ordinating the cluster."
- "\n This is an internal detail rarely useful to"
- "\n administrators except when deciding on which"
- "\n node to examine the logs.",
+ "\n This is an internal detail rarely useful to"
+ "\n administrators except when deciding on which"
+ "\n node to examine the logs.",
NULL
},
{ "nodes", 'N', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the uname of all member nodes [optionally filtered by type (comma-separated)]"
- "\n Types: all (default), cluster, guest, remote",
+ "\n Types: all (default), cluster, guest, remote",
"TYPE"
},
- { "election", 'E', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
- "(Advanced) Start an election for the cluster co-ordinator",
- NULL
- },
- { "kill", 'K', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, command_cb,
- "(Advanced) Stop controller (not rest of cluster stack) on specified node",
- NULL
- },
{ "health", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.health,
NULL,
NULL
},
{ NULL }
};
static GOptionEntry additional_options[] = {
- { "timeout", 't', 0, G_OPTION_ARG_INT, &options.timeout,
- "Time (in milliseconds) to wait before declaring the"
- "\n operation failed",
- NULL
+ { "timeout", 't', 0, G_OPTION_ARG_CALLBACK, command_cb,
+ "Time to wait before declaring the operation"
+ "\n failed",
+ "TIMESPEC"
},
{ "bash-export", 'B', 0, G_OPTION_ARG_NONE, &options.BASH_EXPORT,
"Display nodes as shell commands of the form 'export uname=uuid'"
- "\n (valid with -N/--nodes)",
+ "\n (valid with -N/--nodes)",
},
{ "ipc-name", 'i', 0, G_OPTION_ARG_STRING, &options.ipc_name,
"Name to use for ipc instead of 'crmadmin' (with -P/--pacemakerd).",
- NULL
+ "NAME"
},
{ NULL }
};
gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error)
{
if (!strcmp(option_name, "--status") || !strcmp(option_name, "-S")) {
command = cmd_health;
crm_trace("Option %c => %s", 'S', optarg);
}
if (!strcmp(option_name, "--pacemakerd") || !strcmp(option_name, "-P")) {
command = cmd_pacemakerd_health;
}
if (!strcmp(option_name, "--dc_lookup") || !strcmp(option_name, "-D")) {
command = cmd_whois_dc;
}
if (!strcmp(option_name, "--nodes") || !strcmp(option_name, "-N")) {
command = cmd_list_nodes;
}
- if (!strcmp(option_name, "--election") || !strcmp(option_name, "-E")) {
- command = cmd_elect_dc;
- }
-
- if (!strcmp(option_name, "--kill") || !strcmp(option_name, "-K")) {
- command = cmd_shutdown;
- crm_trace("Option %c => %s", 'K', optarg);
+ if (!strcmp(option_name, "--timeout") || !strcmp(option_name, "-t")) {
+ options.timeout = crm_parse_interval_spec(optarg);
+ if (errno == EINVAL) {
+ return FALSE;
+ }
+ return TRUE;
}
if (optarg) {
if (options.optarg != NULL) {
free(options.optarg);
}
options.optarg = strdup(optarg);
}
return TRUE;
}
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
- const char *description = "Report bugs to users@clusterlabs.org";
+ const char *description = "Notes:\n\n"
+ "Time Specification:\n\n"
+ "The TIMESPEC in any command line option can be specified in many different\n"
+ "formats. It can be just an integer number of seconds, a number plus units\n"
+ "(ms/msec/us/usec/s/sec/m/min/h/hr), or an ISO 8601 period specification.\n\n"
+ "Report bugs to users@clusterlabs.org";
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Display only the essential query information",
NULL },
{ NULL }
};
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "command", "Commands:",
"Show command options", command_options);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", additional_options);
return context;
}
int
main(int argc, char **argv)
{
crm_exit_t exit_code = CRM_EX_OK;
int rc;
int argerr = 0;
GError *error = NULL;
pcmk__output_t *out = NULL;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "itBDEHKNPS");
GOptionContext *context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crmadmin", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
if (!pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname())) {
goto done;
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.health) {
out->err(out, "Cluster-wide health option not supported");
++argerr;
}
if (command == cmd_none) {
out->err(out, "error: Must specify a command option");
++argerr;
}
if (argerr) {
char *help = g_option_context_get_help(context, TRUE, NULL);
out->err(out, "%s", help);
g_free(help);
exit_code = CRM_EX_USAGE;
goto done;
}
switch (command) {
case cmd_health:
rc = pcmk__controller_status(out, options.optarg, options.timeout);
break;
case cmd_pacemakerd_health:
rc = pcmk__pacemakerd_status(out, options.ipc_name, options.timeout);
break;
case cmd_list_nodes:
rc = pcmk__list_nodes(out, options.optarg, options.BASH_EXPORT);
break;
case cmd_whois_dc:
rc = pcmk__designated_controller(out, options.timeout);
break;
- case cmd_shutdown:
- rc = pcmk__shutdown_controller(out, options.optarg);
- break;
- case cmd_elect_dc:
- rc = pcmk__start_election(out);
- break;
case cmd_none:
rc = pcmk_rc_error;
break;
}
if (rc != pcmk_rc_ok) {
out->err(out, "error: Command failed: %s", pcmk_rc_str(rc));
exit_code = pcmk_rc2exitc(rc);
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
pcmk__output_and_clear_error(error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
return crm_exit(exit_code);
}
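
The user-visible effect of the crmadmin.c changes is that -t/--timeout now takes a TIMESPEC parsed by crm_parse_interval_spec() rather than a raw millisecond count, and the hidden -K/--kill and -E/--election commands are gone along with their library helpers removed above. The sketch below illustrates how the parser treats the formats listed in the new help text; crm_parse_interval_spec() is assumed to be declared in <crm/common/util.h> and to flag bad input through errno == EINVAL, which is what the command_cb() check in this patch relies on.

#include <stdio.h>
#include <errno.h>
#include <glib.h>

#include <crm/crm.h>
#include <crm/common/util.h>    // crm_parse_interval_spec() (assumed location)

static void
show(const char *spec)
{
    errno = 0;  // defensive; the parser reports failure via EINVAL
    guint ms = crm_parse_interval_spec(spec);

    if (errno == EINVAL) {
        printf("%-8s -> invalid TIMESPEC\n", spec);
    } else {
        printf("%-8s -> %u ms\n", spec, ms);
    }
}

int
main(void)
{
    show("30");     // bare integer: seconds, per the help text above
    show("30s");    // number plus units
    show("500ms");  // milliseconds
    show("PT1M");   // ISO 8601 period: one minute
    show("bogus");  // rejected, so command_cb() would return FALSE
    return 0;
}
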