
diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c
index c3ab1f51c0..6b4a355290 100644
--- a/lib/pacemaker/pcmk_cluster_queries.c
+++ b/lib/pacemaker/pcmk_cluster_queries.c
@@ -1,899 +1,899 @@
/*
* Copyright 2020-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <libxml/tree.h> // xmlNode
#include <pacemaker.h>
#include <pacemaker-internal.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/output_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/iso8601.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/ipc_pacemakerd.h>
//! Object to store node info from the controller API
typedef struct {
/* Adapted from pcmk_controld_api_reply_t:data:node_info.
* (char **) are convenient here for use within callbacks: we can skip
* copying strings unless the caller passes a non-NULL value.
*/
uint32_t id;
char **node_name;
char **uuid;
char **state;
bool have_quorum;
bool is_remote;
} node_info_t;
//! Object to store API results, a timeout, and an output object
typedef struct {
pcmk__output_t *out;
bool show_output;
int rc;
unsigned int message_timeout_ms;
enum pcmk_pacemakerd_state pcmkd_state;
node_info_t node_info;
} data_t;
/*!
* \internal
* \brief Validate that an IPC API event is a good reply
*
* \param[in,out] data API results and options
* \param[in] api IPC API connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
*
* \return Standard Pacemaker return code
*/
static int
validate_reply_event(data_t *data, const pcmk_ipc_api_t *api,
enum pcmk_ipc_event event_type, crm_exit_t status)
{
pcmk__output_t *out = data->out;
switch (event_type) {
case pcmk_ipc_event_reply:
break;
case pcmk_ipc_event_disconnect:
if (data->rc == ECONNRESET) { // Unexpected
out->err(out, "error: Lost connection to %s",
pcmk_ipc_name(api, true));
}
// Nothing bad but not the reply we're looking for
return ENOTSUP;
default:
// Ditto
return ENOTSUP;
}
if (status != CRM_EX_OK) {
out->err(out, "error: Bad reply from %s: %s",
pcmk_ipc_name(api, true), crm_exit_str(status));
data->rc = EBADMSG;
return data->rc;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Validate that a controller API event is a good reply of expected type
*
* \param[in,out] data API results and options
* \param[in] api Controller connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in] event_data Event-specific data
* \param[in] expected_type Expected reply type
*
* \return Standard Pacemaker return code
*/
static int
validate_controld_reply(data_t *data, const pcmk_ipc_api_t *api,
enum pcmk_ipc_event event_type, crm_exit_t status,
const void *event_data,
enum pcmk_controld_api_reply expected_type)
{
pcmk__output_t *out = data->out;
int rc = pcmk_rc_ok;
const pcmk_controld_api_reply_t *reply = NULL;
rc = validate_reply_event(data, api, event_type, status);
if (rc != pcmk_rc_ok) {
return rc;
}
reply = (const pcmk_controld_api_reply_t *) event_data;
if (reply->reply_type != expected_type) {
out->err(out, "error: Unexpected reply type '%s' from controller",
pcmk__controld_api_reply2str(reply->reply_type));
data->rc = EBADMSG;
return data->rc;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Validate that a \p pacemakerd API event is a good reply of expected
* type
*
* \param[in,out] data API results and options
* \param[in] api \p pacemakerd connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in] event_data Event-specific data
* \param[in] expected_type Expected reply type
*
* \return Standard Pacemaker return code
*/
static int
validate_pcmkd_reply(data_t *data, const pcmk_ipc_api_t *api,
enum pcmk_ipc_event event_type, crm_exit_t status,
const void *event_data,
enum pcmk_pacemakerd_api_reply expected_type)
{
pcmk__output_t *out = data->out;
const pcmk_pacemakerd_api_reply_t *reply = NULL;
int rc = validate_reply_event(data, api, event_type, status);
if (rc != pcmk_rc_ok) {
return rc;
}
reply = (const pcmk_pacemakerd_api_reply_t *) event_data;
if (reply->reply_type != expected_type) {
out->err(out, "error: Unexpected reply type '%s' from pacemakerd",
pcmk__pcmkd_api_reply2str(reply->reply_type));
data->rc = EBADMSG;
return data->rc;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Process a controller status IPC event
*
* \param[in,out] controld_api Controller connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in,out] event_data \p pcmk_controld_api_reply_t object containing
* event-specific data
* \param[in,out] user_data \p data_t object for API results and options
*/
static void
controller_status_event_cb(pcmk_ipc_api_t *controld_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
data_t *data = (data_t *) user_data;
pcmk__output_t *out = data->out;
const pcmk_controld_api_reply_t *reply = NULL;
int rc = validate_controld_reply(data, controld_api, event_type, status,
event_data, pcmk_controld_reply_ping);
if (rc != pcmk_rc_ok) {
return;
}
reply = (const pcmk_controld_api_reply_t *) event_data;
out->message(out, "health",
reply->data.ping.sys_from, reply->host_from,
reply->data.ping.fsa_state, reply->data.ping.result);
data->rc = pcmk_rc_ok;
}
/*!
* \internal
* \brief Process a designated controller IPC event
*
* \param[in,out] controld_api Controller connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in,out] event_data \p pcmk_controld_api_reply_t object containing
* event-specific data
* \param[in,out] user_data \p data_t object for API results and options
*/
static void
designated_controller_event_cb(pcmk_ipc_api_t *controld_api,
enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data,
void *user_data)
{
data_t *data = (data_t *) user_data;
pcmk__output_t *out = data->out;
const pcmk_controld_api_reply_t *reply = NULL;
int rc = validate_controld_reply(data, controld_api, event_type, status,
event_data, pcmk_controld_reply_ping);
if (rc != pcmk_rc_ok) {
return;
}
reply = (const pcmk_controld_api_reply_t *) event_data;
out->message(out, "dc", reply->host_from);
data->rc = pcmk_rc_ok;
}
/*!
* \internal
* \brief Process a node info IPC event
*
* \param[in,out] controld_api Controller connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in,out] event_data \p pcmk_controld_api_reply_t object containing
* event-specific data
* \param[in,out] user_data \p data_t object for API results and options
*/
static void
node_info_event_cb(pcmk_ipc_api_t *controld_api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
data_t *data = (data_t *) user_data;
pcmk__output_t *out = data->out;
const pcmk_controld_api_reply_t *reply = NULL;
int rc = validate_controld_reply(data, controld_api, event_type, status,
event_data, pcmk_controld_reply_info);
if (rc != pcmk_rc_ok) {
return;
}
reply = (const pcmk_controld_api_reply_t *) event_data;
if (reply->data.node_info.uname == NULL) {
out->err(out, "Node is not known to cluster");
data->rc = pcmk_rc_node_unknown;
return;
}
data->node_info.have_quorum = reply->data.node_info.have_quorum;
data->node_info.is_remote = reply->data.node_info.is_remote;
data->node_info.id = (uint32_t) reply->data.node_info.id;
pcmk__str_update(data->node_info.node_name, reply->data.node_info.uname);
pcmk__str_update(data->node_info.uuid, reply->data.node_info.uuid);
pcmk__str_update(data->node_info.state, reply->data.node_info.state);
if (data->show_output) {
out->message(out, "node-info",
- reply->data.node_info.id, reply->data.node_info.uname,
+ (uint32_t) reply->data.node_info.id, reply->data.node_info.uname,
reply->data.node_info.uuid, reply->data.node_info.state,
reply->data.node_info.have_quorum,
reply->data.node_info.is_remote);
}
data->rc = pcmk_rc_ok;
}
/*!
* \internal
* \brief Process a \p pacemakerd status IPC event
*
* \param[in,out] pacemakerd_api \p pacemakerd connection
* \param[in] event_type Type of event that occurred
* \param[in] status Event status
* \param[in,out] event_data \p pcmk_pacemakerd_api_reply_t object
* containing event-specific data
* \param[in,out] user_data \p data_t object for API results and options
*/
static void
pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
data_t *data = user_data;
pcmk__output_t *out = data->out;
const pcmk_pacemakerd_api_reply_t *reply = NULL;
int rc = validate_pcmkd_reply(data, pacemakerd_api, event_type, status,
event_data, pcmk_pacemakerd_reply_ping);
if (rc != pcmk_rc_ok) {
return;
}
// Parse desired information from reply
reply = (const pcmk_pacemakerd_api_reply_t *) event_data;
data->pcmkd_state = reply->data.ping.state;
data->rc = pcmk_rc_ok;
if (!data->show_output) {
return;
}
if (reply->data.ping.status == pcmk_rc_ok) {
out->message(out, "pacemakerd-health",
reply->data.ping.sys_from, reply->data.ping.state, NULL,
reply->data.ping.last_good);
} else {
out->message(out, "pacemakerd-health",
reply->data.ping.sys_from, reply->data.ping.state,
"query failed", time(NULL));
}
}
static pcmk_ipc_api_t *
ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb,
enum pcmk_ipc_dispatch dispatch_type, bool eremoteio_ok)
{
int rc;
pcmk__output_t *out = data->out;
pcmk_ipc_api_t *api = NULL;
rc = pcmk_new_ipc_api(&api, server);
if (api == NULL) {
out->err(out, "error: Could not connect to %s: %s",
pcmk_ipc_name(api, true),
pcmk_rc_str(rc));
data->rc = rc;
return NULL;
}
if (cb != NULL) {
pcmk_register_ipc_callback(api, cb, data);
}
rc = pcmk__connect_ipc(api, dispatch_type, 5);
if (rc != pcmk_rc_ok) {
if (rc == EREMOTEIO) {
data->pcmkd_state = pcmk_pacemakerd_state_remote;
if (eremoteio_ok) {
/* EREMOTEIO may be expected and acceptable for some callers
* on a Pacemaker Remote node
*/
crm_debug("Ignoring %s connection failure: No "
"Pacemaker Remote connection",
pcmk_ipc_name(api, true));
rc = pcmk_rc_ok;
} else {
out->err(out, "error: Could not connect to %s: %s",
pcmk_ipc_name(api, true), pcmk_rc_str(rc));
}
}
data->rc = rc;
pcmk_free_ipc_api(api);
return NULL;
}
return api;
}
/*!
* \internal
* \brief Poll an IPC API connection until timeout or a reply is received
*
* \param[in,out] data API results and options
* \param[in,out] api IPC API connection
* \param[in] on_node If not \p NULL, name of the node to poll (used only
* for logging)
*
* \note Sets the \p rc member of \p data on error
*/
static void
poll_until_reply(data_t *data, pcmk_ipc_api_t *api, const char *on_node)
{
pcmk__output_t *out = data->out;
uint64_t start_nsec = qb_util_nano_current_get();
uint64_t end_nsec = 0;
uint64_t elapsed_ms = 0;
uint64_t remaining_ms = data->message_timeout_ms;
while (remaining_ms > 0) {
int rc = pcmk_poll_ipc(api, remaining_ms);
if (rc == EAGAIN) {
// Poll timed out
break;
}
if (rc != pcmk_rc_ok) {
out->err(out, "error: Failed to poll %s API%s%s: %s",
pcmk_ipc_name(api, true), (on_node != NULL)? " on " : "",
pcmk__s(on_node, ""), pcmk_rc_str(rc));
data->rc = rc;
return;
}
pcmk_dispatch_ipc(api);
if (data->rc != EAGAIN) {
// Received a reply
return;
}
end_nsec = qb_util_nano_current_get();
elapsed_ms = (end_nsec - start_nsec) / QB_TIME_NS_IN_MSEC;
remaining_ms = data->message_timeout_ms - elapsed_ms;
}
out->err(out,
"error: Timed out after %ums waiting for reply from %s API%s%s",
data->message_timeout_ms, pcmk_ipc_name(api, true),
(on_node != NULL)? " on " : "", pcmk__s(on_node, ""));
data->rc = EAGAIN;
}
/*!
* \internal
* \brief Get and output controller status
*
* \param[in,out] out Output object
* \param[in] node_name Name of node whose status is desired
* (\p NULL for DC)
* \param[in] message_timeout_ms How long to wait for a reply from the
* \p pacemaker-controld API. If 0,
* \p pcmk_ipc_dispatch_sync will be used.
* Otherwise, \p pcmk_ipc_dispatch_poll will
* be used.
*
* \return Standard Pacemaker return code
*/
int
pcmk__controller_status(pcmk__output_t *out, const char *node_name,
unsigned int message_timeout_ms)
{
data_t data = {
.out = out,
.rc = EAGAIN,
.message_timeout_ms = message_timeout_ms,
};
enum pcmk_ipc_dispatch dispatch_type = pcmk_ipc_dispatch_poll;
pcmk_ipc_api_t *controld_api = NULL;
if (message_timeout_ms == 0) {
dispatch_type = pcmk_ipc_dispatch_sync;
}
controld_api = ipc_connect(&data, pcmk_ipc_controld,
controller_status_event_cb, dispatch_type,
false);
if (controld_api != NULL) {
int rc = pcmk_controld_api_ping(controld_api, node_name);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Could not ping controller API on %s: %s",
pcmk__s(node_name, "DC"), pcmk_rc_str(rc));
data.rc = rc;
}
if (dispatch_type == pcmk_ipc_dispatch_poll) {
poll_until_reply(&data, controld_api, pcmk__s(node_name, "DC"));
}
pcmk_free_ipc_api(controld_api);
}
return data.rc;
}
// Documented in header
int
pcmk_controller_status(xmlNodePtr *xml, const char *node_name,
unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__controller_status(out, node_name, message_timeout_ms);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
/*!
* \internal
* \brief Get and output designated controller node name
*
* \param[in,out] out Output object
* \param[in] message_timeout_ms How long to wait for a reply from the
* \p pacemaker-controld API. If 0,
* \p pcmk_ipc_dispatch_sync will be used.
* Otherwise, \p pcmk_ipc_dispatch_poll will
* be used.
*
* \return Standard Pacemaker return code
*/
int
pcmk__designated_controller(pcmk__output_t *out,
unsigned int message_timeout_ms)
{
data_t data = {
.out = out,
.rc = EAGAIN,
.message_timeout_ms = message_timeout_ms,
};
enum pcmk_ipc_dispatch dispatch_type = pcmk_ipc_dispatch_poll;
pcmk_ipc_api_t *controld_api = NULL;
if (message_timeout_ms == 0) {
dispatch_type = pcmk_ipc_dispatch_sync;
}
controld_api = ipc_connect(&data, pcmk_ipc_controld,
designated_controller_event_cb, dispatch_type,
false);
if (controld_api != NULL) {
int rc = pcmk_controld_api_ping(controld_api, NULL);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Could not ping controller API on DC: %s",
pcmk_rc_str(rc));
data.rc = rc;
}
if (dispatch_type == pcmk_ipc_dispatch_poll) {
poll_until_reply(&data, controld_api, "DC");
}
pcmk_free_ipc_api(controld_api);
}
return data.rc;
}
// Documented in header
int
pcmk_designated_controller(xmlNodePtr *xml, unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__designated_controller(out, message_timeout_ms);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
/*!
* \internal
* \brief Get and optionally output node info corresponding to a node ID from
* the controller
*
* \param[in,out] out Output object
* \param[in,out] node_id ID of node whose name to get. If \p NULL
* or 0, get the local node name. If not
* \p NULL, store the true node ID here on
* success.
* \param[out] node_name If not \p NULL, where to store the node
* name
* \param[out] uuid If not \p NULL, where to store the node
* UUID
* \param[out] state If not \p NULL, where to store the
* membership state
* \param[out] is_remote If not \p NULL, where to store whether the
* node is a Pacemaker Remote node
* \param[out] have_quorum If not \p NULL, where to store whether the
* node has quorum
* \param[in] show_output Whether to show the node info
* \param[in] message_timeout_ms How long to wait for a reply from the
* \p pacemaker-controld API. If 0,
* \p pcmk_ipc_dispatch_sync will be used.
* Otherwise, \p pcmk_ipc_dispatch_poll will
* be used.
*
* \return Standard Pacemaker return code
*
* \note The caller is responsible for freeing \p *node_name, \p *uuid, and
* \p *state using \p free().
*/
int
pcmk__query_node_info(pcmk__output_t *out, uint32_t *node_id, char **node_name,
char **uuid, char **state, bool *have_quorum,
bool *is_remote, bool show_output,
unsigned int message_timeout_ms)
{
data_t data = {
.out = out,
.show_output = show_output,
.rc = EAGAIN,
.message_timeout_ms = message_timeout_ms,
.node_info = {
.id = (node_id == NULL)? 0 : *node_id,
.node_name = node_name,
.uuid = uuid,
.state = state,
},
};
enum pcmk_ipc_dispatch dispatch_type = pcmk_ipc_dispatch_poll;
pcmk_ipc_api_t *controld_api = NULL;
if (node_name != NULL) {
*node_name = NULL;
}
if (uuid != NULL) {
*uuid = NULL;
}
if (state != NULL) {
*state = NULL;
}
if (message_timeout_ms == 0) {
dispatch_type = pcmk_ipc_dispatch_sync;
}
controld_api = ipc_connect(&data, pcmk_ipc_controld, node_info_event_cb,
dispatch_type, false);
if (controld_api != NULL) {
int rc = pcmk_controld_api_node_info(controld_api,
(node_id != NULL)? *node_id : 0);
if (rc != pcmk_rc_ok) {
out->err(out,
"error: Could not send request to controller API on local "
"node: %s", pcmk_rc_str(rc));
data.rc = rc;
}
if (dispatch_type == pcmk_ipc_dispatch_poll) {
poll_until_reply(&data, controld_api, "local node");
}
pcmk_free_ipc_api(controld_api);
}
if (data.rc != pcmk_rc_ok) {
return data.rc;
}
// String outputs are set in callback
if (node_id != NULL) {
*node_id = data.node_info.id;
}
if (have_quorum != NULL) {
*have_quorum = data.node_info.have_quorum;
}
if (is_remote != NULL) {
*is_remote = data.node_info.is_remote;
}
return data.rc;
}
// Documented in header
int
pcmk_query_node_info(xmlNodePtr *xml, uint32_t *node_id, char **node_name,
char **uuid, char **state, bool *have_quorum,
bool *is_remote, bool show_output,
unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
CRM_ASSERT(node_name != NULL);
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__query_node_info(out, node_id, node_name, uuid, state,
have_quorum, is_remote, show_output,
message_timeout_ms);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
/*!
* \internal
* \brief Get and optionally output \p pacemakerd status
*
* \param[in,out] out Output object
* \param[in] ipc_name IPC name for request
* \param[in] message_timeout_ms How long to wait for a reply from the
* \p pacemakerd API. If 0,
* \p pcmk_ipc_dispatch_sync will be used.
* Otherwise, \p pcmk_ipc_dispatch_poll will
* be used.
* \param[in] show_output Whether to output the \p pacemakerd state
* \param[out] state Where to store the \p pacemakerd state, if
* not \p NULL
*
* \return Standard Pacemaker return code
*
* \note This function sets \p state to \p pcmk_pacemakerd_state_remote and
* returns \p pcmk_rc_ok if the IPC connection attempt returns
* \p EREMOTEIO. That code indicates that this is a Pacemaker Remote node
* with \p pacemaker-remoted running. The node may be connected to the
* cluster.
*/
int
pcmk__pacemakerd_status(pcmk__output_t *out, const char *ipc_name,
unsigned int message_timeout_ms, bool show_output,
enum pcmk_pacemakerd_state *state)
{
data_t data = {
.out = out,
.show_output = show_output,
.rc = EAGAIN,
.message_timeout_ms = message_timeout_ms,
.pcmkd_state = pcmk_pacemakerd_state_invalid,
};
enum pcmk_ipc_dispatch dispatch_type = pcmk_ipc_dispatch_poll;
pcmk_ipc_api_t *pacemakerd_api = NULL;
if (message_timeout_ms == 0) {
dispatch_type = pcmk_ipc_dispatch_sync;
}
pacemakerd_api = ipc_connect(&data, pcmk_ipc_pacemakerd,
pacemakerd_event_cb, dispatch_type, true);
if (pacemakerd_api != NULL) {
int rc = pcmk_pacemakerd_api_ping(pacemakerd_api, ipc_name);
if (rc != pcmk_rc_ok) {
out->err(out, "error: Could not ping launcher API: %s",
pcmk_rc_str(rc));
data.rc = rc;
}
if (dispatch_type == pcmk_ipc_dispatch_poll) {
poll_until_reply(&data, pacemakerd_api, NULL);
}
pcmk_free_ipc_api(pacemakerd_api);
} else if ((data.pcmkd_state == pcmk_pacemakerd_state_remote)
&& show_output) {
// No API connection so the callback wasn't run
out->message(out, "pacemakerd-health",
NULL, data.pcmkd_state, NULL, time(NULL));
}
if (state != NULL) {
*state = data.pcmkd_state;
}
return data.rc;
}
// Documented in header
int
pcmk_pacemakerd_status(xmlNodePtr *xml, const char *ipc_name,
unsigned int message_timeout_ms)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__pacemakerd_status(out, ipc_name, message_timeout_ms, true, NULL);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
/* user data for looping through remote node xpath searches */
struct node_data {
pcmk__output_t *out;
int found;
const char *field; /* XML attribute to check for node name */
const char *type;
bool bash_export;
};
static void
remote_node_print_helper(xmlNode *result, void *user_data)
{
struct node_data *data = user_data;
pcmk__output_t *out = data->out;
const char *name = crm_element_value(result, PCMK_XA_UNAME);
const char *id = crm_element_value(result, data->field);
// node name and node id are the same for remote/guest nodes
out->message(out, "crmadmin-node", data->type,
pcmk__s(name, id), id, data->bash_export);
data->found++;
}
// \return Standard Pacemaker return code
int
pcmk__list_nodes(pcmk__output_t *out, const char *node_types, bool bash_export)
{
xmlNode *xml_node = NULL;
int rc;
rc = cib__signon_query(out, NULL, &xml_node);
if (rc == pcmk_rc_ok) {
struct node_data data = {
.out = out,
.found = 0,
.bash_export = bash_export
};
out->begin_list(out, NULL, NULL, "nodes");
if (!pcmk__str_empty(node_types) && strstr(node_types, "all")) {
node_types = NULL;
}
if (pcmk__str_empty(node_types) || strstr(node_types, "cluster")) {
data.field = PCMK_XA_ID;
data.type = "cluster";
crm_foreach_xpath_result(xml_node, PCMK__XP_MEMBER_NODE_CONFIG,
remote_node_print_helper, &data);
}
if (pcmk__str_empty(node_types) || strstr(node_types, "guest")) {
data.field = PCMK_XA_VALUE;
data.type = "guest";
crm_foreach_xpath_result(xml_node, PCMK__XP_GUEST_NODE_CONFIG,
remote_node_print_helper, &data);
}
if (pcmk__str_empty(node_types)
|| pcmk__str_eq(node_types, ",|^remote", pcmk__str_regex)) {
data.field = PCMK_XA_ID;
data.type = "remote";
crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_CONFIG,
remote_node_print_helper, &data);
}
out->end_list(out);
if (data.found == 0) {
out->info(out, "No nodes configured");
}
free_xml(xml_node);
}
return rc;
}
int
pcmk_list_nodes(xmlNodePtr *xml, const char *node_types)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__register_lib_messages(out);
rc = pcmk__list_nodes(out, node_types, FALSE);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
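
The file above exposes thin public wrappers (pcmk_controller_status(), pcmk_designated_controller(), pcmk_query_node_info(), pcmk_pacemakerd_status(), pcmk_list_nodes()) around the internal pcmk__* query functions. Below is a minimal caller-side sketch, not part of this diff, showing how pcmk_query_node_info() might be used with synchronous dispatch (timeout 0). The example_show_local_node() name is hypothetical, and the cleanup follows the free()/free_xml() ownership pattern described and used elsewhere in this diff.

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <libxml/tree.h>        // xmlNode, xmlNodePtr
#include <crm/crm.h>            // pcmk_rc_ok
#include <crm/common/xml.h>     // free_xml()
#include <pacemaker.h>          // pcmk_query_node_info()

static int
example_show_local_node(void)
{
    xmlNodePtr xml = NULL;
    uint32_t node_id = 0;       // 0 requests info for the local node
    char *name = NULL;
    char *uuid = NULL;
    char *state = NULL;
    bool have_quorum = false;
    bool is_remote = false;

    /* message_timeout_ms == 0 selects pcmk_ipc_dispatch_sync, per the doc
     * comments above; show_output == false suppresses the formatted message
     */
    int rc = pcmk_query_node_info(&xml, &node_id, &name, &uuid, &state,
                                  &have_quorum, &is_remote, false, 0);

    if (rc == pcmk_rc_ok) {
        // Caller owns the strings (see the pcmk__query_node_info() note above)
        free(name);
        free(uuid);
        free(state);
    }
    free_xml(xml);              // XML result tree is also caller-owned
    return rc;
}
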
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index 501950a6ac..fb298c80d4 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -1,822 +1,822 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
* \brief Do deferred action checks after assignment
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
* time, bundles using the REMOTE_CONTAINER_HACK don't have their final
* parameter information, so instead they add a deferred check to a list. This
* function processes one entry in that list.
*
* \param[in,out] rsc Resource that action history is for
* \param[in,out] node Node that action history is for
* \param[in] rsc_op Action history entry
* \param[in] check Type of deferred check to do
*/
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
enum pcmk__check_parameters check)
{
const char *reason = NULL;
pcmk__op_digest_t *digest_data = NULL;
switch (check) {
case pcmk__check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
&& pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL)) {
reason = "action definition changed";
}
break;
case pcmk__check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
rsc->cluster);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case pcmk__digest_match:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason != NULL) {
pe__clear_failcount(rsc, node, reason, rsc->cluster);
}
}
/*!
* \internal
* \brief Check whether a resource has failcount clearing scheduled on a node
*
* \param[in] node Node to check
* \param[in] rsc Resource to check
*
* \return true if \p rsc has failcount clearing scheduled on \p node,
* otherwise false
*/
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
const pcmk_resource_t *rsc)
{
GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
TRUE);
if (list != NULL) {
g_list_free(list);
return true;
}
return false;
}
/*!
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
* \param[in,out] data Resource to check failure threshold for
* \param[in] user_data Node to check resource on
*/
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, check_failure_threshold, user_data);
return;
}
if (!failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
* @TODO Failcount clearing can be scheduled in
* pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
* schedule_resource_actions() via check_params(). This runs well before
* then, so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't. Worst case, we stop or move the
* resource, then move it back in the next transition.
*/
pcmk_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -INFINITY, "__fail_limit__",
rsc->cluster);
}
}
}
/*!
* \internal
* \brief If resource has exclusive discovery, ban node if not allowed
*
* Location constraints have a PCMK_XA_RESOURCE_DISCOVERY option that allows
* users to specify where probes are done for the affected resource. If this is
* set to \c exclusive, probes will only be done on nodes listed in exclusive
* constraints. This function bans the resource from the node if the node is not
* listed.
*
* \param[in,out] data Resource to check
* \param[in] user_data Node to check resource on
*/
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
if (rsc->exclusive_discover
|| pe__const_top_resource(rsc, false)->exclusive_discover) {
pcmk_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if ((match != NULL)
&& (match->rsc_discover_mode != pcmk_probe_exclusive)) {
match->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Apply stickiness to a resource if appropriate
*
* \param[in,out] data Resource to check for stickiness
* \param[in] user_data Ignored
*/
static void
apply_stickiness(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
pcmk_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, apply_stickiness, NULL);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
return;
}
node = rsc->running_on->data;
/* In a symmetric cluster, stickiness can always be used. In an
* asymmetric cluster, we have to check whether the resource is still
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (g_hash_table_lookup(rsc->allowed_nodes,
node->details->id) == NULL)) {
pcmk__rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and %s is not explicitly allowed",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
rsc->id, rsc->stickiness, pcmk__node_name(node));
resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
* \param[in,out] scheduler Scheduler data
*/
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
return;
}
for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->shutdown_lock(rsc);
}
}
/*!
* \internal
* \brief Calculate the number of available nodes in the cluster
*
* \param[in,out] scheduler Scheduler data
*/
static void
count_available_nodes(pcmk_scheduler_t *scheduler)
{
if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
return;
}
// @COMPAT for API backward compatibility only (cluster does not use value)
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
if ((node != NULL) && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
scheduler->max_valid_nodes++;
}
}
crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}
/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
crm_trace("Applying node-specific scheduling criteria");
apply_shutdown_locks(scheduler);
count_available_nodes(scheduler);
pcmk__apply_locations(scheduler);
g_list_foreach(scheduler->resources, apply_stickiness, NULL);
for (GList *node_iter = scheduler->nodes; node_iter != NULL;
node_iter = node_iter->next) {
for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
rsc_iter = rsc_iter->next) {
check_failure_threshold(rsc_iter->data, node_iter->data);
apply_exclusive_discovery(rsc_iter->data, node_iter->data);
}
}
}
/*!
* \internal
* \brief Assign resources to nodes
*
* \param[in,out] scheduler Scheduler data
*/
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
crm_trace("Assigning resources to nodes");
if (!pcmk__str_eq(scheduler->placement_strategy, PCMK_VALUE_DEFAULT,
pcmk__str_casei)) {
pcmk__sort_resources(scheduler);
}
pcmk__show_node_capacities("Original", scheduler);
if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
/* Assign remote connection resources first (which will also assign any
* colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node) {
pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
rsc->id);
rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
}
}
}
/* now do the rest of the resources */
for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (!rsc->is_remote_node) {
pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
rsc->xml->name, rsc->id);
rsc->cmds->assign(rsc, NULL, true);
}
}
pcmk__show_node_capacities("Remaining", scheduler);
}
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
* \param[in,out] data Resource to check
* \param[in] user_data Ignored
*/
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->children because those
* should just be unassigned clone instances.
*/
for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
continue;
}
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
rsc->cluster);
/* We can't use order_action_then_stop() here because its
* pcmk__ar_guest_allowed breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
NULL, pcmk__ar_ordered, rsc->cluster);
}
}
/*!
* \internal
* \brief Schedule any resource actions needed
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
// Process deferred action checks
pe__foreach_param_check(scheduler, check_params);
pe__free_param_checks(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_trace("Scheduling probes");
pcmk__schedule_probes(scheduler);
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
NULL);
}
crm_trace("Scheduling resource actions");
for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->create_actions(rsc);
}
}
/*!
* \internal
* \brief Check whether a resource or any of its descendants are managed
*
* \param[in] rsc Resource to check
*
* \return true if resource or any descendant is managed, otherwise false
*/
static bool
is_managed(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
if (is_managed((pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether any resources in the cluster are managed
*
* \param[in] scheduler Scheduler data
*
* \return true if any resource is managed, otherwise false
*/
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
for (const GList *iter = scheduler->resources;
iter != NULL; iter = iter->next) {
if (is_managed((const pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node requires fencing
*
* \param[in] node Node to check
* \param[in] have_managed Whether any resource in cluster is managed
*
* \return true if \p node should be fenced, otherwise false
*/
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
return have_managed && node->details->unclean
&& pe_can_fence(node->details->data_set, node);
}
/*!
* \internal
* \brief Check whether a node requires shutdown
*
* \param[in] node Node to check
*
* \return true if \p node should be shut down, otherwise false
*/
static bool
needs_shutdown(const pcmk_node_t *node)
{
if (pe__is_guest_or_remote_node(node)) {
/* Do not send shutdown actions for Pacemaker Remote nodes.
* @TODO We might come up with a good use for this in the future.
*/
return false;
}
return node->details->online && node->details->shutdown;
}
/*!
* \internal
* \brief Track and order non-DC fencing
*
* \param[in,out] list List of existing non-DC fencing actions
* \param[in,out] action Fencing action to prepend to \p list
* \param[in] scheduler Scheduler data
*
* \return (Possibly new) head of \p list
*/
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
const pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
&& (list != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
}
return g_list_prepend(list, action);
}
/*!
* \internal
* \brief Schedule a node for fencing
*
 * \param[in,out] node  Node that requires fencing
 *
 * \return Newly created fencing action
 */
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
FALSE, node->details->data_set);
pcmk__sched_warn("Scheduling node %s for fencing", pcmk__node_name(node));
pcmk__order_vs_fence(fencing, node->details->data_set);
return fencing;
}
/*!
* \internal
* \brief Create and order node fencing and shutdown actions
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
pcmk_action_t *dc_down = NULL;
bool integrity_lost = false;
bool have_managed = any_managed_resources(scheduler);
GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
crm_trace("Scheduling fencing and shutdowns as needed");
if (!have_managed) {
crm_notice("No fencing will be done until there are resources "
"to manage");
}
// Check each node for whether it needs fencing or shutdown
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *fencing = NULL;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && have_managed
&& pe_can_fence(scheduler, node)) {
pcmk__fence_guest(node);
}
continue;
}
if (needs_fencing(node, have_managed)) {
fencing = schedule_fencing(node);
// Track DC and non-DC fence actions separately
if (node->details->is_dc) {
dc_down = fencing;
} else {
fencing_ops = add_nondc_fencing(fencing_ops, fencing,
scheduler);
}
} else if (needs_shutdown(node)) {
pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
// Track DC and non-DC shutdown actions separately
if (node->details->is_dc) {
dc_down = down_op;
} else {
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if ((fencing == NULL) && node->details->unclean) {
integrity_lost = true;
pcmk__config_warn("Node %s is unclean but cannot be fenced",
pcmk__node_name(node));
}
}
if (integrity_lost) {
if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resource functionality and data integrity "
"cannot be guaranteed (configure, enable, "
"and test fencing to correct this)");
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
crm_notice("Unclean nodes will not be fenced until quorum is "
"attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
PCMK_VALUE_IGNORE);
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
pcmk__order_after_each(dc_down, fencing_ops);
} else if (fencing_ops != NULL) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
pcmk__ar_ordered);
}
}
g_list_free(fencing_ops);
g_list_free(shutdown_ops);
}
static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
GList *all = NULL;
/* Due to the `crm_mon --node=` feature, out->message() for all the
* resource-related messages expects a list of nodes that we are allowed to
* output information for. Here, we create a wildcard to match all nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
for (GList *item = scheduler->resources; item != NULL; item = item->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
|| (rsc->role != pcmk_role_stopped)) {
- out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
+ out->message(out, crm_map_element_name(rsc->xml), 0UL, rsc, all, all);
}
}
g_list_free(all);
}
static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
pcmk__output_t *prev_out = scheduler->priv;
pcmk__output_t *out = NULL;
if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
return;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
pcmk__output_set_log_level(out, LOG_NOTICE);
scheduler->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
pcmk__output_actions(scheduler);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
scheduler->priv = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
* \param[in] scheduler Scheduler data
*/
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
const uint64_t flags = pcmk_action_optional
|pcmk_action_runnable
|pcmk_action_pseudo;
crm_trace("Required but unrunnable actions:");
for (const GList *iter = scheduler->actions;
iter != NULL; iter = iter->next) {
const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
}
}
}
/*!
* \internal
* \brief Unpack the CIB for scheduling
*
* \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
const char* localhost_save = NULL;
if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pcmk__set_scheduler_flags(scheduler, flags);
return;
}
if (scheduler->localhost) {
localhost_save = scheduler->localhost;
}
CRM_ASSERT(cib != NULL);
crm_trace("Calculating cluster status");
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
* set unless pcmk_sched_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
set_working_set_defaults(scheduler);
if (localhost_save) {
scheduler->localhost = localhost_save;
}
pcmk__set_scheduler_flags(scheduler, flags);
scheduler->input = cib;
cluster_status(scheduler); // Sets pcmk_sched_have_status
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in,out] cib CIB XML to use as scheduler input
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pcmk_scheduler_t *scheduler)
{
unpack_cib(cib, flags, scheduler);
pcmk__set_assignment_methods(scheduler);
pcmk__apply_node_health(scheduler);
pcmk__unpack_constraints(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
return;
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
&& pcmk__is_daemon) {
log_resource_details(scheduler);
}
apply_node_criteria(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
return;
}
pcmk__create_internal_constraints(scheduler);
pcmk__handle_rsc_config_changes(scheduler);
assign_resources(scheduler);
schedule_resource_actions(scheduler);
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
pcmk__order_remote_connection_actions(scheduler);
schedule_fencing_and_shutdowns(scheduler);
pcmk__apply_orderings(scheduler);
log_all_actions(scheduler);
pcmk__create_graph(scheduler);
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(scheduler);
}
}
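
The per-resource helpers in this file (apply_stickiness(), check_failure_threshold(), apply_exclusive_discovery(), clear_failcounts_if_orphaned()) share one shape: a GFunc callback that recurses into rsc->children for collective resources and acts only on leaf resources. A standalone sketch of that traversal pattern follows; example_rsc_t and visit_resource() are hypothetical stand-ins rather than Pacemaker's pcmk_resource_t and its callbacks.

#include <glib.h>

typedef struct example_rsc_s {
    const char *id;
    GList *children;    // list of struct example_rsc_s *, NULL for leaves
} example_rsc_t;

static void
visit_resource(gpointer data, gpointer user_data)
{
    example_rsc_t *rsc = data;

    // Collective resource: apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, visit_resource, user_data);
        return;
    }
    g_print("visiting leaf resource %s\n", rsc->id);
}

/* Usage mirrors the g_list_foreach() calls in apply_node_criteria() above:
 *     g_list_foreach(scheduler_resources, visit_resource, NULL);
 */
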
diff --git a/tools/crm_node.c b/tools/crm_node.c
index 94880b80a6..c1a539294f 100644
--- a/tools/crm_node.c
+++ b/tools/crm_node.c
@@ -1,883 +1,883 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <crm/crm.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/output_internal.h>
#include <crm/common/mainloop.h>
#include <crm/msg_xml.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/attrd_internal.h>
#include <pacemaker-internal.h>
#define SUMMARY "crm_node - Tool for displaying low-level node information"
struct {
gboolean corosync;
gboolean dangerous_cmd;
gboolean force_flag;
char command;
int nodeid;
char *target_uname;
} options = {
.command = '\0',
.force_flag = FALSE
};
gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
#define INDENT " "
static GOptionEntry command_entries[] = {
{ "cluster-id", 'i', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display this node's cluster id",
NULL },
{ "list", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display all known members (past and present) of this cluster",
NULL },
{ "name", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the name used by the cluster for this node",
NULL },
{ "partition", 'p', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the members of this partition",
NULL },
{ "quorum", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display a 1 if our partition has quorum, 0 if not",
NULL },
{ "name-for-id", 'N', 0, G_OPTION_ARG_CALLBACK, name_cb,
"Display the name used by the cluster for the node with the specified ID",
"ID" },
{ "remove", 'R', 0, G_OPTION_ARG_CALLBACK, remove_cb,
"(Advanced) Remove the (stopped) node with the specified name from Pacemaker's\n"
INDENT "configuration and caches (the node must already have been removed from\n"
INDENT "the underlying cluster stack configuration",
"NAME" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "force", 'f', 0, G_OPTION_ARG_NONE, &options.force_flag,
NULL,
NULL },
#if SUPPORT_COROSYNC
/* Unused and deprecated */
{ "corosync", 'C', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.corosync,
NULL,
NULL },
#endif
// @TODO add timeout option for when IPC replies are needed
{ NULL }
};
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_eq("-i", option_name, pcmk__str_casei) || pcmk__str_eq("--cluster-id", option_name, pcmk__str_casei)) {
options.command = 'i';
} else if (pcmk__str_eq("-l", option_name, pcmk__str_casei) || pcmk__str_eq("--list", option_name, pcmk__str_casei)) {
options.command = 'l';
} else if (pcmk__str_eq("-n", option_name, pcmk__str_casei) || pcmk__str_eq("--name", option_name, pcmk__str_casei)) {
options.command = 'n';
} else if (pcmk__str_eq("-p", option_name, pcmk__str_casei) || pcmk__str_eq("--partition", option_name, pcmk__str_casei)) {
options.command = 'p';
} else if (pcmk__str_eq("-q", option_name, pcmk__str_casei) || pcmk__str_eq("--quorum", option_name, pcmk__str_casei)) {
options.command = 'q';
} else {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Unknown param passed to command_cb: %s", option_name);
return FALSE;
}
return TRUE;
}
gboolean
name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.command = 'N';
pcmk__scan_min_int(optarg, &(options.nodeid), 0);
return TRUE;
}
gboolean
remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (optarg == NULL) {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "-R option requires an argument");
return FALSE;
}
options.command = 'R';
options.dangerous_cmd = TRUE;
pcmk__str_update(&options.target_uname, optarg);
return TRUE;
}
PCMK__OUTPUT_ARGS("node-id", "uint32_t")
static int
node_id_default(pcmk__output_t *out, va_list args) {
uint32_t node_id = va_arg(args, uint32_t);
out->info(out, "%" PRIu32, node_id);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-id", "uint32_t")
static int
node_id_xml(pcmk__output_t *out, va_list args) {
uint32_t node_id = va_arg(args, uint32_t);
char *id_s = crm_strdup_printf("%" PRIu32, node_id);
pcmk__output_create_xml_node(out, "node-info",
"nodeid", id_s,
NULL);
free(id_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("simple-node-list", "GList *")
static int
simple_node_list_default(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
pcmk_controld_api_node_t *node = node_iter->data;
out->info(out, "%" PRIu32 " %s %s", node->id, pcmk__s(node->uname, ""),
pcmk__s(node->state, ""));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("simple-node-list", "GList *")
static int
simple_node_list_xml(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
out->begin_list(out, NULL, NULL, PCMK_XE_NODES);
for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
pcmk_controld_api_node_t *node = node_iter->data;
char *id_s = crm_strdup_printf("%" PRIu32, node->id);
pcmk__output_create_xml_node(out, PCMK_XE_NODE,
PCMK_XA_ID, id_s,
PCMK_XA_NAME, node->uname,
"state", node->state,
NULL);
free(id_s);
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
static int
node_name_default(pcmk__output_t *out, va_list args) {
uint32_t node_id G_GNUC_UNUSED = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
out->info(out, "%s", node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
static int
node_name_xml(pcmk__output_t *out, va_list args) {
uint32_t node_id = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
char *id_s = crm_strdup_printf("%" PRIu32, node_id);
pcmk__output_create_xml_node(out, "node-info",
"nodeid", id_s,
PCMK_XA_UNAME, node_name,
NULL);
free(id_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("partition-list", "GList *")
static int
partition_list_default(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
GString *buffer = NULL;
for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
pcmk_controld_api_node_t *node = node_iter->data;
if (pcmk__str_eq(node->state, "member", pcmk__str_none)) {
pcmk__add_separated_word(&buffer, 128, pcmk__s(node->uname, ""), " ");
}
}
if (buffer != NULL) {
out->info(out, "%s", buffer->str);
g_string_free(buffer, TRUE);
return pcmk_rc_ok;
}
return pcmk_rc_no_output;
}
PCMK__OUTPUT_ARGS("partition-list", "GList *")
static int
partition_list_xml(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
out->begin_list(out, NULL, NULL, PCMK_XE_NODES);
for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
pcmk_controld_api_node_t *node = node_iter->data;
if (pcmk__str_eq(node->state, "member", pcmk__str_none)) {
char *id_s = crm_strdup_printf("%" PRIu32, node->id);
pcmk__output_create_xml_node(out, PCMK_XE_NODE,
PCMK_XA_ID, id_s,
PCMK_XA_NAME, node->uname,
"state", node->state,
NULL);
free(id_s);
}
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("quorum", "bool")
static int
quorum_default(pcmk__output_t *out, va_list args) {
bool have_quorum = va_arg(args, int);
out->info(out, "%d", have_quorum);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("quorum", "bool")
static int
quorum_xml(pcmk__output_t *out, va_list args) {
bool have_quorum = va_arg(args, int);
pcmk__output_create_xml_node(out, "cluster-info",
"quorum", pcmk__btoa(have_quorum),
NULL);
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "node-id", "default", node_id_default },
{ "node-id", "xml", node_id_xml },
{ "node-name", "default", node_name_default },
{ "node-name", "xml", node_name_xml },
{ "partition-list", "default", partition_list_default },
{ "partition-list", "xml", partition_list_xml },
{ "quorum", "default", quorum_default },
{ "quorum", "xml", quorum_xml },
{ "simple-node-list", "default", simple_node_list_default },
{ "simple-node-list", "xml", simple_node_list_xml },
{ NULL, NULL, NULL }
};
static gint
sort_node(gconstpointer a, gconstpointer b)
{
const pcmk_controld_api_node_t *node_a = a;
const pcmk_controld_api_node_t *node_b = b;
return pcmk__numeric_strcasecmp((node_a->uname? node_a->uname : ""),
(node_b->uname? node_b->uname : ""));
}
static void
controller_event_cb(pcmk_ipc_api_t *controld_api,
enum pcmk_ipc_event event_type, crm_exit_t status,
void *event_data, void *user_data)
{
pcmk_controld_api_reply_t *reply = event_data;
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Lost connection to controller");
}
goto done;
break;
case pcmk_ipc_event_reply:
break;
default:
return;
}
if (status != CRM_EX_OK) {
exit_code = status;
g_set_error(&error, PCMK__EXITC_ERROR, status,
"Bad reply from controller: %s",
crm_exit_str(status));
goto done;
}
if (reply->reply_type != pcmk_controld_reply_nodes) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_INDETERMINATE,
"Unknown reply type %d from controller",
reply->reply_type);
goto done;
}
reply->data.nodes = g_list_sort(reply->data.nodes, sort_node);
if (options.command == 'p') {
out->message(out, "partition-list", reply->data.nodes);
} else if (options.command == 'l') {
out->message(out, "simple-node-list", reply->data.nodes);
}
// Success
exit_code = CRM_EX_OK;
done:
pcmk_disconnect_ipc(controld_api);
pcmk_quit_main_loop(mainloop, 10);
}
static void
run_controller_mainloop(void)
{
pcmk_ipc_api_t *controld_api = NULL;
int rc;
// Set disconnect exit code to handle unexpected disconnects
exit_code = CRM_EX_DISCONNECT;
// Create controller IPC object
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not connect to controller: %s",
pcmk_rc_str(rc));
return;
}
pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL);
// Connect to controller
rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to %s: %s",
pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
return;
}
rc = pcmk_controld_api_list_nodes(controld_api);
if (rc != pcmk_rc_ok) {
pcmk_disconnect_ipc(controld_api);
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not ping controller: %s", pcmk_rc_str(rc));
return;
}
// Run main loop to get controller reply via controller_event_cb()
mainloop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(mainloop);
g_main_loop_unref(mainloop);
mainloop = NULL;
pcmk_free_ipc_api(controld_api);
}
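// Query the local node ID from the controller and print it via the "node-id" message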
static void
print_node_id(void)
{
uint32_t nodeid;
int rc = pcmk__query_node_info(out, &nodeid, NULL, NULL, NULL, NULL, NULL,
false, 0);
if (rc != pcmk_rc_ok) {
/* pcmk__query_node_info already sets an error message on the output object,
* so there's no need to call g_set_error here. That would just create a
* duplicate error message in the output.
*/
exit_code = pcmk_rc2exitc(rc);
return;
}
rc = out->message(out, "node-id", nodeid);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node ID: %s",
pcmk_rc_str(rc));
}
exit_code = pcmk_rc2exitc(rc);
}
static void
print_node_name(uint32_t nodeid)
{
int rc = pcmk_rc_ok;
char *node_name = NULL;
if (nodeid == 0) {
// Check the environment first (e.g., when called by a resource agent)
const char *name = getenv("OCF_RESKEY_" CRM_META "_"
PCMK__META_ON_NODE);
if (name != NULL) {
- rc = out->message(out, "node-name", 0, name);
+ rc = out->message(out, "node-name", 0UL, name);
goto done;
}
}
// Otherwise ask the controller
/* pcmk__query_node_name already sets an error message on the output object,
* so there's no need to call g_set_error here. That would just create a
* duplicate error message in the output.
*/
rc = pcmk__query_node_name(out, nodeid, &node_name, 0);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
return;
}
- rc = out->message(out, "node-name", 0, node_name);
+ rc = out->message(out, "node-name", 0UL, node_name);
done:
if (node_name != NULL) {
free(node_name);
}
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node name: %s",
pcmk_rc_str(rc));
}
exit_code = pcmk_rc2exitc(rc);
}
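// Query the local quorum status from the controller and print it via the "quorum" message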
static void
print_quorum(void)
{
bool quorum;
int rc = pcmk__query_node_info(out, NULL, NULL, NULL, NULL, &quorum, NULL,
false, 0);
if (rc != pcmk_rc_ok) {
/* pcmk__query_node_info already sets an error message on the output object,
* so there's no need to call g_set_error here. That would just create a
* duplicate error message in the output.
*/
exit_code = pcmk_rc2exitc(rc);
return;
}
rc = out->message(out, "quorum", quorum);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print quorum status: %s",
pcmk_rc_str(rc));
}
exit_code = pcmk_rc2exitc(rc);
}
/*!
* \internal
* \brief Extend a transaction by removing a node from a CIB section
*
* \param[in,out] cib Active CIB connection
* \param[in] element CIB element containing node name and/or ID
* \param[in] section CIB section that \p element is in
* \param[in] node_name Name of node to purge (NULL to leave unspecified)
* \param[in] node_id Node ID of node to purge (0 to leave unspecified)
*
* \note At least one of node_name and node_id must be specified.
* \return Standard Pacemaker return code
*/
static int
remove_from_section(cib_t *cib, const char *element, const char *section,
const char *node_name, long node_id)
{
xmlNode *xml = NULL;
int rc = pcmk_rc_ok;
xml = create_xml_node(NULL, element);
if (xml == NULL) {
return pcmk_rc_error;
}
crm_xml_add(xml, PCMK_XA_UNAME, node_name);
if (node_id > 0) {
crm_xml_set_id(xml, "%ld", node_id);
}
rc = cib->cmds->remove(cib, section, xml, cib_transaction);
free_xml(xml);
return (rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(rc);
}
/*!
* \internal
* \brief Purge a node from CIB
*
* \param[in] node_name Name of node to purge (or NULL to leave unspecified)
* \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
*
* \note At least one of node_name and node_id must be specified.
* \return Standard Pacemaker return code
*/
static int
purge_node_from_cib(const char *node_name, long node_id)
{
int rc = pcmk_rc_ok;
int commit_rc = pcmk_rc_ok;
cib_t *cib = NULL;
// Connect to CIB and start a transaction
cib = cib_new();
if (cib == NULL) {
return ENOTCONN;
}
rc = cib->cmds->signon(cib, crm_system_name, cib_command);
if (rc == pcmk_ok) {
rc = cib->cmds->init_transaction(cib);
}
if (rc != pcmk_ok) {
rc = pcmk_legacy2rc(rc);
cib__clean_up_connection(&cib);
return rc;
}
// Remove from configuration and status
rc = remove_from_section(cib, PCMK_XE_NODE, PCMK_XE_NODES, node_name,
node_id);
if (rc == pcmk_rc_ok) {
rc = remove_from_section(cib, PCMK__XE_NODE_STATE, PCMK_XE_STATUS,
node_name, node_id);
}
// Commit the transaction
commit_rc = cib->cmds->end_transaction(cib, (rc == pcmk_rc_ok),
cib_sync_call);
cib__clean_up_connection(&cib);
if ((rc == pcmk_rc_ok) && (commit_rc == pcmk_ok)) {
crm_debug("Purged node %s (%ld) from CIB",
pcmk__s(node_name, "by ID"), node_id);
}
return rc;
}
/*!
* \internal
* \brief Purge a node from a single server's peer cache
*
* \param[in] server IPC server to send request to
* \param[in] node_name Name of node to purge (or NULL to leave unspecified)
* \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
*
* \note At least one of node_name and node_id must be specified.
* \return Standard Pacemaker return code
*/
static int
purge_node_from(enum pcmk_ipc_server server, const char *node_name,
long node_id)
{
pcmk_ipc_api_t *api = NULL;
int rc;
rc = pcmk_new_ipc_api(&api, server);
if (rc != pcmk_rc_ok) {
goto done;
}
rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
if (rc != pcmk_rc_ok) {
goto done;
}
rc = pcmk_ipc_purge_node(api, node_name, node_id);
done:
if (rc != pcmk_rc_ok) { // Debug message already logged on success
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not purge node %s from %s: %s",
pcmk__s(node_name, "by ID"), pcmk_ipc_name(api, true),
pcmk_rc_str(rc));
}
pcmk_free_ipc_api(api);
return rc;
}
/*!
* \internal
* \brief Purge a node from the fencer's peer cache
*
* \param[in] node_name Name of node to purge (or NULL to leave unspecified)
* \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
*
* \note At least one of node_name and node_id must be specified.
* \return Standard Pacemaker return code
*/
static int
purge_node_from_fencer(const char *node_name, long node_id)
{
int rc = pcmk_rc_ok;
crm_ipc_t *conn = NULL;
xmlNode *cmd = NULL;
conn = crm_ipc_new("stonith-ng", 0);
if (conn == NULL) {
rc = ENOTCONN;
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to fencer to purge node %s",
pcmk__s(node_name, "by ID"));
return rc;
}
rc = pcmk__connect_generic_ipc(conn);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to fencer to purge node %s: %s",
pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
crm_ipc_destroy(conn);
return rc;
}
cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, "stonith-ng",
crm_system_name, NULL);
if (node_id > 0) {
crm_xml_set_id(cmd, "%ld", node_id);
}
crm_xml_add(cmd, PCMK_XA_UNAME, node_name);
rc = crm_ipc_send(conn, cmd, 0, 0, NULL);
if (rc >= 0) {
rc = pcmk_rc_ok;
crm_debug("Purged node %s (%ld) from fencer",
pcmk__s(node_name, "by ID"), node_id);
} else {
rc = pcmk_legacy2rc(rc);
fprintf(stderr, "Could not purge node %s from fencer: %s\n",
pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
}
free_xml(cmd);
crm_ipc_close(conn);
crm_ipc_destroy(conn);
return rc;
}
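/* Purge a node (given by name or numeric ID) from the peer caches of the
 * controller, attribute manager, and fencer, and then from the CIB itself
 * (typically reached via something like "crm_node --remove <node> --force")
 */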
static void
remove_node(const char *target_uname)
{
int rc = pcmk_rc_ok;
long nodeid = 0;
const char *node_name = NULL;
char *endptr = NULL;
const enum pcmk_ipc_server servers[] = {
pcmk_ipc_controld,
pcmk_ipc_attrd,
};
// Check whether node was specified by name or numeric ID
errno = 0;
nodeid = strtol(target_uname, &endptr, 10);
if ((errno != 0) || (endptr == target_uname) || (*endptr != '\0')
|| (nodeid <= 0)) {
// It's not a positive integer, so assume it's a node name
nodeid = 0;
node_name = target_uname;
}
for (int i = 0; i < PCMK__NELEM(servers); ++i) {
rc = purge_node_from(servers[i], node_name, nodeid);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
return;
}
}
// The fencer hasn't been converted to pcmk_ipc_api_t yet
rc = purge_node_from_fencer(node_name, nodeid);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
return;
}
// Lastly, purge the node from the CIB itself
rc = purge_node_from_cib(node_name, nodeid);
exit_code = pcmk_rc2exitc(rc);
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ NULL }
};
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "NR");
GOptionContext *context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_node", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error creating output format %s: %s", args->output_ty,
pcmk_rc_str(rc));
goto done;
}
if (!pcmk__force_args(context, &error, "%s --xml-simple-list", g_get_prgname())) {
exit_code = CRM_EX_SOFTWARE;
goto done;
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.command == 0) {
char *help = g_option_context_get_help(context, TRUE, NULL);
out->err(out, "%s", help);
g_free(help);
exit_code = CRM_EX_USAGE;
goto done;
}
if (options.dangerous_cmd && options.force_flag == FALSE) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"The supplied command is considered dangerous."
" To prevent accidental destruction of the cluster,"
" the --force flag is required in order to proceed.");
goto done;
}
pcmk__register_lib_messages(out);
pcmk__register_messages(out, fmt_functions);
switch (options.command) {
case 'i':
print_node_id();
break;
case 'n':
print_node_name(0);
break;
case 'q':
print_quorum();
break;
case 'N':
print_node_name(options.nodeid);
break;
case 'R':
remove_node(options.target_uname);
break;
case 'l':
case 'p':
run_controller_mainloop();
break;
default:
break;
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
return crm_exit(exit_code);
}
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 145a89e348..75949de1c2 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2095 +1,2096 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm_resource.h>
#include <crm/lrmd_internal.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/ipc_attrd_internal.h>
#include <crm/common/lists_internal.h>
#include <crm/common/output.h>
#include <pacemaker-internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/stonith-ng.h>
#include <crm/common/ipc_controld.h>
#include <crm/cib/internal.h>
#define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
enum rsc_command {
cmd_none = 0, // No command option given (yet)
cmd_ban,
cmd_cleanup,
cmd_clear,
cmd_colocations,
cmd_cts,
cmd_delete,
cmd_delete_param,
cmd_digests,
cmd_execute_agent,
cmd_fail,
cmd_get_param,
cmd_get_property,
cmd_list_active_ops,
cmd_list_agents,
cmd_list_all_ops,
cmd_list_alternatives,
cmd_list_instances,
cmd_list_providers,
cmd_list_resources,
cmd_list_standards,
cmd_locate,
cmd_metadata,
cmd_move,
cmd_query_raw_xml,
cmd_query_xml,
cmd_refresh,
cmd_restart,
cmd_set_param,
cmd_set_property,
cmd_wait,
cmd_why,
};
struct {
enum rsc_command rsc_cmd; // crm_resource command to perform
// Infrastructure that the given command needs in order to work
gboolean require_cib; // Whether command requires CIB IPC
int cib_options; // Options to use with CIB IPC calls
gboolean require_crmd; // Whether command requires controller IPC
gboolean require_scheduler; // Whether command requires scheduler data
gboolean require_resource; // Whether command requires resource specified
gboolean require_node; // Whether command requires node specified
int find_flags; // Flags to use when searching for resource
// Command-line option values
gchar *rsc_id; // Value of --resource
gchar *rsc_type; // Value of --resource-type
gboolean force; // --force was given
gboolean clear_expired; // --expired was given
gboolean recursive; // --recursive was given
gboolean promoted_role_only; // --promoted was given
gchar *host_uname; // Value of --node
gchar *interval_spec; // Value of --interval
gchar *move_lifetime; // Value of --lifetime
gchar *operation; // Value of --operation
const char *attr_set_type; // Instance, meta, utilization, or element attribute
gchar *prop_id; // --nvpair (attribute XML ID)
char *prop_name; // Attribute name
gchar *prop_set; // --set-name (attribute block XML ID)
gchar *prop_value; // --parameter-value (attribute value)
long long timeout_ms; // Parsed from --timeout value
char *agent_spec; // Standard and/or provider and/or agent
gchar *xml_file; // Value of (deprecated) --xml-file
int check_level; // Optional value of --validate or --force-check
// Resource configuration specified via command-line arguments
gboolean cmdline_config; // Resource configuration was via arguments
char *v_agent; // Value of --agent
char *v_class; // Value of --class
char *v_provider; // Value of --provider
GHashTable *cmdline_params; // Resource parameters specified
// Positional command-line arguments
gchar **remainder; // Positional arguments as given
GHashTable *override_params; // Resource parameter values that override config
} options = {
.attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES,
.check_level = -1,
.cib_options = cib_sync_call,
.require_cib = TRUE,
.require_scheduler = TRUE,
.require_resource = TRUE,
};
#if 0
// @COMPAT @TODO enable this at next backward compatibility break
#define SET_COMMAND(cmd) do { \
if (options.rsc_cmd != cmd_none) { \
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \
"Only one command option may be specified"); \
return FALSE; \
} \
options.rsc_cmd = (cmd); \
} while (0)
#else
#define SET_COMMAND(cmd) do { \
if (options.rsc_cmd != cmd_none) { \
reset_options(); \
} \
options.rsc_cmd = (cmd); \
} while (0)
#endif
gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean option_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean restart_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean digests_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
static pcmk__common_args_t *args = NULL;
// Things that should be cleaned up on exit
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
static pcmk_scheduler_t *scheduler = NULL;
#define MESSAGE_TIMEOUT_S 60
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
{
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, ec, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
if (cib_conn != NULL) {
cib_t *save_cib_conn = cib_conn;
cib_conn = NULL; // Ensure we can't free this twice
cib__clean_up_connection(&save_cib_conn);
}
if (controld_api != NULL) {
pcmk_ipc_api_t *save_controld_api = controld_api;
controld_api = NULL; // Ensure we can't free this twice
pcmk_free_ipc_api(save_controld_api);
}
if (mainloop != NULL) {
g_main_loop_unref(mainloop);
mainloop = NULL;
}
pe_free_working_set(scheduler);
scheduler = NULL;
crm_exit(ec);
return ec;
}
static void
quit_main_loop(crm_exit_t ec)
{
exit_code = ec;
if (mainloop != NULL) {
GMainLoop *mloop = mainloop;
mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
resource_ipc_timeout(gpointer data)
{
// Clear any pending error so only the timeout failure is reported
if (error != NULL) {
g_clear_error(&error);
}
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
_("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S);
quit_main_loop(CRM_EX_TIMEOUT);
return FALSE;
}
static void
controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
crm_info("Connection to controller was terminated");
}
quit_main_loop(exit_code);
break;
case pcmk_ipc_event_reply:
if (status != CRM_EX_OK) {
out->err(out, "Error: bad reply from controller: %s",
crm_exit_str(status));
pcmk_disconnect_ipc(api);
quit_main_loop(status);
} else {
if ((pcmk_controld_api_replies_expected(api) == 0)
&& mainloop && g_main_loop_is_running(mainloop)) {
out->info(out, "... got reply (done)");
crm_debug("Got all the replies we expected");
pcmk_disconnect_ipc(api);
quit_main_loop(CRM_EX_OK);
} else {
out->info(out, "... got reply");
}
}
break;
default:
break;
}
}
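// If any controller replies are outstanding, run a main loop (with a timeout)
// until controller_event_callback() has processed them all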
static void
start_mainloop(pcmk_ipc_api_t *capi)
{
unsigned int count = pcmk_controld_api_replies_expected(capi);
if (count > 0) {
out->info(out, "Waiting for %u %s from the controller",
count, pcmk__plural_alt(count, "reply", "replies"));
exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
mainloop = g_main_loop_new(NULL, FALSE);
g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
}
static int
compare_id(gconstpointer a, gconstpointer b)
{
return strcmp((const char *)a, (const char *)b);
}
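// Build a sorted list of the IDs of all location constraints in the given CIB,
// used to report which constraints a --clear operation removed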
static GList *
build_constraint_list(xmlNode *root)
{
GList *retval = NULL;
xmlNode *cib_constraints = NULL;
xmlXPathObjectPtr xpathObj = NULL;
int ndx = 0;
cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS);
xpathObj = xpath_search(cib_constraints, "//" PCMK_XE_RSC_LOCATION);
for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
xmlNode *match = getXpathResult(xpathObj, ndx);
retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
}
freeXpathObject(xpathObj);
return retval;
}
/* short option letters still available: eEJkKXyYZ */
static GOptionEntry query_entries[] = {
{ "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all cluster resources with status",
NULL },
{ "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List IDs of all instantiated resources (individual members\n"
INDENT "rather than groups etc.)",
NULL },
{ "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
NULL,
NULL },
{ "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List active resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_standards_cb,
"List supported standards",
NULL },
{ "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_providers_cb,
"List all available OCF providers",
NULL },
{ "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_agents_cb,
"List all agents available for the named standard and/or provider",
"STD:PROV" },
{ "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_alternatives_cb,
"List all available providers for the named OCF agent",
"AGENT" },
{ "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
metadata_cb,
"Show the metadata for the named class:provider:agent",
"SPEC" },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (after any template expansion)",
NULL },
{ "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (before any template expansion)",
NULL },
{ "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named parameter for resource (use instance attribute\n"
INDENT "unless --element, --meta, or --utilization is specified)",
"PARAM" },
{ "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named property of resource ('class', 'type', or 'provider') "
"(requires --resource)",
"PROPERTY" },
{ "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show node(s) currently running resource",
NULL },
{ "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Display the location and colocation constraints that apply to a\n"
INDENT "resource, and if --recursive is specified, to the resources\n"
INDENT "directly or indirectly involved in those colocations.\n"
INDENT "If the named resource is part of a group, or a clone or\n"
INDENT "bundle instance, constraints for the collective resource\n"
INDENT "will be shown unless --force is given.",
NULL },
{ "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Equivalent to --constraints --recursive",
NULL },
{ "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
"Show why resources are not running, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"Validate resource configuration by calling agent's validate-all\n"
INDENT "action. The configuration may be specified either by giving an\n"
INDENT "existing resource name with -r, or by specifying --class,\n"
INDENT "--agent, and --provider arguments, along with any number of\n"
INDENT "--option arguments. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"If resource has any past failures, clear its history and fail\n"
INDENT "count. Optionally filtered by --resource, --node, --operation\n"
INDENT "and --interval (otherwise all). --operation and --interval\n"
INDENT "apply to fail counts, but entire history is always clear, to\n"
INDENT "allow current state to be rechecked. If the named resource is\n"
INDENT "part of a group, or one numbered instance of a clone or bundled\n"
INDENT "resource, the clean-up applies to the whole collective resource\n"
INDENT "unless --force is given.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"Delete resource's history (including failures) so its current state\n"
INDENT "is rechecked. Optionally filtered by --resource and --node\n"
INDENT "(otherwise all). If the named resource is part of a group, or one\n"
INDENT "numbered instance of a clone or bundled resource, the refresh\n"
INDENT "applies to the whole collective resource unless --force is given.",
NULL },
{ "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Set named parameter for resource (requires -v). Use instance\n"
INDENT "attribute unless --element, --meta, or --utilization is "
"specified.",
"PARAM" },
{ "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Delete named parameter for resource. Use instance attribute\n"
INDENT "unless --element, --meta or, --utilization is specified.",
"PARAM" },
{ "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
"Set named property of resource ('class', 'type', or 'provider') "
"(requires -r, -t, -v)",
"PROPERTY" },
{ NULL }
};
static GOptionEntry location_entries[] = {
{ "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to move resource. If --node is specified,\n"
INDENT "the constraint will be to move to that node, otherwise it\n"
INDENT "will be to ban the current node. Unless --force is specified\n"
INDENT "this will return an error if the resource is already running\n"
INDENT "on the specified node. If --force is specified, this will\n"
INDENT "always ban the current node.\n"
INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
INDENT "resource from running on its previous location until the\n"
INDENT "implicit constraint expires or is removed with --clear.",
NULL },
{ "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to keep resource off a node.\n"
INDENT "Optional: --node, --lifetime, --promoted.\n"
INDENT "NOTE: This will prevent the resource from running on the\n"
INDENT "affected node until the implicit constraint expires or is\n"
INDENT "removed with --clear. If --node is not specified, it defaults\n"
INDENT "to the node currently running the resource for primitives\n"
INDENT "and groups, or the promoted instance of promotable clones with\n"
INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n"
INDENT "error as there is no sane default).",
NULL },
{ "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Remove all constraints created by the --ban and/or --move\n"
INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
INDENT "--expired. If --node is not specified, all constraints created\n"
INDENT "by --ban and --move will be removed for the named resource. If\n"
INDENT "--node and --force are specified, any constraint created by\n"
INDENT "--move will be cleared, even if it is not for the specified\n"
INDENT "node. If --expired is specified, only those constraints whose\n"
INDENT "lifetimes have expired will be removed.",
NULL },
{ "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
"Modifies the --clear argument to remove constraints with\n"
INDENT "expired lifetimes.",
NULL },
{ "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
"Lifespan (as ISO 8601 duration) of created constraints (with\n"
INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
"TIMESPEC" },
{ "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Limit scope of command to promoted role (with -B, -M, -U). For\n"
INDENT "-B and -M, previously promoted instances may remain\n"
INDENT "active in the unpromoted role.",
NULL },
// Deprecated since 2.1.0
{ "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Deprecated: Use --promoted instead", NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
"(Advanced) Delete a resource from the CIB. Required: -t",
NULL },
{ "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
"(Advanced) Tell the cluster this resource has failed",
NULL },
{ "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
"(Advanced) Tell the cluster to restart this resource and\n"
INDENT "anything that depends on it",
NULL },
{ "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
"(Advanced) Wait until the cluster settles into a stable state",
NULL },
{ "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb,
"(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
INDENT "configuration changes (only accurate if there is resource\n"
INDENT "history on the specified node). Required: --resource, --node.\n"
INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
INDENT "the configuration (to see what the hash would be with those\n"
INDENT "changes).",
NULL },
{ "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and demote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and stop a resource on the local node",
NULL },
{ "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and start a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and promote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and check the state of a resource on\n"
INDENT "the local node. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
"Node name",
"NAME" },
{ "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
"Follow colocation chains when using --set-parameter or --constraints",
NULL },
{ "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
"Resource XML element (primitive, group, etc.) (with -D)",
"ELEMENT" },
{ "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
"Value to use with -p",
"PARAM" },
{ "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource meta-attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource utilization attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource element attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
"Operation to clear instead of all (with -C -r)",
"OPERATION" },
{ "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
"Interval of operation to clear (default 0) (with -C -r -n)",
"N" },
{ "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
"The standard the resource agent conforms to (for example, ocf).\n"
INDENT "Use with --agent, --provider, --option, and --validate.",
"CLASS" },
{ "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The agent to use (for example, IPaddr). Use with --class,\n"
INDENT "--provider, --option, and --validate.",
"AGENT" },
{ "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The vendor that supplies the resource agent (for example,\n"
INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
"PROVIDER" },
{ "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
"Specify a device configuration parameter as NAME=VALUE (may be\n"
INDENT "specified multiple times). Use with --validate and without the\n"
INDENT "-r option.",
"PARAM" },
{ "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
"(Advanced) XML ID of attributes element to use (with -p, -d)",
"ID" },
{ "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
"(Advanced) XML ID of nvpair element to use (with -p, -d)",
"ID" },
{ "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
"(Advanced) Abort if command does not finish in this time (with\n"
INDENT "--restart, --wait, --force-*)",
"N" },
{ "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
"Force the action to be performed. See help for individual commands for\n"
INDENT "additional behavior.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
NULL,
"FILE" },
{ "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
NULL,
"HOST" },
{ NULL }
};
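// Restore per-command prerequisites to their defaults; SET_COMMAND() calls this
// when a later command option replaces an earlier one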
static void
reset_options(void) {
options.require_crmd = FALSE;
options.require_node = FALSE;
options.require_cib = TRUE;
options.require_scheduler = TRUE;
options.require_resource = TRUE;
options.find_flags = 0;
}
gboolean
agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.cmdline_config = TRUE;
options.require_resource = FALSE;
if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
pcmk__str_update(&options.v_provider, optarg);
} else {
pcmk__str_update(&options.v_agent, optarg);
}
return TRUE;
}
gboolean
attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
options.attr_set_type = PCMK_XE_META_ATTRIBUTES;
} else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
options.attr_set_type = PCMK_XE_UTILIZATION;
} else if (pcmk__str_eq(option_name, "--element", pcmk__str_casei)) {
options.attr_set_type = ATTR_SET_ELEMENT;
}
return TRUE;
}
gboolean
class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.v_class, optarg);
options.cmdline_config = TRUE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
SET_COMMAND(cmd_cleanup);
} else {
SET_COMMAND(cmd_refresh);
}
options.require_resource = FALSE;
if (getenv("CIB_file") == NULL) {
options.require_crmd = TRUE;
}
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
gboolean
delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_delete);
options.require_scheduler = FALSE;
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
gboolean
expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.clear_expired = TRUE;
options.require_resource = FALSE;
return TRUE;
}
static void
get_agent_spec(const gchar *optarg)
{
options.require_cib = FALSE;
options.require_scheduler = FALSE;
options.require_resource = FALSE;
pcmk__str_update(&options.agent_spec, optarg);
}
gboolean
list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_agents);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_providers);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_standards);
options.require_cib = FALSE;
options.require_scheduler = FALSE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_list_alternatives);
get_agent_spec(optarg);
return TRUE;
}
gboolean
metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_metadata);
get_agent_spec(optarg);
return TRUE;
}
gboolean
option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
char *name = NULL;
char *value = NULL;
if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
return FALSE;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
g_hash_table_replace(options.cmdline_params, name, value);
return TRUE;
}
gboolean
fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_fail);
options.require_crmd = TRUE;
options.require_node = TRUE;
return TRUE;
}
gboolean
flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
SET_COMMAND(cmd_clear);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
SET_COMMAND(cmd_ban);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
SET_COMMAND(cmd_move);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
SET_COMMAND(cmd_query_xml);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
SET_COMMAND(cmd_query_raw_xml);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
SET_COMMAND(cmd_locate);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
SET_COMMAND(cmd_colocations);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
SET_COMMAND(cmd_colocations);
options.find_flags = pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename;
options.recursive = TRUE;
}
return TRUE;
}
gboolean
get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
SET_COMMAND(cmd_get_param);
} else {
SET_COMMAND(cmd_get_property);
}
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
gboolean
list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
SET_COMMAND(cmd_cts);
} else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
SET_COMMAND(cmd_list_resources);
} else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
SET_COMMAND(cmd_list_instances);
} else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
SET_COMMAND(cmd_list_active_ops);
} else {
SET_COMMAND(cmd_list_all_ops);
}
options.require_resource = FALSE;
return TRUE;
}
gboolean
set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
SET_COMMAND(cmd_set_param);
} else {
SET_COMMAND(cmd_delete_param);
}
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
gboolean
set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_set_property);
options.require_scheduler = FALSE;
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
gboolean
timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.timeout_ms = crm_get_msec(optarg);
if (options.timeout_ms < 0) {
crm_warn("Ignoring invalid timeout '%s'", optarg);
options.timeout_ms = 0;
} else {
options.timeout_ms = QB_MIN(options.timeout_ms, INT_MAX);
}
return TRUE;
}
gboolean
validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_execute_agent);
if (options.operation) {
g_free(options.operation);
}
options.operation = g_strdup(option_name + 2); // skip "--"
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
if (optarg != NULL) {
if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
_("Invalid check level setting: %s"), optarg);
return FALSE;
}
}
return TRUE;
}
gboolean
restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_restart);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
gboolean
digests_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_digests);
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
options.require_node = TRUE;
options.require_scheduler = TRUE;
return TRUE;
}
gboolean
wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_wait);
options.require_resource = FALSE;
options.require_scheduler = FALSE;
return TRUE;
}
gboolean
why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_why);
options.require_resource = FALSE;
options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
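// Ban the resource from the node where it (or its promoted instance) is active;
// fails if the resource is active in more than one location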
static int
ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc,
const char *move_lifetime)
{
int rc = pcmk_rc_ok;
pcmk_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
cib_conn, options.cib_options, options.promoted_role_only,
PCMK__ROLE_PROMOTED);
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if (child_role == pcmk_role_promoted) {
count++;
current = pcmk__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
cib_conn, options.cib_options, options.promoted_role_only,
PCMK__ROLE_PROMOTED);
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
"To prevent '%s' from running on a specific location, "
"specify a node."
"To prevent '%s' from being promoted at a specific "
"location, specify a node and the --promoted option."),
options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
}
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations.\n"
"To prevent '%s' from running on a specific location, "
"specify a node."),
options.rsc_id, nactive, options.rsc_id);
}
return rc;
}
static void
cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc,
options.operation, options.interval_spec, TRUE,
scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
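// Remove constraints created by --ban/--move (all, per-node, or expired only),
// then report which location constraints were removed unless --quiet was given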
static int
clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
{
GList *before = NULL;
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
pcmk_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
before = build_constraint_list(scheduler->input);
}
if (options.clear_expired) {
rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
options.cib_options, options.rsc_id,
options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
dest = pe_find_node(scheduler->nodes, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
g_list_free(before);
}
return rc;
}
rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
cib_conn, options.cib_options, TRUE, options.force);
} else {
rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
cib_conn, options.cib_options, TRUE, options.force);
}
if (!out->is_quiet(out)) {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
g_list_free(before);
free_xml(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
scheduler->input = *cib_xml_copy;
cluster_status(scheduler);
after = build_constraint_list(scheduler->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
out->info(out, "Removing constraint: %s", (char *) ele->data);
}
g_list_free(before);
g_list_free(after);
g_list_free(remaining);
}
return rc;
}
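// Populate the scheduler working set from --xml-file if given, otherwise from
// a live CIB query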
static int
initialize_scheduler_data(xmlNodePtr *cib_xml_copy)
{
int rc = pcmk_rc_ok;
if (options.xml_file != NULL) {
*cib_xml_copy = filename2xml(options.xml_file);
if (*cib_xml_copy == NULL) {
rc = pcmk_rc_cib_corrupt;
}
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
}
if (rc == pcmk_rc_ok) {
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = ENOMEM;
} else {
pcmk__set_scheduler_flags(scheduler,
pcmk_sched_no_counts
|pcmk_sched_no_compat);
scheduler->priv = out;
rc = update_scheduler_input(scheduler, cib_xml_copy);
}
}
if (rc != pcmk_rc_ok) {
free_xml(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
cluster_status(scheduler);
return pcmk_rc_ok;
}
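// Clear failures (cluster-wide or for --node) via the attribute manager and ask
// the controller to reprobe; with CIB_file set, this is reported as a dry run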
static int
refresh(pcmk__output_t *out)
{
int rc = pcmk_rc_ok;
const char *router_node = options.host_uname;
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
pcmk_node_t *node = pe_find_node(scheduler->nodes, options.host_uname);
if (pe__is_guest_or_remote_node(node)) {
node = pcmk__current_node(node->details->remote_rsc);
if (node == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No cluster connection to Pacemaker Remote node %s detected"),
options.host_uname);
return rc;
}
router_node = node->details->uname;
attr_options |= pcmk__node_attr_remote;
}
}
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
options.host_uname? options.host_uname : "all nodes");
rc = pcmk_rc_ok;
return rc;
}
crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
NULL, NULL, NULL, attr_options);
if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
router_node) == pcmk_rc_ok) {
start_mainloop(controld_api);
}
return rc;
}
static void
refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
FALSE, scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
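// Set a resource element attribute ('class', 'provider', or 'type') directly in
// the CIB; requires -r, -t, and -v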
static int
set_property(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (pcmk__str_empty(options.rsc_type)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must specify -t with resource type"));
rc = ENXIO;
return rc;
} else if (pcmk__str_empty(options.prop_value)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must supply -v with new value"));
rc = ENXIO;
return rc;
}
CRM_LOG_ASSERT(options.prop_name != NULL);
msg_data = create_xml_node(NULL, options.rsc_type);
crm_xml_add(msg_data, PCMK_XA_ID, options.rsc_id);
crm_xml_add(msg_data, options.prop_name, options.prop_value);
rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_RESOURCES, msg_data,
options.cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(msg_data);
return rc;
}
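// Ask the local executor for an agent's metadata and print it as XML
// (for example, for a spec such as "ocf:heartbeat:IPaddr2")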
static int
show_metadata(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not create executor connection"));
lrmd_api_delete(lrmd_conn);
return rc;
}
rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
rc = pcmk_legacy2rc(rc);
if (metadata) {
out->output_xml(out, "metadata", metadata);
free(metadata);
} else {
/* We were given a validly formatted spec, but it doesn't necessarily
* match up with anything that exists. Use ENXIO as the return code
* here because that maps to an exit code of CRM_EX_NOSUCH, which
* probably is the most common reason to get here.
*/
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Metadata query for %s failed: %s"),
agent_spec, pcmk_rc_str(rc));
}
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("'%s' is not a valid agent specification"), agent_spec);
}
lrmd_api_delete(lrmd_conn);
return rc;
}
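// Validate --class/--agent/--provider/--option usage when a resource is being
// configured on the command line instead of named with --resource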
static void
validate_cmdline_config(void)
{
// Cannot use both --resource and command-line resource configuration
if (options.rsc_id != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--resource cannot be used with --class, --agent, and --provider"));
// Not all commands support command-line resource configuration
} else if (options.rsc_cmd != cmd_execute_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--class, --agent, and --provider can only be used with "
"--validate and --force-*"));
// Not all of --class, --agent, and --provider need to be given. Not all
// classes support the concept of a provider. Check that what we were given
// is valid.
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
if (options.v_provider != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("stonith does not support providers"));
} else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s is not a known stonith agent"), options.v_agent ? options.v_agent : "");
}
} else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s:%s:%s is not a known resource"),
options.v_class ? options.v_class : "",
options.v_provider ? options.v_provider : "",
options.v_agent ? options.v_agent : "");
}
if (error != NULL) {
return;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
options.require_resource = FALSE;
options.require_scheduler = FALSE;
options.require_cib = FALSE;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
"Resource ID",
"ID" },
{ G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
NULL,
NULL },
{ NULL }
};
const char *description = "Examples:\n\n"
"List the available OCF agents:\n\n"
"\t# crm_resource --list-agents ocf\n\n"
"List the available OCF agents from the linux-ha project:\n\n"
"\t# crm_resource --list-agents ocf:heartbeat\n\n"
"Move 'myResource' to a specific node:\n\n"
"\t# crm_resource --resource myResource --move --node altNode\n\n"
"Allow (but not force) 'myResource' to move back to its original "
"location:\n\n"
"\t# crm_resource --resource myResource --clear\n\n"
"Stop 'myResource' (and anything that depends on it):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_TARGET_ROLE "--meta --parameter-value Stopped\n\n"
"Tell the cluster not to manage 'myResource' (the cluster will not "
"attempt to start or stop the\n"
"resource under any circumstances; useful when performing maintenance "
"tasks on a resource):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_IS_MANAGED "--meta --parameter-value false\n\n"
"Erase the operation history of 'myResource' on 'aNode' (the cluster "
"will 'forget' the existing\n"
"resource state, including any errors, and attempt to recover the"
"resource; useful when a resource\n"
"had failed permanently and has been repaired by an administrator):\n\n"
"\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show query help", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "locations", "Locations:",
"Show location help", location_entries);
pcmk__add_arg_group(context, "advanced", "Advanced:",
"Show advanced option help", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_xml_copy = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_node_t *node = NULL;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
gchar **processed_args = NULL;
GOptionContext *context = NULL;
/*
* Parse command line arguments
*/
args = pcmk__new_common_args(SUMMARY);
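/* The second argument to pcmk__cmdline_preproc() lists the short options that
 * take a value, so forms like -r<id> are split correctly before glib parses them
 */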
processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_resource", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"),
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
crm_resource_register_messages(out);
lrmd__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
crm_log_args(argc, argv);
/*
* Validate option combinations
*/
// If the user didn't explicitly specify a command, list resources
if (options.rsc_cmd == cmd_none) {
options.rsc_cmd = cmd_list_resources;
options.require_resource = FALSE;
}
// --expired without --clear/-U doesn't make sense
if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U"));
goto done;
}
if ((options.remainder != NULL) && (options.override_params != NULL)) {
// Commands that use positional arguments will create override_params
for (gchar **s = options.remainder; *s; s++) {
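/* Each positional argument should be a "name=value" pair; note that sscanf's
 * %s conversion stops at whitespace, so the value cannot contain spaces
 */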
char *name = calloc(1, strlen(*s) + 1);
char *value = calloc(1, strlen(*s) + 1);
int rc = sscanf(*s, "%[^=]=%s", name, value);
if (rc == 2) {
g_hash_table_replace(options.override_params, name, value);
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error parsing '%s' as a name=value pair"),
*s);
free(value);
free(name);
goto done;
}
}
} else if (options.remainder != NULL) {
gchar **strv = NULL;
gchar *msg = NULL;
int i = 1;
int len = 0;
for (gchar **s = options.remainder; *s; s++) {
len++;
}
CRM_ASSERT(len > 0);
/* Add 1 for the strv[0] string below, and add another 1 for the NULL
* at the end of the array so g_strjoinv knows when to stop.
*/
strv = calloc(len+2, sizeof(char *));
strv[0] = strdup("non-option ARGV-elements:\n");
for (gchar **s = options.remainder; *s; s++) {
strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
i++;
}
strv[i] = NULL;
exit_code = CRM_EX_USAGE;
msg = g_strjoinv("", strv);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
g_free(msg);
/* Don't try to free the last element, which is just NULL. */
for(i = 0; i < len+1; i++) {
free(strv[i]);
}
free(strv);
goto done;
}
if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
/* This is a bit of a hack to display XML lists using a real tag instead of
 * <list>. It saves having to write custom messages to build the lists around
 * all of these commands.
 */
switch (options.rsc_cmd) {
case cmd_execute_agent:
case cmd_list_resources:
case cmd_query_xml:
case cmd_query_raw_xml:
case cmd_list_active_ops:
case cmd_list_all_ops:
case cmd_colocations:
pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
break;
default:
pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
break;
}
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
if ((options.rsc_cmd == cmd_colocations) ||
(options.rsc_cmd == cmd_list_resources)) {
pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
}
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.cmdline_config) {
/* A resource configuration was given on the command line. Sanity-check
* the values and set error if they don't make sense.
*/
validate_cmdline_config();
if (error != NULL) {
exit_code = CRM_EX_USAGE;
goto done;
}
} else if (options.cmdline_params != NULL) {
// @COMPAT @TODO error out here when we can break backward compatibility
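// For now, silently discard any extra parameters given without a full
// command-line resource configuration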
g_hash_table_destroy(options.cmdline_params);
options.cmdline_params = NULL;
}
if (options.require_resource && (options.rsc_id == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a resource id with -r"));
goto done;
}
if (options.require_node && (options.host_uname == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a node name with -N"));
goto done;
}
/*
* Set up necessary connections
*/
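// Commands that look up a specific resource (find_flags set) need scheduler data to search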
if (options.find_flags && options.rsc_id) {
options.require_scheduler = TRUE;
}
// Establish a connection to the CIB if needed
if (options.require_cib) {
cib_conn = cib_new();
if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
exit_code = CRM_EX_DISCONNECT;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not create CIB connection"));
goto done;
}
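// signon() returns a legacy code; convert it to a standard Pacemaker return code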
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not connect to the CIB: %s"), pcmk_rc_str(rc));
goto done;
}
}
// Populate scheduler data from the XML file if specified, or from a CIB query otherwise
if (options.require_scheduler) {
rc = initialize_scheduler_data(&cib_xml_copy);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
goto done;
}
}
// If the command requires the resource to exist when one is specified, find it
if (options.find_flags && options.rsc_id) {
rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id,
options.find_flags);
if (rsc == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Resource '%s' not found"), options.rsc_id);
goto done;
}
/* The --ban, --clear, --move, and --restart commands do not work with
* instances of clone resources.
*/
if (strchr(options.rsc_id, ':') != NULL && pe_rsc_is_clone(rsc->parent) &&
(options.rsc_cmd == cmd_ban || options.rsc_cmd == cmd_clear ||
options.rsc_cmd == cmd_move || options.rsc_cmd == cmd_restart)) {
exit_code = CRM_EX_INVALID_PARAM;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Cannot operate on clone resource instance '%s'"), options.rsc_id);
goto done;
}
}
// If user supplied a node name, check whether it exists
if ((options.host_uname != NULL) && (scheduler != NULL)) {
node = pe_find_node(scheduler->nodes, options.host_uname);
if (node == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Node '%s' not found"), options.host_uname);
goto done;
}
}
// Establish a connection to the controller if needed
if (options.require_crmd) {
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to the controller: %s"), pcmk_rc_str(rc));
goto done;
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to %s: %s"),
pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
goto done;
}
}
/*
* Handle requested command
*/
switch (options.rsc_cmd) {
case cmd_list_resources: {
GList *all = NULL;
+ uint32_t show_opts = pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending;
+
all = g_list_prepend(all, (gpointer) "*");
rc = out->message(out, "resource-list", scheduler,
- pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending,
- true, all, all, false);
+ show_opts, true, all, all, false);
g_list_free(all);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
}
case cmd_list_instances:
rc = out->message(out, "resource-names-list", scheduler->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
}
break;
case cmd_list_alternatives:
rc = pcmk__list_alternatives(out, options.agent_spec);
break;
case cmd_list_agents:
rc = pcmk__list_agents(out, options.agent_spec);
break;
case cmd_list_standards:
rc = pcmk__list_standards(out);
break;
case cmd_list_providers:
rc = pcmk__list_providers(out, options.agent_spec);
break;
case cmd_metadata:
rc = show_metadata(out, options.agent_spec);
break;
case cmd_restart:
/* We don't pass scheduler because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
* update the scheduler data multiple times, so it needs to use its
* own copy.
*/
rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
options.timeout_ms, cib_conn,
options.cib_options, options.promoted_role_only,
options.force);
break;
case cmd_wait:
rc = wait_till_stable(out, options.timeout_ms, cib_conn);
break;
case cmd_execute_agent:
if (options.cmdline_config) {
exit_code = cli_resource_execute_from_params(out, NULL,
options.v_class, options.v_provider, options.v_agent,
options.operation, options.cmdline_params,
options.override_params, options.timeout_ms,
args->verbosity, options.force, options.check_level);
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
options.timeout_ms, cib_conn, scheduler,
args->verbosity, options.force, options.check_level);
}
goto done;
case cmd_digests:
node = pe_find_node(scheduler->nodes, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = pcmk__resource_digests(out, rsc, node,
options.override_params);
}
break;
case cmd_colocations:
rc = out->message(out, "locations-and-colocations", rsc,
options.recursive, (bool) options.force);
break;
case cmd_cts:
rc = pcmk_rc_ok;
g_list_foreach(scheduler->resources, (GFunc) cli_resource_print_cts,
out);
cli_resource_print_cts_constraints(scheduler);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
options.rsc_id, scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
break;
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
scheduler);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
scheduler);
break;
case cmd_locate: {
GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
rc = cli_resource_print(rsc, scheduler, true);
break;
case cmd_query_raw_xml:
rc = cli_resource_print(rsc, scheduler, false);
break;
case cmd_why:
if ((options.host_uname != NULL) && (node == NULL)) {
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list",
scheduler->resources, rsc, node);
}
break;
case cmd_clear:
rc = clear_constraints(out, &cib_xml_copy);
break;
case cmd_move:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
options.cib_options, scheduler,
options.promoted_role_only,
options.force);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_ban:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
options.move_lifetime, cib_conn,
options.cib_options,
options.promoted_role_only,
PCMK__ROLE_PROMOTED);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_get_property:
rc = out->message(out, "property-list", rsc, options.prop_name);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
case cmd_set_property:
rc = set_property();
break;
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
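// active_node() returns a node the resource is running on and sets count to the
// number of nodes on which it is active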
pcmk_node_t *current = rsc->fns->active_node(rsc, &count, NULL);
bool free_params = true;
const char* value = NULL;
if (count > 1) {
out->err(out, "%s is active on more than one node,"
" returning the default value for %s", rsc->id,
pcmk__s(options.prop_name, "unspecified property"));
current = NULL;
}
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
pcmk__str_none)) {
params = pe_rsc_params(rsc, current, scheduler);
free_params = false;
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type,
PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) {
params = pcmk__strkey_table(free, free);
get_meta_attributes(params, rsc, current, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
value = crm_element_value(rsc->xml, options.prop_name);
free_params = false;
} else {
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->xml, PCMK_XE_UTILIZATION, NULL,
params, NULL, FALSE, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
}
rc = out->message(out, "attribute-list", rsc, options.prop_name, value);
if (free_params) {
g_hash_table_destroy(params);
}
break;
}
case cmd_set_param:
if (pcmk__str_empty(options.prop_value)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("You need to supply a value with the -v option"));
goto done;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name,
options.prop_value,
options.recursive, cib_conn,
options.cib_options,
options.force);
break;
case cmd_delete_param:
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name, cib_conn,
options.cib_options,
options.force);
break;
case cmd_cleanup:
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
} else {
cleanup(out, rsc, node);
}
break;
case cmd_refresh:
if (rsc == NULL) {
rc = refresh(out);
} else {
refresh_resource(out, rsc, node);
}
break;
case cmd_delete:
/* rsc_id was already checked for NULL much earlier when validating
* command line arguments.
*/
if (options.rsc_type == NULL) {
// @COMPAT @TODO change this to exit_code = CRM_EX_USAGE
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("You need to specify a resource type with -t"));
} else {
rc = pcmk__resource_delete(cib_conn, options.cib_options,
options.rsc_id, options.rsc_type);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not delete resource %s: %s"),
options.rsc_id, pcmk_rc_str(rc));
}
}
break;
default:
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Unimplemented command: %d"), (int) options.rsc_cmd);
goto done;
}
/* Convert rc into an exit code. */
if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
exit_code = pcmk_rc2exitc(rc);
}
/*
* Clean up and exit
*/
done:
/* When we get here, exit_code has been set in one of two ways: either at one of
 * the spots with a "goto done" (where it may have been set directly or via
 * pcmk_rc2exitc), or just above, after one of the break statements.
 *
 * Thus, exit_code alone is enough to decide what to do here.
 */
if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
if (error != NULL) {
char *msg = crm_strdup_printf("%s\nError performing operation: %s",
error->message, crm_exit_str(exit_code));
g_clear_error(&error);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
free(msg);
} else {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error performing operation: %s"), crm_exit_str(exit_code));
}
}
g_free(options.host_uname);
g_free(options.interval_spec);
g_free(options.move_lifetime);
g_free(options.operation);
g_free(options.prop_id);
free(options.prop_name);
g_free(options.prop_set);
g_free(options.prop_value);
g_free(options.rsc_id);
g_free(options.rsc_type);
free(options.agent_spec);
free(options.v_agent);
free(options.v_class);
free(options.v_provider);
g_free(options.xml_file);
g_strfreev(options.remainder);
if (options.override_params != NULL) {
g_hash_table_destroy(options.override_params);
}
/* options.cmdline_params does not need to be destroyed here. See the
* comments in cli_resource_execute_from_params.
*/
g_strfreev(processed_args);
g_option_context_free(context);
return bye(exit_code);
}
