diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index 3c59192a68..b0fb58f602 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -1,836 +1,836 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/pengine/rules.h>
#include <crm/cluster/internal.h>
#include <crm/cluster/election_internal.h>
#include <crm/common/ipc_internal.h>
#include <pacemaker-controld.h>
qb_ipcs_service_t *ipcs = NULL;
#if SUPPORT_COROSYNC
extern gboolean crm_connect_corosync(crm_cluster_t * cluster);
#endif
void crm_shutdown(int nsig);
gboolean crm_read_options(gpointer user_data);
gboolean fsa_has_quorum = FALSE;
crm_trigger_t *fsa_source = NULL;
crm_trigger_t *config_read = NULL;
bool no_quorum_suicide_escalation = FALSE;
bool controld_shutdown_lock_enabled = false;
/* A_HA_CONNECT */
void
do_ha_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
gboolean registered = FALSE;
static crm_cluster_t *cluster = NULL;
if (cluster == NULL) {
cluster = calloc(1, sizeof(crm_cluster_t));
}
if (action & A_HA_DISCONNECT) {
crm_cluster_disconnect(cluster);
crm_info("Disconnected from the cluster");
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
}
if (action & A_HA_CONNECT) {
crm_set_status_callback(&peer_update_callback);
crm_set_autoreap(FALSE);
if (is_corosync_cluster()) {
#if SUPPORT_COROSYNC
registered = crm_connect_corosync(cluster);
#endif
}
if (registered == TRUE) {
controld_election_init(cluster->uname);
fsa_our_uname = cluster->uname;
fsa_our_uuid = cluster->uuid;
if(cluster->uuid == NULL) {
crm_err("Could not obtain local uuid");
registered = FALSE;
}
}
if (registered == FALSE) {
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
populate_cib_nodes(node_update_none, __func__);
controld_clear_fsa_input_flags(R_HA_DISCONNECTED);
crm_info("Connected to the cluster");
}
if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action),
__func__);
}
}
/* A_SHUTDOWN */
void
do_shutdown(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* just in case */
controld_set_fsa_input_flags(R_SHUTDOWN);
controld_disconnect_fencer(FALSE);
}
/* A_SHUTDOWN_REQ */
void
do_shutdown_req(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *msg = NULL;
controld_set_fsa_input_flags(R_SHUTDOWN);
//controld_set_fsa_input_flags(R_STAYDOWN);
crm_info("Sending shutdown request to all peers (DC is %s)",
(fsa_our_dc? fsa_our_dc : "not set"));
msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
free_xml(msg);
}
extern char *max_generation_from;
extern xmlNode *max_generation_xml;
extern GHashTable *resource_history;
extern GHashTable *voted;
void
crmd_fast_exit(crm_exit_t exit_code)
{
if (pcmk_is_set(fsa_input_register, R_STAYDOWN)) {
crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d",
exit_code, CRM_EX_FATAL);
exit_code = CRM_EX_FATAL;
} else if ((exit_code == CRM_EX_OK)
&& pcmk_is_set(fsa_input_register, R_IN_RECOVERY)) {
crm_err("Could not recover from internal error");
exit_code = CRM_EX_ERROR;
}
crm_exit(exit_code);
}
crm_exit_t
crmd_exit(crm_exit_t exit_code)
{
GList *gIter = NULL;
GMainLoop *mloop = crmd_mainloop;
static bool in_progress = FALSE;
if (in_progress && (exit_code == CRM_EX_OK)) {
crm_debug("Exit is already in progress");
return exit_code;
} else if(in_progress) {
crm_notice("Error during shutdown process, exiting now with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
in_progress = TRUE;
crm_trace("Preparing to exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
/* Suppress secondary errors resulting from us disconnecting everything */
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
/* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */
if(ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
controld_close_attrd_ipc();
- pe_subsystem_free();
+ controld_shutdown_schedulerd_ipc();
controld_disconnect_fencer(TRUE);
if ((exit_code == CRM_EX_OK) && (crmd_mainloop == NULL)) {
crm_debug("No mainloop detected");
exit_code = CRM_EX_ERROR;
}
/* On an error, just get out.
*
* Otherwise, make the effort to have mainloop exit gracefully so
* that it (mostly) cleans up after itself and valgrind has less
* to report on - allowing real errors to stand out
*/
if (exit_code != CRM_EX_OK) {
crm_notice("Forcing immediate exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
/* Clean up as much memory as possible for valgrind */
for (gIter = fsa_message_queue; gIter != NULL; gIter = gIter->next) {
fsa_data_t *fsa_data = gIter->data;
crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]",
fsa_input2string(fsa_data->fsa_input),
fsa_state2string(fsa_state),
fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
delete_fsa_input(fsa_data);
}
controld_clear_fsa_input_flags(R_MEMBERSHIP);
g_list_free(fsa_message_queue); fsa_message_queue = NULL;
metadata_cache_fini();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
* be used when we drain the mainloop later.
*/
controld_disconnect_cib_manager();
verify_stopped(fsa_state, LOG_WARNING);
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
lrm_state_destroy_all();
/* This basically will not work, since mainloop has a reference to it */
mainloop_destroy_trigger(fsa_source); fsa_source = NULL;
mainloop_destroy_trigger(config_read); config_read = NULL;
mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL;
pcmk__client_cleanup();
crm_peer_destroy();
controld_free_fsa_timers();
te_cleanup_stonith_history_sync(NULL, TRUE);
controld_free_sched_timer();
free(fsa_our_dc_version); fsa_our_dc_version = NULL;
free(fsa_our_uname); fsa_our_uname = NULL;
free(fsa_our_uuid); fsa_our_uuid = NULL;
free(fsa_our_dc); fsa_our_dc = NULL;
free(fsa_cluster_name); fsa_cluster_name = NULL;
free(te_uuid); te_uuid = NULL;
free(failed_stop_offset); failed_stop_offset = NULL;
free(failed_start_offset); failed_start_offset = NULL;
free(max_generation_from); max_generation_from = NULL;
free_xml(max_generation_xml); max_generation_xml = NULL;
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGTRAP);
/* leave SIGCHLD engaged as we might still want to drain some service-actions */
if (mloop) {
GMainContext *ctx = g_main_loop_get_context(crmd_mainloop);
/* Don't re-enter this block */
crmd_mainloop = NULL;
/* no signals on final draining anymore */
mainloop_destroy_signal(SIGCHLD);
crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
{
int lpc = 0;
while((g_main_context_pending(ctx) && lpc < 10)) {
lpc++;
crm_trace("Iteration %d", lpc);
g_main_context_dispatch(ctx);
}
}
crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
g_main_loop_quit(mloop);
/* Won't do anything yet, since we're inside it now */
g_main_loop_unref(mloop);
} else {
mainloop_destroy_signal(SIGCHLD);
}
cib_delete(fsa_cib_conn);
fsa_cib_conn = NULL;
throttle_fini();
/* Graceful */
crm_trace("Done preparing for exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
return exit_code;
}
/* A_EXIT_0, A_EXIT_1 */
void
do_exit(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
int log_level = LOG_INFO;
const char *exit_type = "gracefully";
if (action & A_EXIT_1) {
log_level = LOG_ERR;
exit_type = "forcefully";
exit_code = CRM_EX_ERROR;
}
verify_stopped(cur_state, LOG_ERR);
do_crm_log(log_level, "Performing %s - %s exiting the controller",
fsa_action2string(action), exit_type);
crm_info("[%s] stopped (%d)", crm_system_name, exit_code);
crmd_exit(exit_code);
}
static void sigpipe_ignore(int nsig) { return; }
/* A_STARTUP */
void
do_startup(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_debug("Registering Signal Handlers");
mainloop_add_signal(SIGTERM, crm_shutdown);
mainloop_add_signal(SIGPIPE, sigpipe_ignore);
fsa_source = mainloop_add_trigger(G_PRIORITY_HIGH, crm_fsa_trigger, NULL);
config_read = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL);
transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL);
crm_debug("Creating CIB manager and executor objects");
fsa_cib_conn = cib_new();
lrm_state_init_local();
if (controld_init_fsa_timers() == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
// \return libqb error code (0 on success, -errno on error)
static int32_t
accept_controller_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
crm_trace("Accepting new IPC client connection");
if (pcmk__new_client(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
// \return libqb error code (0 on success, -errno on error)
static int32_t
dispatch_controller_ipc(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
pcmk__client_t *client = pcmk__find_client(c);
xmlNode *msg = pcmk__client_data2xml(client, data, &id, &flags);
if (msg == NULL) {
pcmk__ipc_send_ack(client, id, flags, "ack", CRM_EX_PROTOCOL);
return 0;
}
pcmk__ipc_send_ack(client, id, flags, "ack", CRM_EX_INDETERMINATE);
CRM_ASSERT(client->user != NULL);
pcmk__update_acl_user(msg, F_CRM_USER, client->user);
crm_xml_add(msg, F_CRM_SYS_FROM, client->id);
if (controld_authorize_ipc_message(msg, client, NULL)) {
crm_trace("Processing IPC message from client %s",
pcmk__client_name(client));
route_message(C_IPC_MESSAGE, msg);
}
trigger_fsa();
free_xml(msg);
return 0;
}
static int32_t
crmd_ipc_closed(qb_ipcs_connection_t * c)
{
pcmk__client_t *client = pcmk__find_client(c);
if (client) {
crm_trace("Disconnecting %sregistered client %s (%p/%p)",
(client->userdata? "" : "un"), pcmk__client_name(client),
c, client);
free(client->userdata);
pcmk__free_client(client);
trigger_fsa();
}
return 0;
}
static void
crmd_ipc_destroy(qb_ipcs_connection_t * c)
{
crm_trace("Connection %p", c);
crmd_ipc_closed(c);
}
/* A_STOP */
void
do_stop(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs); ipcs = NULL;
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* A_STARTED */
void
do_started(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static struct qb_ipcs_service_handlers crmd_callbacks = {
.connection_accept = accept_controller_client,
.connection_created = NULL,
.msg_process = dispatch_controller_ipc,
.connection_closed = crmd_ipc_closed,
.connection_destroyed = crmd_ipc_destroy
};
if (cur_state != S_STARTING) {
crm_err("Start cancelled... %s", fsa_state2string(cur_state));
return;
} else if (!pcmk_is_set(fsa_input_register, R_MEMBERSHIP)) {
crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(fsa_input_register, R_LRM_CONNECTED)) {
crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(fsa_input_register, R_CIB_CONNECTED)) {
crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(fsa_input_register, R_READ_CONFIG)) {
crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(fsa_input_register, R_PEER_DATA)) {
crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA);
crmd_fsa_stall(TRUE);
return;
}
crm_debug("Init server comms");
ipcs = pcmk__serve_controld_ipc(&crmd_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
} else {
crm_notice("Pacemaker controller successfully started and accepting connections");
}
controld_trigger_fencer_connect();
controld_clear_fsa_input_flags(R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
}
/* A_RECOVER */
void
do_recover(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
controld_set_fsa_input_flags(R_IN_RECOVERY);
crm_warn("Fast-tracking shutdown in response to errors");
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
static pcmk__cluster_option_t crmd_opts[] = {
/* name, old name, type, allowed values,
* default value, validator,
* short description,
* long description
*/
{
"dc-version", NULL, "string", NULL, "none", NULL,
"Pacemaker version on cluster node elected Designated Controller (DC)",
"Includes a hash which identifies the exact changeset the code was "
"built from. Used for diagnostic purposes."
},
{
"cluster-infrastructure", NULL, "string", NULL, "corosync", NULL,
"The messaging stack on which Pacemaker is currently running",
"Used for informational and diagnostic purposes."
},
{
"cluster-name", NULL, "string", NULL, NULL, NULL,
"An arbitrary name for the cluster",
"This optional value is mostly for users' convenience as desired "
"in administration, but may also be used in Pacemaker "
"configuration rules via the #cluster-name node attribute, and "
"by higher-level tools and resource agents."
},
{
XML_CONFIG_ATTR_DC_DEADTIME, NULL, "time",
NULL, "20s", pcmk__valid_interval_spec,
"How long to wait for a response from other nodes during start-up",
"The optimal value will depend on the speed and load of your network "
"and the type of switches used."
},
{
XML_CONFIG_ATTR_RECHECK, NULL, "time",
"Zero disables polling, while positive values are an interval in seconds"
"(unless other units are specified, for example \"5min\")",
"15min", pcmk__valid_interval_spec,
"Polling interval to recheck cluster state and evaluate rules "
"with date specifications",
"Pacemaker is primarily event-driven, and looks ahead to know when to "
"recheck cluster state for failure timeouts and most time-based "
"rules. However, it will also recheck the cluster after this "
"amount of inactivity, to evaluate rules with date specifications "
"and serve as a fail-safe for certain types of scheduler bugs."
},
{
"load-threshold", NULL, "percentage", NULL,
"80%", pcmk__valid_utilization,
"Maximum amount of system load that should be used by cluster nodes",
"The cluster will slow down its recovery process when the amount of "
"system resources used (currently CPU) approaches this limit",
},
{
"node-action-limit", NULL, "integer", NULL,
"0", pcmk__valid_number,
"Maximum number of jobs that can be scheduled per node "
"(defaults to 2x cores)"
},
{ XML_CONFIG_ATTR_FENCE_REACTION, NULL, "string", NULL, "stop", NULL,
"How a cluster node should react if notified of its own fencing",
"A cluster node may receive notification of its own fencing if fencing "
"is misconfigured, or if fabric fencing is in use that doesn't cut "
"cluster communication. Allowed values are \"stop\" to attempt to "
"immediately stop Pacemaker and stay stopped, or \"panic\" to attempt "
"to immediately reboot the local node, falling back to stop on failure."
},
{
XML_CONFIG_ATTR_ELECTION_FAIL, NULL, "time", NULL,
"2min", pcmk__valid_interval_spec,
"*** Advanced Use Only ***",
"Declare an election failed if it is not decided within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."
},
{
XML_CONFIG_ATTR_FORCE_QUIT, NULL, "time", NULL,
"20min", pcmk__valid_interval_spec,
"*** Advanced Use Only ***",
"Exit immediately if shutdown does not complete within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."
},
{
"join-integration-timeout", "crmd-integration-timeout", "time", NULL,
"3min", pcmk__valid_interval_spec,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates "
"the presence of a bug."
},
{
"join-finalization-timeout", "crmd-finalization-timeout", "time", NULL,
"30min", pcmk__valid_interval_spec,
"*** Advanced Use Only ***",
"If you need to adjust this value, it probably indicates "
"the presence of a bug."
},
{
"transition-delay", "crmd-transition-delay", "time", NULL,
"0s", pcmk__valid_interval_spec,
"*** Advanced Use Only *** Enabling this option will slow down "
"cluster recovery under all conditions",
"Delay cluster recovery for this much time to allow for additional "
"events to occur. Useful if your configuration is sensitive to "
"the order in which ping updates arrive."
},
{
"stonith-watchdog-timeout", NULL, "time", NULL,
"0", controld_verify_stonith_watchdog_timeout,
"How long to wait before we can assume nodes are safely down "
"when watchdog-based self-fencing via SBD is in use",
"If nonzero, along with `have-watchdog=true` automatically set by the "
"cluster, when fencing is required, watchdog-based self-fencing "
"will be performed via SBD without requiring a fencing resource "
"explicitly configured. "
"If `stonith-watchdog-timeout` is set to a positive value, unseen "
"nodes are assumed to self-fence within this much time. +WARNING:+ "
"It must be ensured that this value is larger than the "
"`SBD_WATCHDOG_TIMEOUT` environment variable on all nodes. "
"Pacemaker verifies the settings individually on all nodes and "
"prevents startup or shuts down if configured wrongly on the fly. "
"It's strongly recommended that `SBD_WATCHDOG_TIMEOUT` is set to "
"the same value on all nodes. "
"If `stonith-watchdog-timeout` is set to a negative value, and "
"`SBD_WATCHDOG_TIMEOUT` is set, twice that value will be used. "
"+WARNING:+ In this case, it's essential (currently not verified by "
"Pacemaker) that `SBD_WATCHDOG_TIMEOUT` is set to the same value on "
"all nodes."
},
{
"stonith-max-attempts", NULL, "integer", NULL,
"10", pcmk__valid_positive_number,
"How many times fencing can fail before it will no longer be "
"immediately re-attempted on a target"
},
// Already documented in libpe_status (other values must be kept identical)
{
"no-quorum-policy", NULL, "select", "stop, freeze, ignore, demote, suicide",
"stop", pcmk__valid_quorum, NULL, NULL
},
{
XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
"false", pcmk__valid_boolean, NULL, NULL
},
};
void
crmd_metadata(void)
{
pcmk__print_option_metadata("pacemaker-controld",
"Pacemaker controller options",
"Cluster options used by Pacemaker's "
"controller (formerly called crmd)",
crmd_opts, PCMK__NELEM(crmd_opts));
}
static void
verify_crmd_options(GHashTable * options)
{
pcmk__validate_cluster_options(options, crmd_opts, PCMK__NELEM(crmd_opts));
}
static const char *
crmd_pref(GHashTable * options, const char *name)
{
return pcmk__cluster_option(options, crmd_opts, PCMK__NELEM(crmd_opts),
name);
}
static void
config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
const char *value = NULL;
GHashTable *config_hash = NULL;
crm_time_t *now = crm_time_new(NULL);
xmlNode *crmconfig = NULL;
xmlNode *alerts = NULL;
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
crm_err("The cluster is mis-configured - shutting down and staying down");
controld_set_fsa_input_flags(R_STAYDOWN);
}
goto bail;
}
crmconfig = output;
if ((crmconfig) &&
(crm_element_name(crmconfig)) &&
(strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) {
crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG);
}
if (!crmconfig) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query for " XML_CIB_TAG_CRMCONFIG " section failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
goto bail;
}
crm_debug("Call %d : Parsing CIB options", call_id);
config_hash = pcmk__strkey_table(free, free);
pe_unpack_nvpairs(crmconfig, crmconfig, XML_CIB_TAG_PROPSET, NULL,
config_hash, CIB_OPTIONS_FIRST, FALSE, now, NULL);
verify_crmd_options(config_hash);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME);
election_trigger->period_ms = crm_parse_interval_spec(value);
value = crmd_pref(config_hash, "node-action-limit"); /* Also checks migration-limit */
throttle_update_job_max(value);
value = crmd_pref(config_hash, "load-threshold");
if(value) {
throttle_set_load_target(strtof(value, NULL) / 100.0);
}
value = crmd_pref(config_hash, "no-quorum-policy");
if (pcmk__str_eq(value, "suicide", pcmk__str_casei) && pcmk__locate_sbd()) {
no_quorum_suicide_escalation = TRUE;
}
set_fence_reaction(crmd_pref(config_hash, XML_CONFIG_ATTR_FENCE_REACTION));
value = crmd_pref(config_hash,"stonith-max-attempts");
update_stonith_max_attempts(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_FORCE_QUIT);
shutdown_escalation_timer->period_ms = crm_parse_interval_spec(value);
crm_debug("Shutdown escalation occurs if DC has not responded to request in %ums",
shutdown_escalation_timer->period_ms);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_ELECTION_FAIL);
controld_set_election_period(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_RECHECK);
recheck_interval_ms = crm_parse_interval_spec(value);
crm_debug("Re-run scheduler after %dms of inactivity", recheck_interval_ms);
value = crmd_pref(config_hash, "transition-delay");
transition_timer->period_ms = crm_parse_interval_spec(value);
value = crmd_pref(config_hash, "join-integration-timeout");
integration_timer->period_ms = crm_parse_interval_spec(value);
value = crmd_pref(config_hash, "join-finalization-timeout");
finalization_timer->period_ms = crm_parse_interval_spec(value);
value = crmd_pref(config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK);
controld_shutdown_lock_enabled = crm_is_true(value);
free(fsa_cluster_name);
fsa_cluster_name = NULL;
value = g_hash_table_lookup(config_hash, "cluster-name");
if (value) {
fsa_cluster_name = strdup(value);
}
alerts = first_named_child(output, XML_CIB_TAG_ALERTS);
crmd_unpack_alerts(alerts);
controld_set_fsa_input_flags(R_READ_CONFIG);
crm_trace("Triggering FSA: %s", __func__);
mainloop_set_trigger(fsa_source);
g_hash_table_destroy(config_hash);
bail:
crm_time_free(now);
}
gboolean
crm_read_options(gpointer user_data)
{
int call_id =
fsa_cib_conn->cmds->query(fsa_cib_conn,
"//" XML_CIB_TAG_CRMCONFIG " | //" XML_CIB_TAG_ALERTS,
NULL, cib_xpath | cib_scope_local);
fsa_register_cib_callback(call_id, FALSE, NULL, config_query_callback);
crm_trace("Querying the CIB... call %d", call_id);
return TRUE;
}
/* A_READCONFIG */
void
do_read_config(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
throttle_init();
mainloop_set_trigger(config_read);
}
void
crm_shutdown(int nsig)
{
if ((crmd_mainloop == NULL) || !g_main_loop_is_running(crmd_mainloop)) {
crmd_exit(CRM_EX_OK);
return;
}
if (pcmk_is_set(fsa_input_register, R_SHUTDOWN)) {
crm_err("Escalating shutdown");
register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL);
return;
}
controld_set_fsa_input_flags(R_SHUTDOWN);
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
if (shutdown_escalation_timer->period_ms == 0) {
const char *value = crmd_pref(NULL, XML_CONFIG_ATTR_FORCE_QUIT);
shutdown_escalation_timer->period_ms = crm_parse_interval_spec(value);
}
crm_notice("Initiating controller shutdown sequence " CRM_XS
" limit=%ums", shutdown_escalation_timer->period_ms);
controld_start_timer(shutdown_escalation_timer);
}
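
An aside on the crmd_opts[] table in the hunk above: each entry follows the field order given in the table's comment (name, old name, type, allowed values, default value, validator, short description, long description), and trailing fields may be omitted. A minimal sketch of what a new entry could look like; "example-delay" is an invented option name, not part of this diff or of Pacemaker:

{
    /* Hypothetical entry for illustration only; the field order matches
     * the comment at the top of crmd_opts[] */
    "example-delay", NULL, "time", NULL,
    "5s", pcmk__valid_interval_spec,
    "How long to wait before a hypothetical example action",
    "This long description would appear in the daemon metadata printed "
    "by crmd_metadata()."
},
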
diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c
index 09a3fbf654..ee665f801d 100644
--- a/daemons/controld/controld_schedulerd.c
+++ b/daemons/controld/controld_schedulerd.c
@@ -1,486 +1,484 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <unistd.h> /* pid_t, sleep, ssize_t */
#include <crm/cib.h>
#include <crm/cluster.h>
#include <crm/common/xml.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_schedulerd.h>
#include <pacemaker-controld.h>
-static void pe_ipc_destroy(void);
+static void handle_disconnect(void);
static pcmk_ipc_api_t *schedulerd_api = NULL;
/*!
* \internal
* \brief Close any scheduler connection and free associated memory
*/
void
-pe_subsystem_free(void)
+controld_shutdown_schedulerd_ipc(void)
{
controld_clear_fsa_input_flags(R_PE_REQUIRED);
pcmk_disconnect_ipc(schedulerd_api);
- pe_ipc_destroy();
+ handle_disconnect();
pcmk_free_ipc_api(schedulerd_api);
schedulerd_api = NULL;
}
/*!
* \internal
* \brief Save CIB query result to file, raising FSA error
*
* \param[in] msg Ignored
* \param[in] call_id Call ID of CIB query
* \param[in] rc Return code of CIB query
* \param[in] output Result of CIB query
* \param[in] user_data Unique identifier for filename (will be freed)
*
* \note This is intended to be called after a scheduler connection fails.
*/
static void
save_cib_contents(xmlNode *msg, int call_id, int rc, xmlNode *output,
void *user_data)
{
char *id = user_data;
register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__);
CRM_CHECK(id != NULL, return);
if (rc == pcmk_ok) {
char *filename = crm_strdup_printf(PE_STATE_DIR "/pe-core-%s.bz2", id);
if (write_xml_file(output, filename, TRUE) < 0) {
crm_err("Could not save Cluster Information Base to %s after scheduler crash",
filename);
} else {
crm_notice("Saved Cluster Information Base to %s after scheduler crash",
filename);
}
free(filename);
}
}
/*!
* \internal
* \brief Respond to scheduler connection failure
- *
- * \param[in] user_data Ignored
*/
static void
-pe_ipc_destroy(void)
+handle_disconnect(void)
{
// If we aren't connected to the scheduler, we can't expect a reply
controld_expect_sched_reply(NULL);
if (pcmk_is_set(fsa_input_register, R_PE_REQUIRED)) {
int rc = pcmk_ok;
char *uuid_str = crm_generate_uuid();
crm_crit("Connection to the scheduler failed "
CRM_XS " uuid=%s", uuid_str);
/*
* The scheduler died...
*
* Save the current CIB so that we have a chance of
* figuring out what killed it.
*
* Delay raising the I_ERROR until the query below completes or
* 5s is up, whichever comes first.
*
*/
rc = fsa_cib_conn->cmds->query(fsa_cib_conn, NULL, NULL, cib_scope_local);
fsa_register_cib_callback(rc, FALSE, uuid_str, save_cib_contents);
} else {
crm_info("Connection to the scheduler released");
}
controld_clear_fsa_input_flags(R_PE_CONNECTED);
mainloop_set_trigger(fsa_source);
return;
}
static void
handle_reply(pcmk_schedulerd_api_reply_t *reply)
{
const char *msg_ref = NULL;
if (!AM_I_DC) {
return;
}
msg_ref = reply->data.graph.reference;
if (msg_ref == NULL) {
crm_err("%s - Ignoring calculation with no reference", CRM_OP_PECALC);
} else if (pcmk__str_eq(msg_ref, fsa_pe_ref, pcmk__str_none)) {
ha_msg_input_t fsa_input;
xmlNode *crm_data_node;
controld_stop_sched_timer();
/* do_te_invoke (which will eventually process the fsa_input we are constructing
* here) requires that fsa_input.xml be non-NULL. That will only happen if
* copy_ha_msg_input (which is called by register_fsa_input_adv) sees the
* fsa_input.msg that it is expecting. The scheduler's IPC dispatch function
* gave us the values we need, we just need to put them into XML.
*
* The name of the top level element here is irrelevant. Nothing checks it.
*/
fsa_input.msg = create_xml_node(NULL, "dummy-reply");
crm_xml_add(fsa_input.msg, XML_ATTR_REFERENCE, msg_ref);
crm_xml_add(fsa_input.msg, F_CRM_TGRAPH_INPUT, reply->data.graph.input);
crm_data_node = create_xml_node(fsa_input.msg, F_CRM_DATA);
add_node_copy(crm_data_node, reply->data.graph.tgraph);
register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input);
free_xml(fsa_input.msg);
} else {
crm_info("%s calculation %s is obsolete", CRM_OP_PECALC, msg_ref);
}
}
static void
scheduler_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
pcmk_schedulerd_api_reply_t *reply = event_data;
switch (event_type) {
case pcmk_ipc_event_disconnect:
- pe_ipc_destroy();
+ handle_disconnect();
break;
case pcmk_ipc_event_reply:
handle_reply(reply);
break;
default:
break;
}
}
static bool
-pe_subsystem_new(void)
+new_schedulerd_ipc_connection(void)
{
int rc;
controld_set_fsa_input_flags(R_PE_REQUIRED);
if (schedulerd_api == NULL) {
rc = pcmk_new_ipc_api(&schedulerd_api, pcmk_ipc_schedulerd);
if (rc != pcmk_rc_ok) {
crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc));
return false;
}
}
pcmk_register_ipc_callback(schedulerd_api, scheduler_event_callback, NULL);
rc = pcmk_connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main);
if (rc != pcmk_rc_ok) {
crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc));
return false;
}
controld_set_fsa_input_flags(R_PE_CONNECTED);
return true;
}
static void do_pe_invoke_callback(xmlNode *msg, int call_id, int rc,
xmlNode *output, void *user_data);
/* A_PE_START, A_PE_STOP, O_PE_RESTART */
void
do_pe_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
if (action & A_PE_STOP) {
controld_clear_fsa_input_flags(R_PE_REQUIRED);
pcmk_disconnect_ipc(schedulerd_api);
- pe_ipc_destroy();
+ handle_disconnect();
}
if ((action & A_PE_START)
&& !pcmk_is_set(fsa_input_register, R_PE_CONNECTED)) {
if (cur_state == S_STOPPING) {
crm_info("Ignoring request to connect to scheduler while shutting down");
- } else if (!pe_subsystem_new()) {
+ } else if (!new_schedulerd_ipc_connection()) {
crm_warn("Could not connect to scheduler");
register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
}
}
}
int fsa_pe_query = 0;
char *fsa_pe_ref = NULL;
static mainloop_timer_t *controld_sched_timer = NULL;
// @TODO Make this a configurable cluster option if there's demand for it
#define SCHED_TIMEOUT_MS (120000)
/*!
* \internal
* \brief Handle a timeout waiting for scheduler reply
*
* \param[in] user_data Ignored
*
* \return FALSE (indicating that timer should not be restarted)
*/
static gboolean
controld_sched_timeout(gpointer user_data)
{
if (AM_I_DC) {
/* If this node is the DC but can't communicate with the scheduler, just
* exit (and likely get fenced) so this node doesn't interfere with any
* further DC elections.
*
* @TODO We could try something less drastic first, like disconnecting
* and reconnecting to the scheduler, but something is likely going
* seriously wrong, so perhaps it's better to just fail as quickly as
* possible.
*/
crmd_exit(CRM_EX_FATAL);
}
return FALSE;
}
void
controld_stop_sched_timer(void)
{
if (controld_sched_timer && fsa_pe_ref) {
crm_trace("Stopping timer for scheduler reply %s", fsa_pe_ref);
}
mainloop_timer_stop(controld_sched_timer);
}
/*!
* \internal
* \brief Set the scheduler request currently being waited on
*
* \param[in] ref Request to expect reply to (or NULL for none)
*/
void
controld_expect_sched_reply(char *ref)
{
if (ref) {
if (controld_sched_timer == NULL) {
controld_sched_timer = mainloop_timer_add("scheduler_reply_timer",
SCHED_TIMEOUT_MS, FALSE,
controld_sched_timeout,
NULL);
}
mainloop_timer_start(controld_sched_timer);
} else {
controld_stop_sched_timer();
}
free(fsa_pe_ref);
fsa_pe_ref = ref;
}
/*!
* \internal
* \brief Free the scheduler reply timer
*/
void
controld_free_sched_timer(void)
{
if (controld_sched_timer != NULL) {
mainloop_timer_del(controld_sched_timer);
controld_sched_timer = NULL;
}
}
/* A_PE_INVOKE */
void
do_pe_invoke(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
if (AM_I_DC == FALSE) {
crm_err("Not invoking scheduler because not DC: %s",
fsa_action2string(action));
return;
}
if (!pcmk_is_set(fsa_input_register, R_PE_CONNECTED)) {
if (pcmk_is_set(fsa_input_register, R_SHUTDOWN)) {
crm_err("Cannot shut down gracefully without the scheduler");
register_fsa_input_before(C_FSA_INTERNAL, I_TERMINATE, NULL);
} else {
crm_info("Waiting for the scheduler to connect");
crmd_fsa_stall(FALSE);
controld_set_fsa_action_flags(A_PE_START);
trigger_fsa();
}
return;
}
if (cur_state != S_POLICY_ENGINE) {
crm_notice("Not invoking scheduler because in state %s",
fsa_state2string(cur_state));
return;
}
if (!pcmk_is_set(fsa_input_register, R_HAVE_CIB)) {
crm_err("Attempted to invoke scheduler without consistent Cluster Information Base!");
/* start the join from scratch */
register_fsa_input_before(C_FSA_INTERNAL, I_ELECTION, NULL);
return;
}
fsa_pe_query = fsa_cib_conn->cmds->query(fsa_cib_conn, NULL, NULL, cib_scope_local);
crm_debug("Query %d: Requesting the current CIB: %s", fsa_pe_query,
fsa_state2string(fsa_state));
controld_expect_sched_reply(NULL);
fsa_register_cib_callback(fsa_pe_query, FALSE, NULL, do_pe_invoke_callback);
}
static void
force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value)
{
int max = 0;
int lpc = 0;
char *xpath_string = NULL;
xmlXPathObjectPtr xpathObj = NULL;
xpath_string = crm_strdup_printf("%.128s//%s//nvpair[@name='%.128s']",
get_object_path(XML_CIB_TAG_CRMCONFIG),
XML_CIB_TAG_PROPSET, attr_name);
xpathObj = xpath_search(xml, xpath_string);
max = numXpathResults(xpathObj);
free(xpath_string);
for (lpc = 0; lpc < max; lpc++) {
xmlNode *match = getXpathResult(xpathObj, lpc);
crm_trace("Forcing %s/%s = %s", ID(match), attr_name, attr_value);
crm_xml_add(match, XML_NVPAIR_ATTR_VALUE, attr_value);
}
if(max == 0) {
xmlNode *configuration = NULL;
xmlNode *crm_config = NULL;
xmlNode *cluster_property_set = NULL;
crm_trace("Creating %s-%s for %s=%s",
CIB_OPTIONS_FIRST, attr_name, attr_name, attr_value);
configuration = pcmk__xe_match(xml, XML_CIB_TAG_CONFIGURATION, NULL,
NULL);
if (configuration == NULL) {
configuration = create_xml_node(xml, XML_CIB_TAG_CONFIGURATION);
}
crm_config = pcmk__xe_match(configuration, XML_CIB_TAG_CRMCONFIG, NULL,
NULL);
if (crm_config == NULL) {
crm_config = create_xml_node(configuration, XML_CIB_TAG_CRMCONFIG);
}
cluster_property_set = pcmk__xe_match(crm_config, XML_CIB_TAG_PROPSET,
NULL, NULL);
if (cluster_property_set == NULL) {
cluster_property_set = create_xml_node(crm_config, XML_CIB_TAG_PROPSET);
crm_xml_add(cluster_property_set, XML_ATTR_ID, CIB_OPTIONS_FIRST);
}
xml = create_xml_node(cluster_property_set, XML_CIB_TAG_NVPAIR);
crm_xml_set_id(xml, "%s-%s", CIB_OPTIONS_FIRST, attr_name);
crm_xml_add(xml, XML_NVPAIR_ATTR_NAME, attr_name);
crm_xml_add(xml, XML_NVPAIR_ATTR_VALUE, attr_value);
}
freeXpathObject(xpathObj);
}
static void
do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
char *ref = NULL;
pid_t watchdog = pcmk__locate_sbd();
if (rc != pcmk_ok) {
crm_err("Could not retrieve the Cluster Information Base: %s "
CRM_XS " rc=%d call=%d", pcmk_strerror(rc), rc, call_id);
register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__);
return;
} else if (call_id != fsa_pe_query) {
crm_trace("Skipping superseded CIB query: %d (current=%d)", call_id, fsa_pe_query);
return;
} else if (!AM_I_DC || !pcmk_is_set(fsa_input_register, R_PE_CONNECTED)) {
crm_debug("No need to invoke the scheduler anymore");
return;
} else if (fsa_state != S_POLICY_ENGINE) {
crm_debug("Discarding scheduler request in state: %s",
fsa_state2string(fsa_state));
return;
/* this callback counts as 1 */
} else if (num_cib_op_callbacks() > 1) {
crm_debug("Re-asking for the CIB: %d other peer updates still pending",
(num_cib_op_callbacks() - 1));
sleep(1);
controld_set_fsa_action_flags(A_PE_INVOKE);
trigger_fsa();
return;
}
CRM_LOG_ASSERT(output != NULL);
/* Refresh the remote node cache and the known node cache when the
* scheduler is invoked */
pcmk__refresh_node_caches_from_cib(output);
crm_xml_add(output, XML_ATTR_DC_UUID, fsa_our_uuid);
crm_xml_add_int(output, XML_ATTR_HAVE_QUORUM, fsa_has_quorum);
force_local_option(output, XML_ATTR_HAVE_WATCHDOG, pcmk__btoa(watchdog));
if (ever_had_quorum && crm_have_quorum == FALSE) {
crm_xml_add_int(output, XML_ATTR_QUORUM_PANIC, 1);
}
rc = pcmk_rc2legacy(pcmk_schedulerd_api_graph(schedulerd_api, output, &ref));
if (rc < 0) {
crm_err("Could not contact the scheduler: %s " CRM_XS " rc=%d",
pcmk_strerror(rc), rc);
register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__);
} else {
CRM_ASSERT(ref != NULL);
controld_expect_sched_reply(ref);
crm_debug("Invoking the scheduler: query=%d, ref=%s, seq=%llu, quorate=%d",
fsa_pe_query, fsa_pe_ref, crm_peer_seq, fsa_has_quorum);
}
}
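
The rewritten controld_schedulerd.c above drives the scheduler through the generic pcmk IPC API rather than a bespoke subsystem. A minimal, self-contained sketch of that connect/request/callback pattern, using only calls that appear in the hunk; run_scheduler_once() and event_cb() are invented names, and a running GLib main loop is assumed, since pcmk_ipc_dispatch_main delivers replies from the main loop:

#include <stdlib.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_schedulerd.h>

// Invented callback: log the reference of any transition-graph reply
static void
event_cb(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
         crm_exit_t status, void *event_data, void *user_data)
{
    if (event_type == pcmk_ipc_event_reply) {
        pcmk_schedulerd_api_reply_t *reply = event_data;

        crm_info("Scheduler replied " CRM_XS " ref=%s",
                 reply->data.graph.reference);
    }
}

// Connect, submit one CIB for calculation, and clean up on error paths
static int
run_scheduler_once(xmlNode *cib_xml)
{
    pcmk_ipc_api_t *api = NULL;
    char *ref = NULL;
    int rc = pcmk_new_ipc_api(&api, pcmk_ipc_schedulerd);

    if (rc == pcmk_rc_ok) {
        pcmk_register_ipc_callback(api, event_cb, NULL);
        rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_main);
    }
    if (rc == pcmk_rc_ok) {
        /* Ask the scheduler to compute a transition graph for cib_xml;
         * the reply arrives later via event_cb() from the main loop
         */
        rc = pcmk_schedulerd_api_graph(api, cib_xml, &ref);
        free(ref);
    }
    if ((rc != pcmk_rc_ok) && (api != NULL)) {
        // On failure, tear the connection down as the shutdown path above does
        pcmk_disconnect_ipc(api);
        pcmk_free_ipc_api(api);
    }
    return rc;
}
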
diff --git a/daemons/controld/controld_utils.h b/daemons/controld/controld_utils.h
index e6aa400c89..a695194a20 100644
--- a/daemons/controld/controld_utils.h
+++ b/daemons/controld/controld_utils.h
@@ -1,127 +1,127 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRMD_UTILS__H
# define CRMD_UTILS__H
# include <crm/crm.h>
# include <crm/common/xml.h>
# include <crm/cib/internal.h> // CIB_OP_MODIFY
# include <controld_fsa.h> // fsa_cib_conn
# include <controld_alerts.h>
# define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# define fsa_cib_update(section, data, options, call_id, user_name) \
if(fsa_cib_conn != NULL) { \
call_id = cib_internal_op( \
fsa_cib_conn, CIB_OP_MODIFY, NULL, section, data, \
NULL, options, user_name); \
\
} else { \
crm_err("No CIB manager connection available"); \
}
static inline void
fsa_cib_anon_update(const char *section, xmlNode *data) {
if (fsa_cib_conn == NULL) {
crm_err("No CIB connection available");
} else {
int opts = cib_scope_local | cib_quorum_override | cib_can_create;
fsa_cib_conn->cmds->modify(fsa_cib_conn, section, data, opts);
}
}
static inline void
fsa_cib_anon_update_discard_reply(const char *section, xmlNode *data) {
if (fsa_cib_conn == NULL) {
crm_err("No CIB connection available");
} else {
int opts = cib_scope_local | cib_quorum_override | cib_can_create | cib_discard_reply;
fsa_cib_conn->cmds->modify(fsa_cib_conn, section, data, opts);
}
}
extern gboolean fsa_has_quorum;
extern bool controld_shutdown_lock_enabled;
extern int last_peer_update;
extern int last_resource_update;
enum node_update_flags {
node_update_none = 0x0000,
node_update_quick = 0x0001,
node_update_cluster = 0x0010,
node_update_peer = 0x0020,
node_update_join = 0x0040,
node_update_expected = 0x0100,
node_update_all = node_update_cluster|node_update_peer|node_update_join|node_update_expected,
};
crm_exit_t crmd_exit(crm_exit_t exit_code);
_Noreturn void crmd_fast_exit(crm_exit_t exit_code);
-void pe_subsystem_free(void);
+void controld_shutdown_schedulerd_ipc(void);
void controld_stop_sched_timer(void);
void controld_free_sched_timer(void);
void controld_expect_sched_reply(char *ref);
void fsa_dump_actions(uint64_t action, const char *text);
void fsa_dump_inputs(int log_level, const char *text, long long input_register);
gboolean update_dc(xmlNode * msg);
void crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase phase);
xmlNode *create_node_state_update(crm_node_t *node, int flags,
xmlNode *parent, const char *source);
void populate_cib_nodes(enum node_update_flags flags, const char *source);
void crm_update_quorum(gboolean quorum, gboolean force_update);
void controld_close_attrd_ipc(void);
void update_attrd(const char *host, const char *name, const char *value, const char *user_name, gboolean is_remote_node);
void update_attrd_remote_node_removed(const char *host, const char *user_name);
void update_attrd_clear_failures(const char *host, const char *rsc,
const char *op, const char *interval_spec,
gboolean is_remote_node);
int crmd_join_phase_count(enum crm_join_phase phase);
void crmd_join_phase_log(int level);
void crmd_peer_down(crm_node_t *peer, bool full);
unsigned int cib_op_timeout(void);
bool feature_set_compatible(const char *dc_version, const char *join_version);
bool controld_action_is_recordable(const char *action);
// Subsections of node_state
enum controld_section_e {
controld_section_lrm,
controld_section_lrm_unlocked,
controld_section_attrs,
controld_section_all,
controld_section_all_unlocked
};
void controld_delete_node_state(const char *uname,
enum controld_section_e section, int options);
int controld_delete_resource_history(const char *rsc_id, const char *node,
const char *user_name, int call_options);
const char *get_node_id(xmlNode *lrm_rsc_op);
/* Convenience macro for registering a CIB callback
* (assumes that data can be freed with free())
*/
# define fsa_register_cib_callback(id, flag, data, fn) do { \
CRM_ASSERT(fsa_cib_conn); \
fsa_cib_conn->cmds->register_callback_full( \
fsa_cib_conn, id, cib_op_timeout(), \
flag, data, #fn, fn, free); \
} while(0)
#endif
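
The fsa_register_cib_callback() convenience macro defined above pairs a CIB call ID with a result handler whose data argument is freed with free(). A short usage sketch modeled on crm_read_options() in controld_control.c; query_done() and query_cib_config() are invented names:

static void
query_done(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data)
{
    crm_trace("CIB query %d finished: %s", call_id, pcmk_strerror(rc));
}

static void
query_cib_config(void)
{
    int call_id = fsa_cib_conn->cmds->query(fsa_cib_conn,
                                            "//" XML_CIB_TAG_CRMCONFIG,
                                            NULL, cib_xpath | cib_scope_local);

    // The macro asserts fsa_cib_conn and registers query_done for call_id
    fsa_register_cib_callback(call_id, FALSE, NULL, query_done);
}
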
