diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index bae1bfd79b..a5dac1272a 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -1,224 +1,223 @@ /* * Copyright 2013-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" #define SUMMARY "daemon for managing Pacemaker node attributes" gboolean stand_alone = FALSE; gchar **log_files = NULL; static GOptionEntry entries[] = { { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone, "(Advanced use only) Run in stand-alone mode", NULL }, { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, &log_files, "Send logs to the additional named logfile", NULL }, { NULL } }; static pcmk__output_t *out = NULL; static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; lrmd_t *the_lrmd = NULL; pcmk_cluster_t *attrd_cluster = NULL; crm_trigger_t *attrd_config_read = NULL; crm_exit_t attrd_exit_status = CRM_EX_OK; static bool ipc_already_running(void) { pcmk_ipc_api_t *old_instance = NULL; int rc = pcmk_rc_ok; rc = pcmk_new_ipc_api(&old_instance, pcmk_ipc_attrd); if (rc != pcmk_rc_ok) { return false; } rc = pcmk__connect_ipc(old_instance, pcmk_ipc_dispatch_sync, 2); if (rc != pcmk_rc_ok) { crm_debug("No existing %s manager instance found: %s", pcmk_ipc_name(old_instance, true), pcmk_rc_str(rc)); pcmk_free_ipc_api(old_instance); return false; } pcmk_disconnect_ipc(old_instance); pcmk_free_ipc_api(old_instance); return true; } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, entries); return context; } int main(int argc, char **argv) { int rc = pcmk_rc_ok; GError *error = NULL; bool initialized = false; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, NULL); GOptionContext *context = build_arg_context(args, &output_group); attrd_init_mainloop(); crm_log_preinit(NULL, argc, argv); mainloop_add_signal(SIGTERM, attrd_shutdown); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { attrd_exit_status = CRM_EX_USAGE; goto done; } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if ((rc != pcmk_rc_ok) || (out == NULL)) { attrd_exit_status = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } // Open additional log files pcmk__add_logfiles(log_files, out); crm_log_init(PCMK__VALUE_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_notice("Starting Pacemaker node attribute manager%s", stand_alone ? 
" in standalone mode" : ""); if (ipc_already_running()) { attrd_exit_status = CRM_EX_OK; g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "Aborting start-up because an attribute manager " "instance is already active"); crm_crit("%s", error->message); goto done; } initialized = true; attributes = pcmk__strkey_table(NULL, attrd_free_attribute); /* Connect to the CIB before connecting to the cluster or listening for IPC. * This allows us to assume the CIB is connected whenever we process a * cluster or IPC message (which also avoids start-up race conditions). */ if (!stand_alone) { if (attrd_cib_connect(30) != pcmk_ok) { attrd_exit_status = CRM_EX_FATAL; g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "Could not connect to the CIB"); goto done; } crm_info("CIB connection active"); } if (attrd_cluster_connect() != pcmk_ok) { attrd_exit_status = CRM_EX_FATAL; g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "Could not connect to the cluster"); goto done; } crm_info("Cluster connection active"); // Initialization that requires the cluster to be connected attrd_election_init(); if (!stand_alone) { attrd_cib_init(); } /* Set a private attribute for ourselves with the protocol version we * support. This lets all nodes determine the minimum supported version * across all nodes. It also ensures that the writer learns our node name, * so it can send our attributes to the CIB. */ attrd_broadcast_protocol(); attrd_init_ipc(); crm_notice("Pacemaker node attribute manager successfully started and accepting connections"); attrd_run_mainloop(); done: if (initialized) { crm_info("Shutting down attribute manager"); attrd_ipc_fini(); attrd_lrmd_disconnect(); if (!stand_alone) { attrd_cib_disconnect(); } attrd_free_waitlist(); pcmk_cluster_disconnect(attrd_cluster); pcmk_cluster_free(attrd_cluster); g_hash_table_destroy(attributes); } g_strfreev(processed_args); pcmk__free_arg_context(context); g_strfreev(log_files); pcmk__output_and_clear_error(&error, out); if (out != NULL) { out->finish(out, attrd_exit_status, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(attrd_exit_status); } diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c index 9b254f1b70..4b00f894ef 100644 --- a/daemons/controld/controld_control.c +++ b/daemons/controld/controld_control.c @@ -1,698 +1,697 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include -#include #include #include #include #include static qb_ipcs_service_t *ipcs = NULL; static crm_trigger_t *config_read_trigger = NULL; #if SUPPORT_COROSYNC extern gboolean crm_connect_corosync(pcmk_cluster_t *cluster); #endif static void crm_shutdown(int nsig); static gboolean crm_read_options(gpointer user_data); /* A_HA_CONNECT */ void do_ha_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean registered = FALSE; if (controld_globals.cluster == NULL) { controld_globals.cluster = pcmk_cluster_new(); } if (action & A_HA_DISCONNECT) { pcmk_cluster_disconnect(controld_globals.cluster); crm_info("Disconnected from the cluster"); controld_set_fsa_input_flags(R_HA_DISCONNECTED); } if (action & A_HA_CONNECT) { pcmk__cluster_set_status_callback(&peer_update_callback); pcmk__cluster_set_autoreap(false); #if SUPPORT_COROSYNC if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { registered = crm_connect_corosync(controld_globals.cluster); } #endif // SUPPORT_COROSYNC if (registered) { pcmk__node_status_t *node = controld_get_local_node_status(); controld_election_init(); free(controld_globals.our_uuid); controld_globals.our_uuid = pcmk__str_copy(pcmk__cluster_node_uuid(node)); if (controld_globals.our_uuid == NULL) { crm_err("Could not obtain local uuid"); registered = FALSE; } } if (!registered) { controld_set_fsa_input_flags(R_HA_DISCONNECTED); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } populate_cib_nodes(node_update_none, __func__); controld_clear_fsa_input_flags(R_HA_DISCONNECTED); crm_info("Connected to the cluster"); } if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __func__); } } /* A_SHUTDOWN */ void do_shutdown(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* just in case */ controld_set_fsa_input_flags(R_SHUTDOWN); controld_disconnect_fencer(FALSE); } /* A_SHUTDOWN_REQ */ void do_shutdown_req(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *msg = NULL; controld_set_fsa_input_flags(R_SHUTDOWN); //controld_set_fsa_input_flags(R_STAYDOWN); crm_info("Sending shutdown request to all peers (DC is %s)", pcmk__s(controld_globals.dc_name, "not set")); msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_CRMD, NULL, CRM_SYSTEM_CRMD, CRM_OP_SHUTDOWN_REQ, NULL); if (!pcmk__cluster_send_message(NULL, pcmk_ipc_controld, msg)) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } pcmk__xml_free(msg); } void crmd_fast_exit(crm_exit_t exit_code) { if (pcmk_is_set(controld_globals.fsa_input_register, R_STAYDOWN)) { crm_warn("Inhibiting respawn " QB_XS " remapping exit code %d to %d", exit_code, CRM_EX_FATAL); exit_code = CRM_EX_FATAL; } else if ((exit_code == CRM_EX_OK) && pcmk_is_set(controld_globals.fsa_input_register, R_IN_RECOVERY)) { crm_err("Could not recover from internal error"); exit_code = CRM_EX_ERROR; } if (controld_globals.logger_out != NULL) { controld_globals.logger_out->finish(controld_globals.logger_out, exit_code, true, NULL); pcmk__output_free(controld_globals.logger_out); controld_globals.logger_out = NULL; } crm_exit(exit_code); } crm_exit_t crmd_exit(crm_exit_t exit_code) { GMainLoop *mloop = controld_globals.mainloop; static bool in_progress = FALSE; if 
(in_progress && (exit_code == CRM_EX_OK)) { crm_debug("Exit is already in progress"); return exit_code; } else if(in_progress) { crm_notice("Error during shutdown process, exiting now with status %d (%s)", exit_code, crm_exit_str(exit_code)); crm_write_blackbox(SIGTRAP, NULL); crmd_fast_exit(exit_code); } in_progress = TRUE; crm_trace("Preparing to exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); /* Suppress secondary errors resulting from us disconnecting everything */ controld_set_fsa_input_flags(R_HA_DISCONNECTED); /* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */ if(ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } controld_close_attrd_ipc(); controld_shutdown_schedulerd_ipc(); controld_disconnect_fencer(TRUE); if ((exit_code == CRM_EX_OK) && (controld_globals.mainloop == NULL)) { crm_debug("No mainloop detected"); exit_code = CRM_EX_ERROR; } /* On an error, just get out. * * Otherwise, make the effort to have mainloop exit gracefully so * that it (mostly) cleans up after itself and valgrind has less * to report on - allowing real errors stand out */ if (exit_code != CRM_EX_OK) { crm_notice("Forcing immediate exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); crm_write_blackbox(SIGTRAP, NULL); crmd_fast_exit(exit_code); } /* Clean up as much memory as possible for valgrind */ for (GList *iter = controld_globals.fsa_message_queue; iter != NULL; iter = iter->next) { fsa_data_t *fsa_data = (fsa_data_t *) iter->data; crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]", fsa_input2string(fsa_data->fsa_input), fsa_state2string(controld_globals.fsa_state), fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); delete_fsa_input(fsa_data); } controld_clear_fsa_input_flags(R_MEMBERSHIP); g_list_free(controld_globals.fsa_message_queue); controld_globals.fsa_message_queue = NULL; controld_free_node_pending_timers(); election_reset(controld_globals.cluster); // Stop any election timer /* Tear down the CIB manager connection, but don't free it yet -- it could * be used when we drain the mainloop later. 
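
Farther down, this exit path drains any pending GLib main-context events before quitting the loop. For reference, a minimal standalone sketch of that bounded "drain, then quit" pattern, assuming the usual GLib headers; the function name and bound are illustrative and not part of this file (the real code caps the drain at 10 iterations and traces each pass):

static void
drain_and_quit(GMainLoop *mloop, int max_iterations)
{
    GMainContext *ctx = g_main_loop_get_context(mloop);

    /* Dispatch pending sources a bounded number of times so shutdown
     * cannot spin forever on a busy context
     */
    for (int i = 0; (i < max_iterations) && g_main_context_pending(ctx); i++) {
        g_main_context_dispatch(ctx);
    }
    g_main_loop_quit(mloop);    /* takes effect once the loop regains control */
}
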
*/ controld_disconnect_cib_manager(); verify_stopped(controld_globals.fsa_state, LOG_WARNING); controld_clear_fsa_input_flags(R_LRM_CONNECTED); lrm_state_destroy_all(); mainloop_destroy_trigger(config_read_trigger); config_read_trigger = NULL; controld_destroy_fsa_trigger(); controld_destroy_transition_trigger(); pcmk__client_cleanup(); pcmk__cluster_destroy_node_caches(); controld_free_fsa_timers(); te_cleanup_stonith_history_sync(NULL, TRUE); controld_free_sched_timer(); free(controld_globals.our_uuid); controld_globals.our_uuid = NULL; free(controld_globals.dc_name); controld_globals.dc_name = NULL; free(controld_globals.dc_version); controld_globals.dc_version = NULL; free(controld_globals.cluster_name); controld_globals.cluster_name = NULL; free(controld_globals.te_uuid); controld_globals.te_uuid = NULL; free_max_generation(); controld_destroy_failed_sync_table(); controld_destroy_outside_events_table(); mainloop_destroy_signal(SIGPIPE); mainloop_destroy_signal(SIGUSR1); mainloop_destroy_signal(SIGTERM); mainloop_destroy_signal(SIGTRAP); /* leave SIGCHLD engaged as we might still want to drain some service-actions */ if (mloop) { GMainContext *ctx = g_main_loop_get_context(controld_globals.mainloop); /* Don't re-enter this block */ controld_globals.mainloop = NULL; /* no signals on final draining anymore */ mainloop_destroy_signal(SIGCHLD); crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); { int lpc = 0; while((g_main_context_pending(ctx) && lpc < 10)) { lpc++; crm_trace("Iteration %d", lpc); g_main_context_dispatch(ctx); } } crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); g_main_loop_quit(mloop); /* Won't do anything yet, since we're inside it now */ g_main_loop_unref(mloop); } else { mainloop_destroy_signal(SIGCHLD); } cib_delete(controld_globals.cib_conn); controld_globals.cib_conn = NULL; throttle_fini(); pcmk_cluster_free(controld_globals.cluster); controld_globals.cluster = NULL; /* Graceful */ crm_trace("Done preparing for exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); return exit_code; } /* A_EXIT_0, A_EXIT_1 */ void do_exit(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_exit_t exit_code = CRM_EX_OK; if (pcmk_is_set(action, A_EXIT_1)) { exit_code = CRM_EX_ERROR; crm_err("Exiting now due to errors"); } verify_stopped(cur_state, LOG_ERR); crmd_exit(exit_code); } static void sigpipe_ignore(int nsig) { return; } /* A_STARTUP */ void do_startup(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_debug("Registering Signal Handlers"); mainloop_add_signal(SIGTERM, crm_shutdown); mainloop_add_signal(SIGPIPE, sigpipe_ignore); config_read_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL); controld_init_fsa_trigger(); controld_init_transition_trigger(); crm_debug("Creating CIB manager and executor objects"); controld_globals.cib_conn = cib_new(); lrm_state_init_local(); if (controld_init_fsa_timers() == FALSE) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } // \return libqb error code (0 on success, -errno on error) static int32_t accept_controller_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("Accepting new IPC client connection"); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } // \return libqb error code (0 on success, 
-errno on error) static int32_t dispatch_controller_ipc(qb_ipcs_connection_t * c, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); xmlNode *msg = pcmk__client_data2xml(client, data, &id, &flags); if (msg == NULL) { pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); pcmk__assert(client->user != NULL); pcmk__update_acl_user(msg, PCMK__XA_CRM_USER, client->user); crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, client->id); if (controld_authorize_ipc_message(msg, client, NULL)) { crm_trace("Processing IPC message from client %s", pcmk__client_name(client)); route_message(C_IPC_MESSAGE, msg); } controld_trigger_fsa(); pcmk__xml_free(msg); return 0; } static int32_t ipc_client_disconnected(qb_ipcs_connection_t *c) { pcmk__client_t *client = pcmk__find_client(c); if (client) { crm_trace("Disconnecting %sregistered client %s (%p/%p)", (client->userdata? "" : "un"), pcmk__client_name(client), c, client); free(client->userdata); pcmk__free_client(client); controld_trigger_fsa(); } return 0; } static void ipc_connection_destroyed(qb_ipcs_connection_t *c) { crm_trace("Connection %p", c); ipc_client_disconnected(c); } /* A_STOP */ void do_stop(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } /* A_STARTED */ void do_started(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { static struct qb_ipcs_service_handlers crmd_callbacks = { .connection_accept = accept_controller_client, .connection_created = NULL, .msg_process = dispatch_controller_ipc, .connection_closed = ipc_client_disconnected, .connection_destroyed = ipc_connection_destroyed }; if (cur_state != S_STARTING) { crm_err("Start cancelled... 
%s", fsa_state2string(cur_state)); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_MEMBERSHIP)) { crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) { crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) { crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_READ_CONFIG)) { crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_PEER_DATA)) { crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA); crmd_fsa_stall(TRUE); return; } crm_debug("Init server comms"); ipcs = pcmk__serve_controld_ipc(&crmd_callbacks); if (ipcs == NULL) { crm_err("Failed to create IPC server: shutting down and inhibiting respawn"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else { crm_notice("Pacemaker controller successfully started and accepting connections"); } controld_set_fsa_input_flags(R_ST_REQUIRED); controld_timer_fencer_connect(GINT_TO_POINTER(TRUE)); controld_clear_fsa_input_flags(R_STARTING); register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL); } /* A_RECOVER */ void do_recover(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { controld_set_fsa_input_flags(R_IN_RECOVERY); crm_warn("Fast-tracking shutdown in response to errors"); register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } static void config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { const char *value = NULL; GHashTable *config_hash = NULL; crm_time_t *now = crm_time_new(NULL); xmlNode *crmconfig = NULL; xmlNode *alerts = NULL; pcmk_rule_input_t rule_input = { .now = now, }; if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); if (rc == -EACCES || rc == -pcmk_err_schema_validation) { crm_err("The cluster is mis-configured - shutting down and staying down"); controld_set_fsa_input_flags(R_STAYDOWN); } goto bail; } crmconfig = output; if ((crmconfig != NULL) && !pcmk__xe_is(crmconfig, PCMK_XE_CRM_CONFIG)) { crmconfig = pcmk__xe_first_child(crmconfig, PCMK_XE_CRM_CONFIG, NULL, NULL); } if (!crmconfig) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query for " PCMK_XE_CRM_CONFIG " section failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); goto bail; } crm_debug("Call %d : Parsing CIB options", call_id); config_hash = pcmk__strkey_table(free, free); pcmk_unpack_nvpair_blocks(crmconfig, PCMK_XE_CLUSTER_PROPERTY_SET, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, &rule_input, config_hash, NULL); // Validate all options, and use defaults if not already present in hash pcmk__validate_cluster_options(config_hash); /* Validate the watchdog timeout in the context of the local node * environment. If invalid, the controller will exit with a fatal error. * * We do this via a wrapper in the controller, so that we call * pcmk__valid_stonith_watchdog_timeout() only if watchdog fencing is * enabled for the local node. Otherwise, we may exit unnecessarily. 
* * A validator function in libcrmcommon can't act as such a wrapper, because * it doesn't have a stonith API connection or the local node name. */ value = g_hash_table_lookup(config_hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT); controld_verify_stonith_watchdog_timeout(value); value = g_hash_table_lookup(config_hash, PCMK_OPT_NO_QUORUM_POLICY); if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY, NULL) && (pcmk__locate_sbd() != 0)) { controld_set_global_flags(controld_no_quorum_panic); } value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK); if (crm_is_true(value)) { controld_set_global_flags(controld_shutdown_lock_enabled); } else { controld_clear_global_flags(controld_shutdown_lock_enabled); } value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); pcmk_parse_interval_spec(value, &controld_globals.shutdown_lock_limit); controld_globals.shutdown_lock_limit /= 1000; value = g_hash_table_lookup(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); pcmk_parse_interval_spec(value, &controld_globals.node_pending_timeout); controld_globals.node_pending_timeout /= 1000; value = g_hash_table_lookup(config_hash, PCMK_OPT_CLUSTER_NAME); pcmk__str_update(&(controld_globals.cluster_name), value); // Let subcomponents initialize their own static variables controld_configure_election(config_hash); controld_configure_fencing(config_hash); controld_configure_fsa_timers(config_hash); controld_configure_throttle(config_hash); alerts = pcmk__xe_first_child(output, PCMK_XE_ALERTS, NULL, NULL); crmd_unpack_alerts(alerts); controld_set_fsa_input_flags(R_READ_CONFIG); controld_trigger_fsa(); g_hash_table_destroy(config_hash); bail: crm_time_free(now); } /*! * \internal * \brief Trigger read and processing of the configuration * * \param[in] fn Calling function name * \param[in] line Line number where call occurred */ void controld_trigger_config_as(const char *fn, int line) { if (config_read_trigger != NULL) { crm_trace("%s:%d - Triggered config processing", fn, line); mainloop_set_trigger(config_read_trigger); } } gboolean crm_read_options(gpointer user_data) { cib_t *cib_conn = controld_globals.cib_conn; int call_id = cib_conn->cmds->query(cib_conn, "//" PCMK_XE_CRM_CONFIG " | //" PCMK_XE_ALERTS, NULL, cib_xpath); fsa_register_cib_callback(call_id, NULL, config_query_callback); crm_trace("Querying the CIB... call %d", call_id); return TRUE; } /* A_READCONFIG */ void do_read_config(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { throttle_init(); controld_trigger_config(); } static void crm_shutdown(int nsig) { const char *value = NULL; guint default_period_ms = 0; if ((controld_globals.mainloop == NULL) || !g_main_loop_is_running(controld_globals.mainloop)) { crmd_exit(CRM_EX_OK); return; } if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { crm_err("Escalating shutdown"); register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL); return; } controld_set_fsa_input_flags(R_SHUTDOWN); register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); /* If shutdown timer doesn't have a period set, use the default * * @TODO: Evaluate whether this is still necessary. As long as * config_query_callback() has been run at least once, it doesn't look like * anything could have changed the timer period since then. 
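
For reference, pcmk_parse_interval_spec() — used just below for the shutdown-escalation period and above for PCMK_OPT_SHUTDOWN_LOCK_LIMIT and PCMK_OPT_NODE_PENDING_TIMEOUT — converts an interval specification string into milliseconds. A small usage sketch; "20min" is an arbitrary example value, not a Pacemaker default:

guint period_ms = 0;

/* On success, period_ms becomes 1200000 (20 minutes in milliseconds);
 * the controller later stores several of these values in whole seconds
 */
(void) pcmk_parse_interval_spec("20min", &period_ms);
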
*/ value = pcmk__cluster_option(NULL, PCMK_OPT_SHUTDOWN_ESCALATION); pcmk_parse_interval_spec(value, &default_period_ms); controld_shutdown_start_countdown(default_period_ms); } diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 1bcd436779..36d7a3a7af 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,2415 +1,2414 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // lrmd_event_data_t, lrmd_rsc_info_t, etc. #include #include -#include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static lrmd_event_data_t *construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, struct ra_metadata_s *md); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static void lrm_connection_destroy(void) { if (pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) { crm_crit("Lost connection to local executor"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); controld_clear_fsa_input_flags(R_LRM_CONNECTED); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } /*! * \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval_ms == existing->interval_ms) && pcmk__str_eq(op->rsc_id, existing->rsc_id, pcmk__str_none) && pcmk__str_eq(op->op_type, existing->op_type, pcmk__str_casei)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! * \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! 
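
As an aside on copy_instance_keys() and copy_meta_keys() above: both are written as g_hash_table_foreach() callbacks that split an operation's parameters by whether the key carries the CRM_META "_" prefix. A hedged sketch of using them together; it assumes op is an lrmd_event_data_t with non-NULL params, which is not checked here:

GHashTable *instance_attrs = pcmk__strkey_table(free, free);
GHashTable *meta_attrs = pcmk__strkey_table(free, free);

/* Keys without the CRM_META "_" prefix are instance attributes;
 * keys with it are meta-attributes
 */
g_hash_table_foreach(op->params, copy_instance_keys, instance_attrs);
g_hash_table_foreach(op->params, copy_meta_keys, meta_attrs);
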
* \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); controld_delete_resource_history(op->rsc_id, lrm_state->node_name, NULL, crmd_cib_smart_opt()); return; } if (pcmk__str_eq(op->op_type, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = pcmk__assert_alloc(1, sizeof(rsc_history_t)); entry->id = pcmk__str_copy(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = pcmk__str_copy(rsc->type); entry->rsc.standard = pcmk__str_copy(rsc->standard); entry->rsc.provider = pcmk__str_copy(rsc->provider); } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_EXEC_CANCELLED) { if (op->interval_ms > 0) { crm_trace("Removing cancelled recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping " PCMK__OP_FMT " rc=%d, status=%d", op->rsc_id, op->op_type, op->interval_ms, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. */ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval_ms == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && pcmk__strcase_any_of(op->op_type, PCMK_ACTION_START, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, PCMK_ACTION_MONITOR, NULL)) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = pcmk__strkey_table(free, free); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval_ms > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if ((entry->recurring_op_list != NULL) && !pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR, pcmk__str_casei)) { crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); history_free_recurring_ops(entry); } } /*! 
* \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(const lrm_state_t *lrm_state, const ha_msg_input_t *input, const char *rsc_id, const lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } static inline const char * op_node_name(lrmd_event_data_t *op) { return pcmk__s(op->remote_nodename, controld_globals.cluster->priv->node_name); } void lrm_op_callback(lrmd_event_data_t * op) { CRM_CHECK(op != NULL, return); switch (op->type) { case lrmd_event_disconnect: if (op->remote_nodename == NULL) { /* If this is the local executor IPC connection, set the right * bits in the controller when the connection goes down. */ lrm_connection_destroy(); } break; case lrmd_event_exec_complete: { lrm_state_t *lrm_state = controld_get_executor_state(op_node_name(op), false); pcmk__assert(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL, NULL); } break; default: break; } } static void try_local_executor_connect(long long action, fsa_data_t *msg_data, lrm_state_t *lrm_state) { int rc = pcmk_rc_ok; crm_debug("Connecting to the local executor"); // If we can connect, great rc = controld_connect_local_executor(lrm_state); if (rc == pcmk_rc_ok) { controld_set_fsa_input_flags(R_LRM_CONNECTED); crm_info("Connection to the local executor established"); return; } // Otherwise, if we can try again, set a timer to do so if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the local executor %d time%s " "(%d max): %s", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS, pcmk_rc_str(rc)); controld_start_wait_timer(); crmd_fsa_stall(FALSE); return; } // Otherwise give up crm_err("Failed to connect to the executor the max allowed " "%d time%s: %s", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails), pcmk_rc_str(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local executor connections. Remote connections are * handled as resources within the scheduler. Connecting and disconnecting * from remote executor instances is handled differently. 
*/ lrm_state_t *lrm_state = NULL; if (controld_globals.cluster->priv->node_name == NULL) { return; // Shouldn't be possible } lrm_state = controld_get_executor_state(NULL, true); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } controld_clear_fsa_input_flags(R_LRM_CONNECTED); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); } if (action & A_LRM_CONNECT) { try_local_executor_connect(action, msg_data, lrm_state); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __func__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; active_op_t *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { when = "shutdown... waiting"; } if ((lrm_state->active_ops != NULL) && lrm_state_is_connected(lrm_state)) { guint removed = g_hash_table_foreach_remove(lrm_state->active_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->active_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, pcmk__plural_s(removed), when, nremaining); } } if (lrm_state->active_ops != NULL) { g_hash_table_iter_init(&gIter, lrm_state->active_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval_ms == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending executor operation%s at %s", counter, pcmk__plural_s(counter), when); if ((cur_state == S_TERMINATE) || !pcmk_is_set(controld_globals.fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->active_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->active_ops != NULL) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->active_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (pcmk__str_eq(entry->id, pending->rsc_id, pcmk__str_none)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval_ms == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? 
" was" : "s were", when); } return rc; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); if ((entry->last->rc == PCMK_OCF_OK) && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_STOP, pcmk__str_casei)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_MIGRATE_TO, pcmk__str_casei)) { // A stricter check is too complex ... leave that to the scheduler return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if ((entry->last->interval_ms == 0) && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = pcmk__xe_create(rsc_list, PCMK__XE_LRM_RESOURCE); crm_xml_add(xml_rsc, PCMK_XA_ID, entry->id); crm_xml_add(xml_rsc, PCMK_XA_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, PCMK_XA_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, PCMK_XA_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { static const char *name = CRM_META "_" PCMK__META_CONTAINER; const char *container = g_hash_table_lookup(entry->last->params, name); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, PCMK__META_CONTAINER, container); } } controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name); controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { controld_add_resource_history_xml(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name); } } return FALSE; } xmlNode * controld_query_executor_state(void) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; pcmk__node_status_t *peer = NULL; lrm_state_t *lrm_state = controld_get_executor_state(NULL, false); if (!lrm_state) { crm_err("Could not get executor state for local node"); return NULL; } peer = pcmk__get_node(0, lrm_state->node_name, NULL, pcmk__node_search_any); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, node_update_cluster|node_update_peer, NULL, __func__); if (xml_state == NULL) { return NULL; } xml_data = pcmk__xe_create(xml_state, PCMK__XE_LRM); crm_xml_add(xml_data, PCMK_XA_ID, peer->xml_id); rsc_list = pcmk__xe_create(xml_data, PCMK__XE_LRM_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current executor state"); return xml_state; } /*! 
* \internal * \brief Map standard Pacemaker return code to operation status and OCF code * * \param[out] event Executor event whose status and return code should be set * \param[in] rc Standard Pacemaker return code */ void controld_rc2event(lrmd_event_data_t *event, int rc) { /* This is called for cleanup requests from controller peers/clients, not * for resource actions, so no exit reason is needed. */ switch (rc) { case pcmk_rc_ok: lrmd__set_result(event, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); break; case EACCES: lrmd__set_result(event, PCMK_OCF_INSUFFICIENT_PRIV, PCMK_EXEC_ERROR, NULL); break; default: lrmd__set_result(event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, NULL); break; } } /*! * \internal * \brief Trigger a new transition after CIB status was deleted * * If a CIB status delete was not expected (as part of the transition graph), * trigger a new transition by updating the (arbitrary) "last-lrm-refresh" * cluster property. * * \param[in] from_sys IPC name that requested the delete * \param[in] rsc_id Resource whose status was deleted (for logging only) */ void controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id) { if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) { char *now_s = crm_strdup_printf("%lld", (long long) time(NULL)); crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_none, PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, NULL, NULL); free(now_s); } } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, PCMK__XA_SRC); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? 
"" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, PCMK_ACTION_DELETE); controld_rc2event(op, pcmk_legacy2rc(rc)); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); controld_trigger_delete_refresh(from_sys, rsc_id); } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (pcmk__str_eq(event->rsc, op->rsc, pcmk__str_none)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; active_op_t *pending = value; if (pcmk__str_eq(rsc, pending->rsc_id, pcmk__str_none)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } static void delete_rsc_entry(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, GHashTableIter *rsc_iter, int rc, const char *user_name, bool from_cib) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = pcmk__str_copy(rsc_id); if (rsc_iter) { g_hash_table_iter_remove(rsc_iter); } else { g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); } if (from_cib) { controld_delete_resource_history(rsc_id_copy, lrm_state->node_name, user_name, crmd_cib_smart_opt()); } g_hash_table_foreach_remove(lrm_state->active_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (pcmk__str_eq(op, entry->failed->op_type, pcmk__str_casei) && (interval_ms == entry->failed->interval_ms)); } /*! * \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. 
* * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval_ms If operation is specified, it has this interval */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, guint interval_ms) { lrm_state_t *lrm_state = controld_get_executor_state(node_name, false); if (lrm_state == NULL) { return; } if (lrm_state->resource_history != NULL) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval_ms)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; active_op_t *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->active_ops, key); if (pending) { if (remove && !pcmk_is_set(pending->flags, active_op_remove)) { controld_set_active_op_flags(pending, active_op_remove); crm_debug("Scheduling %s for removal", key); } if (pcmk_is_set(pending->flags, active_op_cancelled)) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } controld_set_active_op_flags(pending, active_op_cancelled); } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the active operations list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from active operations will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; active_op_t *op = value; if (pcmk__str_eq(op->op_key, data->key, pcmk__str_none)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->active_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->active_ops)); return data.done; } /*! 
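
The cancellation helpers above lean on GLib's remove-via-callback idiom: g_hash_table_foreach_remove() drops every entry for which the supplied callback returns TRUE and reports how many it removed. A standalone sketch of that idiom; the names below are illustrative and not taken from this file:

static gboolean
remove_if_unset(gpointer key, gpointer value, gpointer user_data)
{
    return (value == NULL);     /* TRUE means "remove this entry" */
}

static guint
prune_table(GHashTable *table)
{
    /* Returns the number of entries the callback asked to remove */
    return g_hash_table_foreach_remove(table, remove_if_unset, NULL);
}
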
* \internal * \brief Retrieve resource information from LRM * * \param[in,out] lrm_state Executor connection state to use * \param[in] rsc_xml XML containing resource configuration * \param[in] do_create If true, register resource if not already * \param[out] rsc_info Where to store information obtained from executor * * \retval pcmk_ok Success (and rsc_info holds newly allocated result) * \retval -EINVAL Required information is missing from arguments * \retval -ENOTCONN No active connection to LRM * \retval -ENODEV Resource not found * \retval -errno Error communicating with executor when registering resource * * \note Caller is responsible for freeing result on success. */ static int get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { const char *id = pcmk__xe_id(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); if (lrm_state_is_connected(lrm_state) == FALSE) { return -ENOTCONN; } crm_trace("Retrieving resource information for %s from the executor", id); *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { const char *long_id = crm_element_value(rsc_xml, PCMK__XA_LONG_ID); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); } } if ((*rsc_info == NULL) && do_create) { const char *class = crm_element_value(rsc_xml, PCMK_XA_CLASS); const char *provider = crm_element_value(rsc_xml, PCMK_XA_PROVIDER); const char *type = crm_element_value(rsc_xml, PCMK_XA_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Could not register resource %s with the executor on %s: %s " QB_XS " rc=%d", id, lrm_state->node_name, pcmk_strerror(rc), rc); /* Register this as an internal error if this involves the local * executor. Otherwise, we're likely dealing with an unresponsive * remote node, which is not an FSA failure. */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } return rc; } *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); } return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t *lrm_state, const char *id, lrmd_rsc_info_t *rsc, GHashTableIter *iter, const char *sys, const char *user, ha_msg_input_t *request, bool unregister, bool from_cib) { int rc = pcmk_ok; crm_info("Removing resource %s from executor for %s%s%s", id, sys, (user? " as " : ""), (user? user : "")); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource %s deleted from executor", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' from executor is pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, PCMK_XA_REFERENCE); op = pcmk__assert_alloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = pcmk__str_copy(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Could not delete '%s' from executor for %s%s%s: %s " QB_XS " rc=%d", id, sys, (user? " as " : ""), (user? 
user : ""), pcmk_strerror(rc), rc); } delete_rsc_entry(lrm_state, request, id, iter, rc, user, from_cib); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, enum ocf_exitcode op_exitcode, const char *exit_reason) { op->call_id = get_fake_call_id(lrm_state, op->rsc_id); op->t_run = time(NULL); op->t_rcchange = op->t_run; lrmd__set_result(op, op_exitcode, op_status, exit_reason); } static void force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node, bool reprobe_all_nodes) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_info("Clearing resource history on node %s", lrm_state->node_name); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { /* only unregister the resource during a reprobe if it is not a remote connection * resource. otherwise unregistering the connection will terminate remote-node * membership */ bool unregister = true; if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { unregister = false; if (reprobe_all_nodes) { lrm_state_t *remote_lrm_state = controld_get_executor_state(entry->id, false); if (remote_lrm_state != NULL) { /* If reprobing all nodes, be sure to reprobe the remote * node before clearing its connection resource */ force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE, reprobe_all_nodes); } } } /* Don't delete from the CIB, since we'll delete the whole node's LRM * state from the CIB soon */ delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, user_name, NULL, unregister, false); } /* Now delete the copy in the CIB */ controld_delete_node_state(lrm_state->node_name, controld_section_lrm, cib_none); } /*! * \internal * \brief Fail a requested action without actually executing it * * For an action that can't be executed, process it similarly to an actual * execution result, with specified error status (except for notify actions, * which will always be treated as successful). * * \param[in,out] lrm_state Executor connection that action is for * \param[in] action Action XML from request * \param[in] rc Desired return code to use * \param[in] op_status Desired operation status to use * \param[in] exit_reason Human-friendly detail, if error */ static void synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action, int op_status, enum ocf_exitcode rc, const char *exit_reason) { lrmd_event_data_t *op = NULL; const char *operation = crm_element_value(action, PCMK_XA_OPERATION); const char *target_node = crm_element_value(action, PCMK__META_ON_NODE); xmlNode *xml_rsc = pcmk__xe_first_child(action, PCMK_XE_PRIMITIVE, NULL, NULL); if ((xml_rsc == NULL) || (pcmk__xe_id(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", crm_element_value(action, PCMK__XA_OPERATION_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", pcmk__xe_id(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, pcmk__xe_id(xml_rsc), operation); if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { // Notifications can't fail fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_OK, NULL); } else { fake_op_status(lrm_state, op, op_status, rc, exit_reason); } crm_info("Faking " PCMK__OP_FMT " result (%d) on %s", op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node); // Process the result as if it came from the LRM process_lrm_event(lrm_state, op, NULL, action); lrmd_free_event(op); } /*! * \internal * \brief Get target of an LRM operation (replacing \p NULL with local node * name) * * \param[in] xml LRM operation data XML * * \return LRM operation target node name (local node or Pacemaker Remote node) */ static const char * lrm_op_target(const xmlNode *xml) { const char *target = NULL; if (xml) { target = crm_element_value(xml, PCMK__META_ON_NODE); } if (target == NULL) { target = controld_globals.cluster->priv->node_name; } return target; } static void fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_first_child(xml, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(xml_rsc != NULL, return); /* The executor simply executes operations and reports the results, without * any concept of success or failure, so to fail a resource, we must fake * what a failure looks like. * * To do this, we create a fake executor operation event for the resource, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. 
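
Condensed from the function body that follows (a sketch, not a drop-in replacement; lrm_state, xml, and rsc_id are assumed to be in scope): the controller builds an event for the resource, stamps a synthetic result on it, and feeds it back through the normal result-processing path.

lrmd_event_data_t *op = construct_op(lrm_state, xml, rsc_id, "asyncmon");

/* Mark the synthetic event as a completed action with a failure code */
fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_UNKNOWN_ERROR,
               "Simulated failure");
process_lrm_event(lrm_state, op, NULL, xml);    /* handled as if from the executor */
lrmd_free_event(op);
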
*/ op = construct_op(lrm_state, xml, pcmk__xe_id(xml_rsc), "asyncmon"); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; if (user_name && !pcmk__is_privileged(user_name)) { crm_err("%s does not have permission to fail %s", user_name, pcmk__xe_id(xml_rsc)); fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_INSUFFICIENT_PRIV, "Unprivileged user cannot fail resources"); controld_ack_event_directly(from_host, from_sys, NULL, op, pcmk__xe_id(xml_rsc)); lrmd_free_event(op); return; } if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { crm_info("Failing resource %s...", rsc->id); fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_UNKNOWN_ERROR, "Simulated failure"); process_lrm_event(lrm_state, op, NULL, xml); op->rc = PCMK_OCF_OK; // The request to fail the resource succeeded lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(xml, "bad input"); fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_UNKNOWN_ERROR, "Cannot fail unknown resource"); } controld_ack_event_directly(from_host, from_sys, NULL, op, pcmk__xe_id(xml_rsc)); lrmd_free_event(op); } static void handle_reprobe_op(lrm_state_t *lrm_state, xmlNode *msg, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node, bool reprobe_all_nodes) { crm_notice("Forcing the status of all resources to be redetected"); force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node, reprobe_all_nodes); if (!pcmk__strcase_any_of(from_sys, CRM_SYSTEM_PENGINE, CRM_SYSTEM_TENGINE, NULL)) { xmlNode *reply = pcmk__new_reply(msg, NULL); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } pcmk__xml_free(reply); } } static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; guint interval_ms = 0; gboolean in_progress = FALSE; xmlNode *params = pcmk__xe_first_child(input->xml, PCMK__XE_ATTRIBUTES, NULL, NULL); CRM_CHECK(params != NULL, return FALSE); meta_key = crm_meta_name(PCMK_XA_OPERATION); op_task = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); meta_key = crm_meta_name(PCMK_META_INTERVAL); if (crm_element_value_ms(params, meta_key, &interval_ms) != pcmk_ok) { free(meta_key); return FALSE; } free(meta_key); op_key = pcmk__op_key(rsc->id, op_task, interval_ms); meta_key = crm_meta_name(PCMK__XA_CALL_ID); call_id = crm_element_value(params, meta_key); free(meta_key); crm_debug("Scheduler requested op %s (call=%s) be cancelled", op_key, (call_id? 
call_id : "NA")); pcmk__scan_min_int(call_id, &call, 0); if (call == 0) { // Normal case when the scheduler cancels a recurring op in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { // Normal case when the scheduler cancels an orphan op in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } // Acknowledge cancellation operation if for a remote connection resource if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { char *op_id = make_stop_id(rsc->id, call); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { crm_info("Nothing known about operation %d for %s", call, op_key); } controld_delete_action_history_by_key(rsc->id, lrm_state->node_name, op_key, call); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); /* needed at least for cancellation of a remote operation */ if (lrm_state->active_ops != NULL) { g_hash_table_remove(lrm_state->active_ops, op_id); } free(op_id); } free(op_key); return TRUE; } static void do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, bool crm_rsc_delete, const char *user_name) { bool unregister = true; int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name, user_name, cib_dryrun|cib_sync_call); if (cib_rc != pcmk_rc_ok) { lrmd_event_data_t *op = NULL; op = construct_op(lrm_state, input->xml, rsc->id, PCMK_ACTION_DELETE); /* These are resource clean-ups, not actions, so no exit reason is * needed. */ lrmd__set_result(op, pcmk_rc2ocf(cib_rc), PCMK_EXEC_ERROR, NULL); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { unregister = false; } delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, user_name, input, unregister, true); } // User data for asynchronous metadata execution struct metadata_cb_data { lrmd_rsc_info_t *rsc; // Copy of resource information xmlNode *input_xml; // Copy of FSA input XML }; static struct metadata_cb_data * new_metadata_cb_data(lrmd_rsc_info_t *rsc, xmlNode *input_xml) { struct metadata_cb_data *data = NULL; data = pcmk__assert_alloc(1, sizeof(struct metadata_cb_data)); data->input_xml = pcmk__xml_copy(NULL, input_xml); data->rsc = lrmd_copy_rsc_info(rsc); return data; } static void free_metadata_cb_data(struct metadata_cb_data *data) { lrmd_free_rsc_info(data->rsc); pcmk__xml_free(data->input_xml); free(data); } /*! 
* \internal * \brief Execute an action after metadata has been retrieved * * \param[in] pid Ignored * \param[in] result Result of metadata action * \param[in] user_data Metadata callback data */ static void metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data) { struct metadata_cb_data *data = (struct metadata_cb_data *) user_data; struct ra_metadata_s *md = NULL; lrm_state_t *lrm_state = controld_get_executor_state(lrm_op_target(data->input_xml), false); if ((lrm_state != NULL) && pcmk__result_ok(result)) { md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc, result->action_stdout); } if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) { do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); } free_metadata_cb_data(data); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = lrm_op_target(input->xml); gboolean is_remote_node = FALSE; bool crm_rsc_delete = FALSE; // Message routed to the local node is targeting a specific, non-local node is_remote_node = !controld_is_local_node(target_node); lrm_state = controld_get_executor_state(target_node, false); if ((lrm_state == NULL) && is_remote_node) { crm_err("Failing action because local node has never had connection to remote node %s", target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, "Local node has no connection to remote"); return; } pcmk__assert(lrm_state != NULL); user_name = pcmk__update_acl_user(input->msg, PCMK__XA_CRM_USER, NULL); crm_op = crm_element_value(input->msg, PCMK__XA_CRM_TASK); from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM); if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { from_host = crm_element_value(input->msg, PCMK__XA_SRC); } if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { crm_rsc_delete = TRUE; // from crm_resource } operation = PCMK_ACTION_DELETE; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, PCMK_XA_OPERATION); } CRM_CHECK(!pcmk__str_empty(crm_op) || !pcmk__str_empty(operation), return); crm_trace("'%s' execution request from %s as %s user", pcmk__s(crm_op, operation), pcmk__s(from_sys, "unknown subsystem"), pcmk__s(user_name, "current")); if (pcmk__str_eq(crm_op, CRM_OP_LRM_FAIL, pcmk__str_none)) { fail_lrm_resource(input->xml, lrm_state, user_name, from_host, from_sys); } else if (pcmk__str_eq(crm_op, CRM_OP_REPROBE, pcmk__str_none) || pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) { const char *raw_target = NULL; if (input->xml != NULL) { // For CRM_OP_REPROBE, a NULL target means we're targeting all nodes raw_target = crm_element_value(input->xml, PCMK__META_ON_NODE); } handle_reprobe_op(lrm_state, input->msg, from_sys, from_host, user_name, is_remote_node, (raw_target == NULL)); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_first_child(input->xml, PCMK_XE_PRIMITIVE, NULL, NULL); gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none); int rc; // We can't return anything meaningful without a 
resource ID CRM_CHECK((xml_rsc != NULL) && (pcmk__xe_id(xml_rsc) != NULL), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, "Not connected to remote executor"); return; } else if ((rc < 0) && !create_rsc) { /* Delete of malformed or nonexistent resource * (deleting something that does not exist is a success) */ crm_debug("Not registering resource '%s' for a %s event " QB_XS " get-rc=%d (%s) transition-key=%s", pcmk__xe_id(xml_rsc), operation, rc, pcmk_strerror(rc), pcmk__xe_id(input->xml)); delete_rsc_entry(lrm_state, input, pcmk__xe_id(xml_rsc), NULL, pcmk_ok, user_name, true); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource crm_err("Invalid resource definition for %s", pcmk__xe_id(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_NOT_CONFIGURED, // fatal error "Invalid resource definition"); return; } else if (rc < 0) { // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " QB_XS " rc=%d", pcmk__xe_id(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_INVALID_PARAM, // hard error "Could not register resource with executor"); return; } if (pcmk__str_eq(operation, PCMK_ACTION_CANCEL, pcmk__str_none)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } } else if (pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); } else { struct ra_metadata_s *md = NULL; /* Getting metadata from cache is OK except for start actions -- * always refresh from the agent for those, in case the resource * agent was updated. * * @TODO Only refresh metadata for starts if the agent actually * changed (using something like inotify, or a hash or modification * time of the agent executable). */ if (strcmp(operation, PCMK_ACTION_START) != 0) { md = controld_get_rsc_metadata(lrm_state, rsc, controld_metadata_from_cache); } if ((md == NULL) && crm_op_needs_metadata(rsc->standard, operation)) { /* Most likely, we'll need the agent metadata to record the * pending operation and the operation result. Get it now rather * than wait until then, so the metadata action doesn't eat into * the real action's timeout. * * @TODO Metadata is retrieved via direct execution of the * agent, which has a couple of related issues: the executor * should execute agents, not the controller; and metadata for * Pacemaker Remote nodes should be collected on those nodes, * not locally. */ struct metadata_cb_data *data = NULL; data = new_metadata_cb_data(rsc, input->xml); crm_info("Retrieving metadata for %s (%s%s%s:%s) asynchronously", rsc->id, rsc->standard, ((rsc->provider == NULL)? "" : ":"), ((rsc->provider == NULL)? 
"" : rsc->provider), rsc->type); (void) lrmd__metadata_async(rsc, metadata_complete, (void *) data); } else { do_lrm_rsc_op(lrm_state, rsc, input->xml, md); } } lrmd_free_rsc_info(rsc); } else { crm_err("Invalid execution request: unknown command '%s' (bug?)", crm_op); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; GHashTable *params = NULL; xmlNode *primitive = NULL; const char *class = NULL; const char *transition = NULL; pcmk__assert((rsc_id != NULL) && (operation != NULL)); op = lrmd_new_event(rsc_id, operation, 0); op->type = lrmd_event_exec_complete; op->timeout = 0; op->start_delay = 0; lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL); if (rsc_op == NULL) { CRM_LOG_ASSERT(pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. */ op->params = pcmk__strkey_table(free, free); pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_" PCMK__META_OP_TARGET_RC); op_delay = crm_meta_value(params, PCMK_META_START_DELAY); pcmk__scan_min_int(op_delay, &op->start_delay, 0); op_timeout = crm_meta_value(params, PCMK_META_TIMEOUT); pcmk__scan_min_int(op_timeout, &op->timeout, 0); if (pcmk__guint_from_hash(params, CRM_META "_" PCMK_META_INTERVAL, 0, &(op->interval_ms)) != pcmk_rc_ok) { op->interval_ms = 0; } /* Use pcmk_monitor_timeout instead of meta timeout for stonith recurring monitor, if set */ primitive = pcmk__xe_first_child(rsc_op, PCMK_XE_PRIMITIVE, NULL, NULL); class = crm_element_value(primitive, PCMK_XA_CLASS); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params) && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei) && (op->interval_ms > 0)) { op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (op_timeout != NULL) { long long timeout_ms = crm_get_msec(op_timeout); op->timeout = (int) QB_MIN(timeout_ms, INT_MAX); } } if (!pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)) { op->params = params; } else { rsc_history_t *entry = NULL; if (lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = pcmk__strkey_table(free, free); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->timeout <= 0) { op->timeout = op->interval_ms; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, PCMK__XA_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = pcmk__str_copy(transition); if (op->interval_ms != 0) { if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP, NULL)) { crm_err("Start and Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; } } 
crm_trace("Constructed %s op for %s: interval=%u", operation, rsc_id, op->interval_ms); return op; } /*! * \internal * \brief Send a (synthesized) event result * * Reply with a synthesized event result directly, as opposed to going through * the executor. * * \param[in] to_host Host to send result to * \param[in] to_sys IPC name to send result (NULL for transition engine) * \param[in] rsc Type information about resource the result is for * \param[in,out] op Event with result to send * \param[in] rsc_id ID of resource the result is for */ void controld_ack_event_directly(const char *to_host, const char *to_sys, const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; pcmk__node_status_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { // op->rsc_id is a (const char *) but lrmd_free_event() frees it pcmk__assert(rsc_id != NULL); op->rsc_id = pcmk__str_copy(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = controld_get_local_node_status(); update = create_node_state_update(peer, node_update_none, NULL, __func__); iter = pcmk__xe_create(update, PCMK__XE_LRM); crm_xml_add(iter, PCMK_XA_ID, controld_globals.our_uuid); iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCES); iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCE); crm_xml_add(iter, PCMK_XA_ID, op->rsc_id); controld_add_resource_history_xml(iter, rsc, op, controld_globals.cluster->priv->node_name); /* We don't have the original message ID, so use "direct-ack" (we just need * something non-NULL for this to create a reply) * * @TODO It would be better to use the server, message ID, and task from the * original request when callers have it available */ reply = pcmk__new_message(pcmk_ipc_controld, "direct-ack", CRM_SYSTEM_LRMD, to_host, to_sys, CRM_OP_INVOKE_LRM, update); crm_log_xml_trace(update, "[direct ACK]"); crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, crm_element_value(reply, PCMK_XA_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } pcmk__xml_free(update); pcmk__xml_free(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } controld_set_fsa_input_flags(R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; active_op_t *op = value; if ((op->interval_ms != 0) && pcmk__str_eq(op->rsc_id, event->rsc->id, pcmk__str_none)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; active_op_t *op = value; if (op->interval_ms != 0) { crm_info("Cancelling op %d for %s (%s)", 
op->call_id, op->rsc_id, (const char *) key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } /*! * \internal * \brief Check whether recurring actions should be cancelled before an action * * \param[in] rsc_id Resource that action is for * \param[in] action Action being performed * \param[in] interval_ms Operation interval of \p action (in milliseconds) * * \return true if recurring actions should be cancelled, otherwise false */ static bool should_cancel_recurring(const char *rsc_id, const char *action, guint interval_ms) { if (is_remote_lrmd_ra(NULL, NULL, rsc_id) && (interval_ms == 0) && (strcmp(action, PCMK_ACTION_MIGRATE_TO) == 0)) { /* Don't stop monitoring a migrating Pacemaker Remote connection * resource until the entire migration has completed. We must detect if * the connection is unexpectedly severed, even during a migration. */ return false; } // Cancel recurring actions before changing resource state return (interval_ms == 0) && !pcmk__str_any_of(action, PCMK_ACTION_MONITOR, PCMK_ACTION_NOTIFY, NULL); } /*! * \internal * \brief Check whether an action should not be performed at this time * * \param[in] operation Action to be performed * * \return Readable description of why action should not be performed, * or NULL if it should be performed */ static const char * should_nack_action(const char *action) { if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN) && pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); return "Not attempting start due to shutdown in progress"; } switch (controld_globals.fsa_state) { case S_NOT_DC: case S_POLICY_ENGINE: // Recalculating case S_TRANSITION_ENGINE: break; default: if (!pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) { return "Controller cannot attempt actions at this time"; } break; } return NULL; } static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, struct ra_metadata_s *md) { int rc; int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; const char *operation = NULL; const char *nack_reason = NULL; CRM_CHECK((rsc != NULL) && (msg != NULL), return); operation = crm_element_value(msg, PCMK_XA_OPERATION); CRM_CHECK(!pcmk__str_empty(operation), return); transition = crm_element_value(msg, PCMK__XA_TRANSITION_KEY); if (pcmk__str_empty(transition)) { crm_log_xml_err(msg, "Missing transition number"); } if (lrm_state == NULL) { // This shouldn't be possible, but provide a failsafe just in case crm_err("Cannot execute %s of %s: No executor connection " QB_XS " transition_key=%s", operation, rsc->id, pcmk__s(transition, "")); synthesize_lrmd_failure(NULL, msg, PCMK_EXEC_INVALID, PCMK_OCF_UNKNOWN_ERROR, "No executor connection"); return; } if (pcmk__str_any_of(operation, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs * will schedule reload-agent actions only. In either case, we need * to map that to whatever the resource agent actually supports. * Default to the OCF 1.1 name. 
*/ if ((md != NULL) && pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) { operation = PCMK_ACTION_RELOAD; } else { operation = PCMK_ACTION_RELOAD_AGENT; } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (should_cancel_recurring(rsc->id, operation, op->interval_ms)) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->active_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for " PCMK__OP_FMT, removed, pcmk__plural_s(removed), rsc->id, operation, op->interval_ms); } } /* now do the op */ crm_notice("Requesting local execution of %s operation for %s on %s " QB_XS " transition_key=%s op_key=" PCMK__OP_FMT, pcmk__readable_action(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name, pcmk__s(transition, ""), rsc->id, operation, op->interval_ms); nack_reason = should_nack_action(operation); if (nack_reason != NULL) { crm_notice("Discarding attempt to perform action %s on %s in state %s " "(shutdown=%s)", operation, rsc->id, fsa_state2string(controld_globals.fsa_state), pcmk__flag_text(controld_globals.fsa_input_register, R_SHUTDOWN)); lrmd__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_INVALID, nack_reason); controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } controld_record_pending_op(lrm_state->node_name, rsc, op); op_id = pcmk__op_key(rsc->id, op->op_type, op->interval_ms); if (op->interval_ms > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } rc = controld_execute_resource_agent(lrm_state, rsc->id, op->op_type, op->user_data, op->interval_ms, op->timeout, op->start_delay, op->params, &call_id); if (rc == pcmk_rc_ok) { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); active_op_t *pending = NULL; pending = pcmk__assert_alloc(1, sizeof(active_op_t)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; pending->op_type = pcmk__str_copy(operation); pending->op_key = pcmk__str_copy(op_id); pending->rsc_id = pcmk__str_copy(rsc->id); pending->start_time = time(NULL); pending->user_data = pcmk__str_copy(op->user_data); if (crm_element_value_epoch(msg, PCMK_OPT_SHUTDOWN_LOCK, &(pending->lock_time)) != pcmk_ok) { pending->lock_time = 0; } g_hash_table_replace(lrm_state->active_ops, call_id_s, pending); if ((op->interval_ms > 0) && (op->start_delay > START_DELAY_THRESHOLD)) { int target_rc = PCMK_OCF_OK; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc); lrmd__set_result(op, target_rc, PCMK_EXEC_DONE, NULL); controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } else if (lrm_state_is_local(lrm_state)) { crm_err("Could not initiate %s action for resource %s locally: %s " QB_XS " rc=%d", operation, rsc->id, pcmk_rc_str(rc), rc); fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, pcmk_rc_str(rc)); process_lrm_event(lrm_state, op, NULL, NULL); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else { crm_err("Could not initiate %s action for resource %s remotely on %s: " "%s " QB_XS " rc=%d", operation, rsc->id, 
lrm_state->node_name, pcmk_rc_str(rc), rc); fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, pcmk_rc_str(rc)); process_lrm_event(lrm_state, op, NULL, NULL); } free(op_id); lrmd_free_event(op); } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = pcmk__str_copy(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* Replace newline escape pattern with actual newline (and a space so we * don't have to shuffle the rest of the buffer) */ pch[0] = '\n'; pch[1] = ' '; pch = strstr(pch, escaped_newline); } return ret; } static bool did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id, const char * op_type, guint interval_ms) { rsc_history_t *entry = NULL; CRM_CHECK(lrm_state != NULL, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); CRM_CHECK(op_type != NULL, return FALSE); entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->failed == NULL) { return FALSE; } if (pcmk__str_eq(entry->failed->rsc_id, rsc_id, pcmk__str_none) && pcmk__str_eq(entry->failed->op_type, op_type, pcmk__str_casei) && entry->failed->interval_ms == interval_ms) { return TRUE; } return FALSE; } /*! * \internal * \brief Log the result of an executor action (actual or synthesized) * * \param[in] op Executor action to log result for * \param[in] op_key Operation key for action * \param[in] node_name Name of node action was performed on, if known * \param[in] confirmed Whether to log that graph action was confirmed */ static void log_executor_event(const lrmd_event_data_t *op, const char *op_key, const char *node_name, gboolean confirmed) { int log_level = LOG_ERR; GString *str = g_string_sized_new(100); // reasonable starting size pcmk__g_strcat(str, "Result of ", pcmk__readable_action(op->op_type, op->interval_ms), " operation for ", op->rsc_id, NULL); if (node_name != NULL) { pcmk__g_strcat(str, " on ", node_name, NULL); } switch (op->op_status) { case PCMK_EXEC_DONE: log_level = LOG_NOTICE; pcmk__g_strcat(str, ": ", crm_exit_str((crm_exit_t) op->rc), NULL); break; case PCMK_EXEC_TIMEOUT: pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), " after ", pcmk__readable_interval(op->timeout), NULL); break; case PCMK_EXEC_CANCELLED: log_level = LOG_INFO; pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), NULL); break; default: pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), NULL); break; } if ((op->exit_reason != NULL) && ((op->op_status != PCMK_EXEC_DONE) || (op->rc != PCMK_OCF_OK))) { pcmk__g_strcat(str, " (", op->exit_reason, ")", NULL); } g_string_append(str, " " QB_XS); g_string_append_printf(str, " graph action %sconfirmed; call=%d key=%s", (confirmed? "" : "un"), op->call_id, op_key); if (op->op_status == PCMK_EXEC_DONE) { g_string_append_printf(str, " rc=%d", op->rc); } do_crm_log(log_level, "%s", str->str); g_string_free(str, TRUE); /* The services library has already logged the output at info or debug * level, so just raise to notice if it looks like a failure. 
*/ if ((op->output != NULL) && (op->rc != PCMK_OCF_OK)) { char *prefix = crm_strdup_printf(PCMK__OP_FMT "@%s output", op->rsc_id, op->op_type, op->interval_ms, node_name); crm_log_output(LOG_NOTICE, prefix, op->output); free(prefix); } } void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, active_op_t *pending, const xmlNode *action_xml) { char *op_id = NULL; char *op_key = NULL; gboolean remove = FALSE; gboolean removed = FALSE; bool need_direct_ack = FALSE; lrmd_rsc_info_t *rsc = NULL; const char *node_name = NULL; CRM_CHECK(op != NULL, return); CRM_CHECK(op->rsc_id != NULL, return); // Remap new status codes for older DCs if (compare_version(controld_globals.dc_version, "3.2.0") < 0) { switch (op->op_status) { case PCMK_EXEC_NOT_CONNECTED: lrmd__set_result(op, PCMK_OCF_CONNECTION_DIED, PCMK_EXEC_ERROR, op->exit_reason); break; case PCMK_EXEC_INVALID: lrmd__set_result(op, CRM_DIRECT_NACK_RC, PCMK_EXEC_ERROR, op->exit_reason); break; default: break; } } op_id = make_stop_id(op->rsc_id, op->call_id); op_key = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); // Get resource info if available (from executor state or action XML) if (lrm_state) { rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if ((rsc == NULL) && action_xml) { xmlNode *xml = pcmk__xe_first_child(action_xml, PCMK_XE_PRIMITIVE, NULL, NULL); const char *standard = crm_element_value(xml, PCMK_XA_CLASS); const char *provider = crm_element_value(xml, PCMK_XA_PROVIDER); const char *type = crm_element_value(xml, PCMK_XA_TYPE); if (standard && type) { crm_info("%s agent information not cached, using %s%s%s:%s from action XML", op->rsc_id, standard, (provider? ":" : ""), (provider? provider : ""), type); rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); } else { crm_err("Can't process %s result because %s agent information not cached or in XML", op_key, op->rsc_id); } } // Get node name if available (from executor state or action XML) if (lrm_state) { node_name = lrm_state->node_name; } else if (action_xml) { node_name = crm_element_value(action_xml, PCMK__META_ON_NODE); } if(pending == NULL) { remove = TRUE; if (lrm_state) { pending = g_hash_table_lookup(lrm_state->active_ops, op_id); } } if (op->op_status == PCMK_EXEC_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_PROMOTED: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_PROMOTED: // Leave it to the TE/scheduler to decide if this is an error op->op_status = PCMK_EXEC_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_EXEC_CANCELLED) { /* We might not record the result, so directly acknowledge it to the * originator instead, so it doesn't time out waiting for the result * (especially important if part of a transition). */ need_direct_ack = TRUE; if (controld_action_is_recordable(op->op_type)) { if (node_name && rsc) { // We should record the result, and happily, we can time_t lock_time = (pending == NULL)? 0 : pending->lock_time; controld_update_resource_history(node_name, rsc, op, lock_time); need_direct_ack = FALSE; } else if (op->rsc_deleted) { /* We shouldn't record the result (likely the resource was * refreshed, cleaned, or removed while this operation was * in flight). */ crm_notice("Not recording %s result in CIB because " "resource information was removed since it was initiated", op_key); } else { /* This shouldn't be possible; the executor didn't consider the * resource deleted, but we couldn't find resource or node * information. 
*/ crm_err("Unable to record %s result in CIB: %s", op_key, (node_name? "No resource information" : "No node name")); } } } else if (op->interval_ms == 0) { /* A non-recurring operation was cancelled. Most likely, the * never-initiated action was removed from the executor's pending * operations list upon resource removal. */ need_direct_ack = TRUE; } else if (pending == NULL) { /* This recurring operation was cancelled, but was not pending. No * transition actions are waiting on it, nothing needs to be done. */ } else if (op->user_data == NULL) { /* This recurring operation was cancelled and pending, but we don't * have a transition key. This should never happen. */ crm_err("Recurring operation %s was cancelled without transition information", op_key); } else if (pcmk_is_set(pending->flags, active_op_remove)) { /* This recurring operation was cancelled (by us) and pending, and we * have been waiting for it to finish. */ if (lrm_state) { controld_delete_action_history(op); } /* Directly acknowledge failed recurring actions here. The above call to * controld_delete_action_history() will not erase any corresponding * last_failure entry, which means that the DC won't confirm the * cancellation via process_op_deletion(), and the transition would * otherwise wait for the action timer to pop. */ if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms)) { need_direct_ack = TRUE; } } else if (op->rsc_deleted) { /* This recurring operation was cancelled (but not by us, and the * executor does not have resource information, likely due to resource * cleanup, refresh, or removal) and pending. */ crm_debug("Recurring op %s was cancelled due to resource deletion", op_key); need_direct_ack = TRUE; } else { /* This recurring operation was cancelled (but not by us, likely by the * executor before stopping the resource) and pending. We don't need to * do anything special. */ } if (need_direct_ack) { controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if (lrm_state && ((op->interval_ms == 0) || (op->op_status == PCMK_EXEC_CANCELLED))) { gboolean found = g_hash_table_remove(lrm_state->active_ops, op_id); if (op->interval_ms != 0) { removed = TRUE; } else if (found) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->active_ops)); } } log_executor_event(op, op_key, node_name, removed); if (lrm_state) { if (!pcmk__str_eq(op->op_type, PCMK_ACTION_META_DATA, pcmk__str_casei)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (rsc && (op->rc == PCMK_OCF_OK)) { char *metadata = unescape_newlines(op->output); controld_cache_metadata(lrm_state->metadata_cache, rsc, metadata); free(metadata); } } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); if (lrm_state) { delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL, true); } } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... 
allow it to continue */ controld_trigger_fsa(); if (lrm_state && rsc) { update_history_cache(lrm_state, rsc, op); } lrmd_free_rsc_info(rsc); free(op_key); free(op_id); } diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c index f8098630fe..f4578c2d13 100644 --- a/daemons/controld/controld_execd_state.c +++ b/daemons/controld/controld_execd_state.c @@ -1,827 +1,826 @@ /* * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include #include #include #include #include static GHashTable *lrm_state_table = NULL; extern GHashTable *proxy_table; int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); static void free_rsc_info(gpointer value) { lrmd_rsc_info_t *rsc_info = value; lrmd_free_rsc_info(rsc_info); } static void free_deletion_op(gpointer value) { struct pending_deletion_op_s *op = value; free(op->rsc); delete_ha_msg_input(op->input); free(op); } static void free_recurring_op(gpointer value) { active_op_t *op = value; free(op->user_data); free(op->rsc_id); free(op->op_type); free(op->op_key); if (op->params) { g_hash_table_destroy(op->params); } free(op); } static gboolean fail_pending_op(gpointer key, gpointer value, gpointer user_data) { lrmd_event_data_t event = { 0, }; lrm_state_t *lrm_state = user_data; active_op_t *op = value; crm_trace("Pre-emptively failing " PCMK__OP_FMT " on %s (call=%s, %s)", op->rsc_id, op->op_type, op->interval_ms, lrm_state->node_name, (char*)key, op->user_data); event.type = lrmd_event_exec_complete; event.rsc_id = op->rsc_id; event.op_type = op->op_type; event.user_data = op->user_data; event.timeout = 0; event.interval_ms = op->interval_ms; lrmd__set_result(&event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_NOT_CONNECTED, "Action was pending when executor connection was dropped"); event.t_run = op->start_time; event.t_rcchange = op->start_time; event.call_id = op->call_id; event.remote_nodename = lrm_state->node_name; event.params = op->params; process_lrm_event(lrm_state, &event, op, NULL); lrmd__reset_result(&event); return TRUE; } gboolean lrm_state_is_local(lrm_state_t *lrm_state) { return (lrm_state != NULL) && controld_is_local_node(lrm_state->node_name); } /*! 
* \internal * \brief Create executor state entry for a node and add it to the state table * * \param[in] node_name Node to create entry for * * \return Newly allocated executor state object initialized for \p node_name */ static lrm_state_t * lrm_state_create(const char *node_name) { lrm_state_t *state = NULL; if (!node_name) { crm_err("No node name given for lrm state object"); return NULL; } state = pcmk__assert_alloc(1, sizeof(lrm_state_t)); state->node_name = pcmk__str_copy(node_name); state->rsc_info_cache = pcmk__strkey_table(NULL, free_rsc_info); state->deletion_ops = pcmk__strkey_table(free, free_deletion_op); state->active_ops = pcmk__strkey_table(free, free_recurring_op); state->resource_history = pcmk__strkey_table(NULL, history_free); state->metadata_cache = metadata_cache_new(); g_hash_table_insert(lrm_state_table, (char *)state->node_name, state); return state; } static gboolean remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) { remote_proxy_t *proxy = value; const char *node_name = user_data; if (pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) { return TRUE; } return FALSE; } static remote_proxy_t * find_connected_proxy_by_node(const char * node_name) { GHashTableIter gIter; remote_proxy_t *proxy = NULL; CRM_CHECK(proxy_table != NULL, return NULL); g_hash_table_iter_init(&gIter, proxy_table); while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) { if (proxy->source && pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) { return proxy; } } return NULL; } static void remote_proxy_disconnect_by_node(const char * node_name) { remote_proxy_t *proxy = NULL; CRM_CHECK(proxy_table != NULL, return); while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) { /* mainloop_del_ipc_client() eventually calls remote_proxy_disconnected() * , which removes the entry from proxy_table. * Do not do this in a g_hash_table_iter_next() loop. */ if (proxy->source) { mainloop_del_ipc_client(proxy->source); } } return; } static void internal_lrm_state_destroy(gpointer data) { lrm_state_t *lrm_state = data; if (!lrm_state) { return; } /* Rather than directly remove the recorded proxy entries from proxy_table, * make sure any connected proxies get disconnected. So that * remote_proxy_disconnected() will be called and as well remove the * entries from proxy_table. 
*/ remote_proxy_disconnect_by_node(lrm_state->node_name); crm_trace("Destroying proxy table %s with %u members", lrm_state->node_name, g_hash_table_size(proxy_table)); // Just in case there's still any leftovers in proxy_table g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name); remote_ra_cleanup(lrm_state); lrmd_api_delete(lrm_state->conn); if (lrm_state->rsc_info_cache) { crm_trace("Destroying rsc info cache with %u members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_destroy(lrm_state->rsc_info_cache); } if (lrm_state->resource_history) { crm_trace("Destroying history op cache with %u members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_destroy(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Destroying deletion op cache with %u members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_destroy(lrm_state->deletion_ops); } if (lrm_state->active_ops != NULL) { crm_trace("Destroying pending op cache with %u members", g_hash_table_size(lrm_state->active_ops)); g_hash_table_destroy(lrm_state->active_ops); } metadata_cache_free(lrm_state->metadata_cache); free((char *)lrm_state->node_name); free(lrm_state); } void lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata) { if (lrm_state->resource_history) { crm_trace("Resetting resource history cache with %u members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_remove_all(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Resetting deletion operations cache with %u members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_remove_all(lrm_state->deletion_ops); } if (lrm_state->active_ops != NULL) { crm_trace("Resetting active operations cache with %u members", g_hash_table_size(lrm_state->active_ops)); g_hash_table_remove_all(lrm_state->active_ops); } if (lrm_state->rsc_info_cache) { crm_trace("Resetting resource information cache with %u members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_remove_all(lrm_state->rsc_info_cache); } if (reset_metadata) { metadata_cache_reset(lrm_state->metadata_cache); } } gboolean lrm_state_init_local(void) { if (lrm_state_table) { return TRUE; } lrm_state_table = pcmk__strikey_table(NULL, internal_lrm_state_destroy); if (!lrm_state_table) { return FALSE; } proxy_table = pcmk__strikey_table(NULL, remote_proxy_free); if (!proxy_table) { g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; return FALSE; } return TRUE; } void lrm_state_destroy_all(void) { if (lrm_state_table) { crm_trace("Destroying state table with %u members", g_hash_table_size(lrm_state_table)); g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; } if(proxy_table) { crm_trace("Destroying proxy table with %u members", g_hash_table_size(proxy_table)); g_hash_table_destroy(proxy_table); proxy_table = NULL; } } /*! 
* \internal * \brief Get executor state object * * \param[in] node_name Get executor state for this node (local node if NULL) * \param[in] create If true, create executor state if it doesn't exist * * \return Executor state object for \p node_name */ lrm_state_t * controld_get_executor_state(const char *node_name, bool create) { lrm_state_t *state = NULL; if ((node_name == NULL) && (controld_globals.cluster != NULL)) { node_name = controld_globals.cluster->priv->node_name; } if ((node_name == NULL) || (lrm_state_table == NULL)) { return NULL; } state = g_hash_table_lookup(lrm_state_table, node_name); if ((state == NULL) && create) { state = lrm_state_create(node_name); } return state; } GList * lrm_state_get_list(void) { if (lrm_state_table == NULL) { return NULL; } return g_hash_table_get_values(lrm_state_table); } void lrm_state_disconnect_only(lrm_state_t * lrm_state) { int removed = 0; if (!lrm_state->conn) { return; } crm_trace("Disconnecting %s", lrm_state->node_name); remote_proxy_disconnect_by_node(lrm_state->node_name); ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn); if (!pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { removed = g_hash_table_foreach_remove(lrm_state->active_ops, fail_pending_op, lrm_state); crm_trace("Synthesized %d operation failures for %s", removed, lrm_state->node_name); } } void lrm_state_disconnect(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return; } lrm_state_disconnect_only(lrm_state); lrmd_api_delete(lrm_state->conn); lrm_state->conn = NULL; } int lrm_state_is_connected(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return FALSE; } return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn); } int lrm_state_poke_connection(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return -ENOTCONN; } return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn); } // \return Standard Pacemaker return code int controld_connect_local_executor(lrm_state_t *lrm_state) { int rc = pcmk_rc_ok; if (lrm_state->conn == NULL) { lrmd_t *api = NULL; rc = lrmd__new(&api, NULL, NULL, 0); if (rc != pcmk_rc_ok) { return rc; } api->cmds->set_callback(api, lrm_op_callback); lrm_state->conn = api; } rc = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn, CRM_SYSTEM_CRMD, NULL); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { lrm_state->num_lrm_register_fails = 0; } else { lrm_state->num_lrm_register_fails++; } return rc; } static remote_proxy_t * crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel) { struct ipc_client_callbacks proxy_callbacks = { .dispatch = remote_proxy_dispatch, .destroy = remote_proxy_disconnected }; remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name, session_id, channel); return proxy; } gboolean crmd_is_proxy_session(const char *session) { return g_hash_table_lookup(proxy_table, session) ? 
TRUE : FALSE; } void crmd_proxy_send(const char *session, xmlNode *msg) { remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); lrm_state_t *lrm_state = NULL; if (!proxy) { return; } crm_log_xml_trace(msg, "to-proxy"); lrm_state = controld_get_executor_state(proxy->node_name, false); if (lrm_state) { crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name); remote_proxy_relay_event(proxy, msg); } } static void crmd_proxy_dispatch(const char *session, xmlNode *msg) { crm_trace("Processing proxied IPC message from session %s", session); crm_log_xml_trace(msg, "controller[inbound]"); crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, session); if (controld_authorize_ipc_message(msg, NULL, session)) { route_message(C_IPC_MESSAGE, msg); } controld_trigger_fsa(); } static void remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc != pcmk_ok) { crm_err("Query resulted in an error: %s", pcmk_strerror(rc)); if (rc == -EACCES || rc == -pcmk_err_schema_validation) { crm_err("The cluster is mis-configured - shutting down and staying down"); } } else { lrmd_t * lrmd = (lrmd_t *)user_data; crm_time_t *now = crm_time_new(NULL); GHashTable *config_hash = pcmk__strkey_table(free, free); pcmk_rule_input_t rule_input = { .now = now, }; crm_debug("Call %d : Parsing CIB options", call_id); pcmk_unpack_nvpair_blocks(output, PCMK_XE_CLUSTER_PROPERTY_SET, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, &rule_input, config_hash, NULL); /* Now send it to the remote peer */ lrmd__validate_remote_settings(lrmd, config_hash); g_hash_table_destroy(config_hash); crm_time_free(now); } } static void crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { lrm_state_t *lrm_state = userdata; const char *session = crm_element_value(msg, PCMK__XA_LRMD_IPC_SESSION); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); const char *op = crm_element_value(msg, PCMK__XA_LRMD_IPC_OP); if (pcmk__str_eq(op, LRMD_IPC_OP_NEW, pcmk__str_casei)) { const char *channel = crm_element_value(msg, PCMK__XA_LRMD_IPC_SERVER); proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel); if (!remote_ra_controlling_guest(lrm_state)) { if (proxy != NULL) { cib_t *cib_conn = controld_globals.cib_conn; /* Look up PCMK_OPT_STONITH_WATCHDOG_TIMEOUT and send to the * remote peer for validation */ int rc = cib_conn->cmds->query(cib_conn, PCMK_XE_CRM_CONFIG, NULL, cib_none); cib_conn->cmds->register_callback_full(cib_conn, rc, 10, FALSE, lrmd, "remote_config_check", remote_config_check, NULL); } } else { crm_debug("Skipping remote_config_check for guest-nodes"); } } else if (pcmk__str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ, pcmk__str_casei)) { char *now_s = NULL; crm_notice("%s requested shutdown of its remote connection", lrm_state->node_name); if (!remote_ra_is_in_maintenance(lrm_state)) { now_s = pcmk__ttoa(time(NULL)); update_attrd(lrm_state->node_name, PCMK__NODE_ATTR_SHUTDOWN, now_s, NULL, TRUE); free(now_s); remote_proxy_ack_shutdown(lrmd); crm_warn("Reconnection attempts to %s may result in failures that must be cleared", lrm_state->node_name); } else { remote_proxy_nack_shutdown(lrmd); crm_notice("Remote resource for %s is not managed so no ordered shutdown happening", lrm_state->node_name); } return; } else if (pcmk__str_eq(op, LRMD_IPC_OP_REQUEST, pcmk__str_casei) && proxy && proxy->is_local) { /* This is for the controller, which we are, so don't try * to send to ourselves over IPC -- do it directly. 
*/ uint32_t flags = 0U; int rc = pcmk_rc_ok; xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG, NULL, NULL); xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_CHECK(request != NULL, return); CRM_CHECK(lrm_state->node_name, return); crm_xml_add(request, PCMK_XE_ACL_ROLE, "pacemaker-remote"); pcmk__update_acl_user(request, PCMK__XA_LRMD_IPC_USER, lrm_state->node_name); /* Pacemaker Remote nodes don't know their own names (as known to the * cluster). When getting a node info request with no name or ID, add * the name, so we don't return info for ourselves instead of the * Pacemaker Remote node. */ if (pcmk__str_eq(crm_element_value(request, PCMK__XA_CRM_TASK), CRM_OP_NODE_INFO, pcmk__str_none)) { int node_id = 0; crm_element_value_int(request, PCMK_XA_ID, &node_id); if ((node_id <= 0) && (crm_element_value(request, PCMK_XA_UNAME) == NULL)) { crm_xml_add(request, PCMK_XA_UNAME, lrm_state->node_name); } } crmd_proxy_dispatch(session, request); rc = pcmk__xe_get_flags(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, &flags, 0U); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse controller flags from remote request: %s", pcmk_rc_str(rc)); } if (pcmk_is_set(flags, crm_ipc_client_response)) { int msg_id = 0; xmlNode *op_reply = pcmk__xe_create(NULL, PCMK__XE_ACK); crm_xml_add(op_reply, PCMK_XA_FUNCTION, __func__); crm_xml_add_int(op_reply, PCMK__XA_LINE, __LINE__); crm_element_value_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id); remote_proxy_relay_response(proxy, op_reply, msg_id); pcmk__xml_free(op_reply); } } else { remote_proxy_cb(lrmd, lrm_state->node_name, msg); } } // \return Standard Pacemaker return code int controld_connect_remote_executor(lrm_state_t *lrm_state, const char *server, int port, int timeout_ms) { int rc = pcmk_rc_ok; if (lrm_state->conn == NULL) { lrmd_t *api = NULL; rc = lrmd__new(&api, lrm_state->node_name, server, port); if (rc != pcmk_rc_ok) { crm_warn("Pacemaker Remote connection to %s:%s failed: %s " QB_XS " rc=%d", server, port, pcmk_rc_str(rc), rc); return rc; } lrm_state->conn = api; api->cmds->set_callback(api, remote_lrm_op_callback); lrmd_internal_set_proxy_callback(api, lrm_state, crmd_remote_proxy_cb); } crm_trace("Initiating remote connection to %s:%d with timeout %dms", server, port, timeout_ms); rc = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn, lrm_state->node_name, timeout_ms); if (rc == pcmk_ok) { lrm_state->num_lrm_register_fails = 0; } else { lrm_state->num_lrm_register_fails++; // Ignored for remote connections } return pcmk_legacy2rc(rc); } int lrm_state_get_metadata(lrm_state_t * lrm_state, const char *class, const char *provider, const char *agent, char **output, enum lrmd_call_options options) { lrmd_key_value_t *params = NULL; if (!lrm_state->conn) { return -ENOTCONN; } /* Add the node name to the environment, as is done with normal resource * action calls. Meta-data calls shouldn't need it, but some agents are * written with an ocf_local_nodename call at the beginning regardless of * action. Without the environment variable, the agent would try to contact * the controller to get the node name -- but the controller would be * blocking on the synchronous meta-data call. * * At this point, we have to assume that agents are unlikely to make other * calls that require the controller, such as crm_node --quorum or * --cluster-id. * * @TODO Make meta-data calls asynchronous. (This will be part of a larger * project to make meta-data calls via the executor rather than directly.) 
*/ params = lrmd_key_value_add(params, CRM_META "_" PCMK__META_ON_NODE, lrm_state->node_name); return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn, class, provider, agent, output, options, params); } int lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action, guint interval_ms) { if (!lrm_state->conn) { return -ENOTCONN; } /* Figure out a way to make this async? * NOTICE: Currently it's synced and directly acknowledged in do_lrm_invoke(). */ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_cancel(lrm_state, rsc_id, action, interval_ms); } return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id, action, interval_ms); } lrmd_rsc_info_t * lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc = NULL; if (!lrm_state->conn) { return NULL; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_get_rsc_info(lrm_state, rsc_id); } rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id); if (rsc == NULL) { /* only contact the lrmd if we don't already have a cached rsc info */ rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options); if (rsc == NULL) { return NULL; } /* cache the result */ g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc); } return lrmd_copy_rsc_info(rsc); } /*! * \internal * \brief Initiate a resource agent action * * \param[in,out] lrm_state Executor state object * \param[in] rsc_id ID of resource for action * \param[in] action Action to execute * \param[in] userdata String to copy and pass to execution callback * \param[in] interval_ms Action interval (in milliseconds) * \param[in] timeout_ms Action timeout (in milliseconds) * \param[in] start_delay_ms Delay (in ms) before initiating action * \param[in] parameters Hash table of resource parameters * \param[out] call_id Where to store call ID on success * * \return Standard Pacemaker return code */ int controld_execute_resource_agent(lrm_state_t *lrm_state, const char *rsc_id, const char *action, const char *userdata, guint interval_ms, int timeout_ms, int start_delay_ms, GHashTable *parameters, int *call_id) { int rc = pcmk_rc_ok; lrmd_key_value_t *params = NULL; if (lrm_state->conn == NULL) { return ENOTCONN; } // Convert parameters from hash table to list if (parameters != NULL) { const char *key = NULL; const char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, parameters); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { params = lrmd_key_value_add(params, key, value); } } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { rc = controld_execute_remote_agent(lrm_state, rsc_id, action, userdata, interval_ms, timeout_ms, start_delay_ms, params, call_id); } else { rc = ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id, action, userdata, interval_ms, timeout_ms, start_delay_ms, lrmd_opt_notify_changes_only, params); if (rc < 0) { rc = pcmk_legacy2rc(rc); } else { *call_id = rc; rc = pcmk_rc_ok; } } return rc; } int lrm_state_register_rsc(lrm_state_t * lrm_state, const char *rsc_id, const char *class, const char *provider, const char *agent, enum lrmd_call_options options) { lrmd_t *conn = (lrmd_t *) lrm_state->conn; if (conn == NULL) { return -ENOTCONN; } if (is_remote_lrmd_ra(agent, provider, NULL)) { return controld_get_executor_state(rsc_id, true)? pcmk_ok : -EINVAL; } /* @TODO Implement an asynchronous version of this (currently a blocking * call to the lrmd). 
*/ return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider, agent, options); } int lrm_state_unregister_rsc(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { g_hash_table_remove(lrm_state_table, rsc_id); return pcmk_ok; } g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id); /* @TODO Optimize this ... this function is a blocking round trip from * client to daemon. The controld_execd_state.c code path that uses this * function should always treat it as an async operation. The executor API * should make an async version available. */ return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options); } diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h index 430f10aebc..9ba4c358cc 100644 --- a/include/crm/pengine/rules.h +++ b/include/crm/pengine/rules.h @@ -1,23 +1,34 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_RULES__H # define PCMK__CRM_PENGINE_RULES__H # include # include # include # include +/** + * \file + * \brief Deprecated Pacemaker rule API + * \ingroup pengine + * \deprecated Do not include this header directly. The rule APIs in this + * header, and the header itself, will be removed in a future + * release. + */ + #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) #include #include +#else +#error Do not include the deprecated header crm/pengine/rules.h #endif #endif diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h index 79029c2e96..ff37e96440 100644 --- a/include/crm/pengine/rules_internal.h +++ b/include/crm/pengine/rules_internal.h @@ -1,30 +1,29 @@ /* * Copyright 2015-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_RULES_INTERNAL__H #define PCMK__CRM_PENGINE_RULES_INTERNAL__H #include #include #include -#include #ifdef __cplusplus extern "C" { #endif GList *pe_unpack_alerts(const xmlNode *alerts); void pe_free_alert_list(GList *alert_list); #ifdef __cplusplus } #endif #endif // PCMK__CRM_PENGINE_RULES_INTERNAL__H diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c index 860d75f143..9d5bed5a6e 100644 --- a/lib/cib/cib_utils.c +++ b/lib/cib/cib_utils.c @@ -1,936 +1,935 @@ /* * Original copyright 2004 International Business Machines * Later changes copyright 2008-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include gboolean cib_version_details(xmlNode * cib, int *admin_epoch, int *epoch, int *updates) { *epoch = -1; *updates = -1; *admin_epoch = -1; if (cib == NULL) { return FALSE; } else { crm_element_value_int(cib, PCMK_XA_EPOCH, epoch); crm_element_value_int(cib, PCMK_XA_NUM_UPDATES, updates); crm_element_value_int(cib, PCMK_XA_ADMIN_EPOCH, admin_epoch); } return TRUE; } gboolean cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *updates, int *_admin_epoch, int *_epoch, int *_updates) { int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; xml_patch_versions(diff, add, del); *admin_epoch = add[0]; *epoch = add[1]; *updates = add[2]; *_admin_epoch = del[0]; *_epoch = del[1]; *_updates = del[2]; return TRUE; } /*! * \internal * \brief Get the XML patchset from a CIB diff notification * * \param[in] msg CIB diff notification * \param[out] patchset Where to store XML patchset * * \return Standard Pacemaker return code */ int cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset) { int rc = pcmk_err_generic; xmlNode *wrapper = NULL; pcmk__assert(patchset != NULL); *patchset = NULL; if (msg == NULL) { crm_err("CIB diff notification received with no XML"); return ENOMSG; } if ((crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc) != 0) || (rc != pcmk_ok)) { crm_warn("Ignore failed CIB update: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "failed"); return pcmk_legacy2rc(rc); } wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); *patchset = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (*patchset == NULL) { crm_err("CIB diff notification received with no patchset"); return ENOMSG; } return pcmk_rc_ok; } /*! * \brief Create XML for a new (empty) CIB * * \param[in] cib_epoch What to use as \c PCMK_XA_EPOCH CIB attribute * * \return Newly created XML for empty CIB * * \note It is the caller's responsibility to free the result with * \c pcmk__xml_free(). 
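 *
 * Usage sketch (illustrative):
 *
 *   xmlNode *cib = createEmptyCib(1);  // epoch 1, zero updates
 *
 *   // ... add configuration or status entries as needed ...
 *
 *   pcmk__xml_free(cib);               // caller owns the result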
*/ xmlNode * createEmptyCib(int cib_epoch) { xmlNode *cib_root = NULL, *config = NULL; cib_root = pcmk__xe_create(NULL, PCMK_XE_CIB); crm_xml_add(cib_root, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_xml_add(cib_root, PCMK_XA_VALIDATE_WITH, pcmk__highest_schema_name()); crm_xml_add_int(cib_root, PCMK_XA_EPOCH, cib_epoch); crm_xml_add_int(cib_root, PCMK_XA_NUM_UPDATES, 0); crm_xml_add_int(cib_root, PCMK_XA_ADMIN_EPOCH, 0); config = pcmk__xe_create(cib_root, PCMK_XE_CONFIGURATION); pcmk__xe_create(cib_root, PCMK_XE_STATUS); pcmk__xe_create(config, PCMK_XE_CRM_CONFIG); pcmk__xe_create(config, PCMK_XE_NODES); pcmk__xe_create(config, PCMK_XE_RESOURCES); pcmk__xe_create(config, PCMK_XE_CONSTRAINTS); #if PCMK__RESOURCE_STICKINESS_DEFAULT != 0 { xmlNode *rsc_defaults = pcmk__xe_create(config, PCMK_XE_RSC_DEFAULTS); xmlNode *meta = pcmk__xe_create(rsc_defaults, PCMK_XE_META_ATTRIBUTES); xmlNode *nvpair = pcmk__xe_create(meta, PCMK_XE_NVPAIR); crm_xml_add(meta, PCMK_XA_ID, "build-resource-defaults"); crm_xml_add(nvpair, PCMK_XA_ID, "build-" PCMK_META_RESOURCE_STICKINESS); crm_xml_add(nvpair, PCMK_XA_NAME, PCMK_META_RESOURCE_STICKINESS); crm_xml_add_int(nvpair, PCMK_XA_VALUE, PCMK__RESOURCE_STICKINESS_DEFAULT); } #endif return cib_root; } static bool cib_acl_enabled(xmlNode *xml, const char *user) { bool rc = FALSE; if(pcmk_acl_required(user)) { const char *value = NULL; GHashTable *options = pcmk__strkey_table(free, free); cib_read_config(options, xml); value = pcmk__cluster_option(options, PCMK_OPT_ENABLE_ACL); rc = crm_is_true(value); g_hash_table_destroy(options); } crm_trace("CIB ACL is %s", rc ? "enabled" : "disabled"); return rc; } /*! * \internal * \brief Determine whether to perform operations on a scratch copy of the CIB * * \param[in] op CIB operation * \param[in] section CIB section * \param[in] call_options CIB call options * * \return \p true if we should make a copy of the CIB, or \p false otherwise */ static bool should_copy_cib(const char *op, const char *section, int call_options) { if (pcmk_is_set(call_options, cib_dryrun)) { // cib_dryrun implies a scratch copy by definition; no side effects return true; } if (pcmk__str_eq(op, PCMK__CIB_REQUEST_COMMIT_TRANSACT, pcmk__str_none)) { /* Commit-transaction must make a copy for atomicity. We must revert to * the original CIB if the entire transaction cannot be applied * successfully. */ return true; } if (pcmk_is_set(call_options, cib_transaction)) { /* If cib_transaction is set, then we're in the process of committing a * transaction. The commit-transaction request already made a scratch * copy, and we're accumulating changes in that copy. */ return false; } if (pcmk__str_eq(section, PCMK_XE_STATUS, pcmk__str_none)) { /* Copying large CIBs accounts for a huge percentage of our CIB usage, * and this avoids some of it. * * @TODO: Is this safe? See discussion at * https://github.com/ClusterLabs/pacemaker/pull/3094#discussion_r1211400690. 
*/ return false; } // Default behavior is to operate on a scratch copy return true; } int cib_perform_op(cib_t *cib, const char *op, uint32_t call_options, cib__op_fn_t fn, bool is_query, const char *section, xmlNode *req, xmlNode *input, bool manage_counters, bool *config_changed, xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff, xmlNode **output) { int rc = pcmk_ok; bool check_schema = true; bool make_copy = true; xmlNode *top = NULL; xmlNode *scratch = NULL; xmlNode *patchset_cib = NULL; xmlNode *local_diff = NULL; const char *user = crm_element_value(req, PCMK__XA_CIB_USER); bool with_digest = false; crm_trace("Begin %s%s%s op", (pcmk_is_set(call_options, cib_dryrun)? "dry run of " : ""), (is_query? "read-only " : ""), op); CRM_CHECK(output != NULL, return -ENOMSG); CRM_CHECK(current_cib != NULL, return -ENOMSG); CRM_CHECK(result_cib != NULL, return -ENOMSG); CRM_CHECK(config_changed != NULL, return -ENOMSG); if(output) { *output = NULL; } *result_cib = NULL; *config_changed = false; if (fn == NULL) { return -EINVAL; } if (is_query) { xmlNode *cib_ro = *current_cib; xmlNode *cib_filtered = NULL; if (cib_acl_enabled(cib_ro, user) && xml_acl_filtered_copy(user, *current_cib, *current_cib, &cib_filtered)) { if (cib_filtered == NULL) { crm_debug("Pre-filtered the entire cib"); return -EACCES; } cib_ro = cib_filtered; crm_log_xml_trace(cib_ro, "filtered"); } rc = (*fn) (op, call_options, section, req, input, cib_ro, result_cib, output); if(output == NULL || *output == NULL) { /* nothing */ } else if(cib_filtered == *output) { cib_filtered = NULL; /* Let them have this copy */ } else if (*output == *current_cib) { /* They already know not to free it */ } else if(cib_filtered && (*output)->doc == cib_filtered->doc) { /* We're about to free the document of which *output is a part */ *output = pcmk__xml_copy(NULL, *output); } else if ((*output)->doc == (*current_cib)->doc) { /* Give them a copy they can free */ *output = pcmk__xml_copy(NULL, *output); } pcmk__xml_free(cib_filtered); return rc; } make_copy = should_copy_cib(op, section, call_options); if (!make_copy) { /* Conditional on v2 patch style */ scratch = *current_cib; // Make a copy of the top-level element to store version details top = pcmk__xe_create(NULL, (const char *) scratch->name); pcmk__xe_copy_attrs(top, scratch, pcmk__xaf_none); patchset_cib = top; xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); rc = (*fn) (op, call_options, section, req, input, scratch, &scratch, output); /* If scratch points to a new object now (for example, after an erase * operation), then *current_cib should point to the same object. 
*/ *current_cib = scratch; } else { scratch = pcmk__xml_copy(NULL, *current_cib); patchset_cib = *current_cib; xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); rc = (*fn) (op, call_options, section, req, input, *current_cib, &scratch, output); if ((scratch != NULL) && !xml_tracking_changes(scratch)) { crm_trace("Inferring changes after %s op", op); xml_track_changes(scratch, user, *current_cib, cib_acl_enabled(*current_cib, user)); xml_calculate_changes(*current_cib, scratch); } CRM_CHECK(*current_cib != scratch, return -EINVAL); } xml_acl_disable(scratch); /* Allow the system to make any additional changes */ if (rc == pcmk_ok && scratch == NULL) { rc = -EINVAL; goto done; } else if(rc == pcmk_ok && xml_acl_denied(scratch)) { crm_trace("ACL rejected part or all of the proposed changes"); rc = -EACCES; goto done; } else if (rc != pcmk_ok) { goto done; } /* If the CIB is from a file, we don't need to check that the feature set is * supported. All we care about in that case is the schema version, which * is checked elsewhere. */ if (scratch && (cib == NULL || cib->variant != cib_file)) { const char *new_version = crm_element_value(scratch, PCMK_XA_CRM_FEATURE_SET); rc = pcmk__check_feature_set(new_version); if (rc != pcmk_rc_ok) { crm_err("Discarding update with feature set '%s' greater than " "our own '%s'", new_version, CRM_FEATURE_SET); rc = pcmk_rc2legacy(rc); goto done; } } if (patchset_cib != NULL) { int old = 0; int new = 0; crm_element_value_int(scratch, PCMK_XA_ADMIN_EPOCH, &new); crm_element_value_int(patchset_cib, PCMK_XA_ADMIN_EPOCH, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: %#x)", PCMK_XA_ADMIN_EPOCH, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = -pcmk_err_old_data; } else if (old == new) { crm_element_value_int(scratch, PCMK_XA_EPOCH, &new); crm_element_value_int(patchset_cib, PCMK_XA_EPOCH, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: %#x)", PCMK_XA_EPOCH, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = -pcmk_err_old_data; } } } crm_trace("Massaging CIB contents"); pcmk__strip_xml_text(scratch); if (make_copy) { static time_t expires = 0; time_t tm_now = time(NULL); if (expires < tm_now) { expires = tm_now + 60; /* Validate clients are correctly applying v2-style diffs at most once a minute */ with_digest = true; } } local_diff = xml_create_patchset(0, patchset_cib, scratch, config_changed, manage_counters); pcmk__log_xml_changes(LOG_TRACE, scratch); xml_accept_changes(scratch); if(local_diff) { patchset_process_digest(local_diff, patchset_cib, scratch, with_digest); pcmk__log_xml_patchset(LOG_INFO, local_diff); crm_log_xml_trace(local_diff, "raw patch"); } if (make_copy && (local_diff != NULL)) { // Original to compare against doesn't exist pcmk__if_tracing( { // Validate the calculated patch set int test_rc = pcmk_ok; int format = 1; xmlNode *cib_copy = pcmk__xml_copy(NULL, patchset_cib); crm_element_value_int(local_diff, PCMK_XA_FORMAT, &format); test_rc = xml_apply_patchset(cib_copy, local_diff, manage_counters); if (test_rc != pcmk_ok) { save_xml_to_file(cib_copy, "PatchApply:calculated", NULL); save_xml_to_file(patchset_cib, "PatchApply:input", NULL); save_xml_to_file(scratch, "PatchApply:actual", NULL); save_xml_to_file(local_diff, "PatchApply:diff", NULL); crm_err("v%d patchset error, patch failed to apply: %s " "(%d)", format, pcmk_rc_str(pcmk_legacy2rc(test_rc)), test_rc); } 
pcmk__xml_free(cib_copy); }, {} ); } if (pcmk__str_eq(section, PCMK_XE_STATUS, pcmk__str_casei)) { /* Throttle the amount of costly validation we perform due to status updates * a) we don't really care what's in the status section * b) we don't validate any of its contents at the moment anyway */ check_schema = false; } /* === scratch must not be modified after this point === * Exceptions, anything in: static filter_t filter[] = { { 0, PCMK_XA_CRM_DEBUG_ORIGIN }, { 0, PCMK_XA_CIB_LAST_WRITTEN }, { 0, PCMK_XA_UPDATE_ORIGIN }, { 0, PCMK_XA_UPDATE_CLIENT }, { 0, PCMK_XA_UPDATE_USER }, }; */ if (*config_changed && !pcmk_is_set(call_options, cib_no_mtime)) { const char *schema = crm_element_value(scratch, PCMK_XA_VALIDATE_WITH); if (schema == NULL) { rc = -pcmk_err_cib_corrupt; } pcmk__xe_add_last_written(scratch); pcmk__warn_if_schema_deprecated(schema); /* Make values of origin, client, and user in scratch match * the ones in req (if the schema allows the attributes) */ if (pcmk__cmp_schemas_by_name(schema, "pacemaker-1.2") >= 0) { const char *origin = crm_element_value(req, PCMK__XA_SRC); const char *client = crm_element_value(req, PCMK__XA_CIB_CLIENTNAME); if (origin != NULL) { crm_xml_add(scratch, PCMK_XA_UPDATE_ORIGIN, origin); } else { pcmk__xe_remove_attr(scratch, PCMK_XA_UPDATE_ORIGIN); } if (client != NULL) { crm_xml_add(scratch, PCMK_XA_UPDATE_CLIENT, client); } else { pcmk__xe_remove_attr(scratch, PCMK_XA_UPDATE_CLIENT); } if (user != NULL) { crm_xml_add(scratch, PCMK_XA_UPDATE_USER, user); } else { pcmk__xe_remove_attr(scratch, PCMK_XA_UPDATE_USER); } } } crm_trace("Perform validation: %s", pcmk__btoa(check_schema)); if ((rc == pcmk_ok) && check_schema && !pcmk__configured_schema_validates(scratch)) { rc = -pcmk_err_schema_validation; } done: *result_cib = scratch; /* @TODO: This may not work correctly with !make_copy, since we don't * keep the original CIB. */ if ((rc != pcmk_ok) && cib_acl_enabled(patchset_cib, user) && xml_acl_filtered_copy(user, patchset_cib, scratch, result_cib)) { if (*result_cib == NULL) { crm_debug("Pre-filtered the entire cib result"); } pcmk__xml_free(scratch); } if(diff) { *diff = local_diff; } else { pcmk__xml_free(local_diff); } pcmk__xml_free(top); crm_trace("Done"); return rc; } int cib__create_op(cib_t *cib, const char *op, const char *host, const char *section, xmlNode *data, int call_options, const char *user_name, const char *client_name, xmlNode **op_msg) { CRM_CHECK((cib != NULL) && (op_msg != NULL), return -EPROTO); *op_msg = pcmk__xe_create(NULL, PCMK__XE_CIB_COMMAND); cib->call_id++; if (cib->call_id < 1) { cib->call_id = 1; } crm_xml_add(*op_msg, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(*op_msg, PCMK__XA_CIB_OP, op); crm_xml_add(*op_msg, PCMK__XA_CIB_HOST, host); crm_xml_add(*op_msg, PCMK__XA_CIB_SECTION, section); crm_xml_add(*op_msg, PCMK__XA_CIB_USER, user_name); crm_xml_add(*op_msg, PCMK__XA_CIB_CLIENTNAME, client_name); crm_xml_add_int(*op_msg, PCMK__XA_CIB_CALLID, cib->call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(*op_msg, PCMK__XA_CIB_CALLOPT, call_options); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(*op_msg, PCMK__XE_CIB_CALLDATA); pcmk__xml_copy(wrapper, data); } return pcmk_ok; } /*!
* \internal * \brief Check whether a CIB request is supported in a transaction * * \param[in] request CIB request * * \return Standard Pacemaker return code */ static int validate_transaction_request(const xmlNode *request) { const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const cib__operation_t *operation = NULL; int rc = cib__get_operation(op, &operation); if (rc != pcmk_rc_ok) { // cib__get_operation() logs error return rc; } if (!pcmk_is_set(operation->flags, cib__op_attr_transaction)) { crm_err("Operation %s is not supported in CIB transactions", op); return EOPNOTSUPP; } if (host != NULL) { crm_err("Operation targeting a specific node (%s) is not supported in " "a CIB transaction", host); return EOPNOTSUPP; } return pcmk_rc_ok; } /*! * \internal * \brief Append a CIB request to a CIB transaction * * \param[in,out] cib CIB client whose transaction to extend * \param[in,out] request Request to add to transaction * * \return Legacy Pacemaker return code */ int cib__extend_transaction(cib_t *cib, xmlNode *request) { int rc = pcmk_rc_ok; pcmk__assert((cib != NULL) && (request != NULL)); rc = validate_transaction_request(request); if ((rc == pcmk_rc_ok) && (cib->transaction == NULL)) { rc = pcmk_rc_no_transaction; } if (rc == pcmk_rc_ok) { pcmk__xml_copy(cib->transaction, request); } else { const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *client_id = NULL; cib->cmds->client_id(cib, NULL, &client_id); crm_err("Failed to add '%s' operation to transaction for client %s: %s", op, pcmk__s(client_id, "(unidentified)"), pcmk_rc_str(rc)); crm_log_xml_info(request, "failed"); } return pcmk_rc2legacy(rc); } void cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc) { xmlNode *output = NULL; cib_callback_client_t *blob = NULL; if (msg != NULL) { xmlNode *wrapper = NULL; crm_element_value_int(msg, PCMK__XA_CIB_RC, &rc); crm_element_value_int(msg, PCMK__XA_CIB_CALLID, &call_id); wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_CALLDATA, NULL, NULL); output = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); } blob = cib__lookup_id(call_id); if (blob == NULL) { crm_trace("No callback found for call %d", call_id); } if (cib == NULL) { crm_debug("No cib object supplied"); } if (rc == -pcmk_err_diff_resync) { /* This is an internal value that clients do not and should not care about */ rc = pcmk_ok; } if (blob && blob->callback && (rc == pcmk_ok || blob->only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", pcmk__s(blob->id, "without ID"), call_id); blob->callback(msg, call_id, rc, output, blob->user_data); } else if ((cib != NULL) && (rc != pcmk_ok)) { crm_warn("CIB command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed CIB Update"); } /* This may free user_data, so do it after the callback */ if (blob) { remove_cib_op_callback(call_id, FALSE); } crm_trace("OP callback activated for %d", call_id); } void cib_native_notify(gpointer data, gpointer user_data) { xmlNode *msg = user_data; cib_notify_client_t *entry = data; const char *event = NULL; if (msg == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(msg, PCMK__XA_SUBT); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->callback == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (!pcmk__str_eq(entry->event, event, pcmk__str_casei)) { crm_trace("Skipping callback - event mismatch %p/%s 
vs. %s", entry, entry->event, event); return; } crm_trace("Invoking callback for %p/%s event...", entry, event); entry->callback(event, msg); crm_trace("Callback invoked..."); } gboolean cib_read_config(GHashTable * options, xmlNode * current_cib) { xmlNode *config = NULL; crm_time_t *now = NULL; if (options == NULL || current_cib == NULL) { return FALSE; } now = crm_time_new(NULL); g_hash_table_remove_all(options); config = pcmk_find_cib_element(current_cib, PCMK_XE_CRM_CONFIG); if (config) { pcmk_rule_input_t rule_input = { .now = now, }; pcmk_unpack_nvpair_blocks(config, PCMK_XE_CLUSTER_PROPERTY_SET, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, &rule_input, options, NULL); } pcmk__validate_cluster_options(options); crm_time_free(now); return TRUE; } int cib_internal_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name) { int (*delegate)(cib_t *cib, const char *op, const char *host, const char *section, xmlNode *data, xmlNode **output_data, int call_options, const char *user_name) = NULL; if (cib == NULL) { return -EINVAL; } delegate = cib->delegate_fn; if (delegate == NULL) { return -EPROTONOSUPPORT; } if (user_name == NULL) { user_name = getenv("CIB_user"); } return delegate(cib, op, host, section, data, output_data, call_options, user_name); } /*! * \brief Apply a CIB update patch to a given CIB * * \param[in] event CIB update patch * \param[in] input CIB to patch * \param[out] output Resulting CIB after patch * \param[in] level Log the patch at this log level (unless LOG_CRIT) * * \return Legacy Pacemaker return code * \note sbd calls this function */ int cib_apply_patch_event(xmlNode *event, xmlNode *input, xmlNode **output, int level) { int rc = pcmk_err_generic; xmlNode *wrapper = NULL; xmlNode *diff = NULL; pcmk__assert((event != NULL) && (input != NULL) && (output != NULL)); crm_element_value_int(event, PCMK__XA_CIB_RC, &rc); wrapper = pcmk__xe_first_child(event, PCMK__XE_CIB_UPDATE_RESULT, NULL, NULL); diff = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (rc < pcmk_ok || diff == NULL) { return rc; } if (level > LOG_CRIT) { pcmk__log_xml_patchset(level, diff); } if (input != NULL) { rc = cib_process_diff(NULL, cib_none, NULL, event, diff, input, output, NULL); if (rc != pcmk_ok) { crm_debug("Update didn't apply: %s (%d) %p", pcmk_strerror(rc), rc, *output); if (rc == -pcmk_err_old_data) { crm_trace("Masking error, we already have the supplied update"); return pcmk_ok; } pcmk__xml_free(*output); *output = NULL; return rc; } } return rc; } #define log_signon_query_err(out, fmt, args...) 
do { \ if (out != NULL) { \ out->err(out, fmt, ##args); \ } else { \ crm_err(fmt, ##args); \ } \ } while (0) int cib__signon_query(pcmk__output_t *out, cib_t **cib, xmlNode **cib_object) { int rc = pcmk_rc_ok; cib_t *cib_conn = NULL; pcmk__assert(cib_object != NULL); if (cib == NULL) { cib_conn = cib_new(); } else { if (*cib == NULL) { *cib = cib_new(); } cib_conn = *cib; } if (cib_conn == NULL) { return ENOMEM; } if (cib_conn->state == cib_disconnected) { rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); } if (rc != pcmk_rc_ok) { log_signon_query_err(out, "Could not connect to the CIB: %s", pcmk_rc_str(rc)); goto done; } if (out != NULL) { out->transient(out, "Querying CIB..."); } rc = cib_conn->cmds->query(cib_conn, NULL, cib_object, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { log_signon_query_err(out, "CIB query failed: %s", pcmk_rc_str(rc)); } done: if (cib == NULL) { cib__clean_up_connection(&cib_conn); } if ((rc == pcmk_rc_ok) && (*cib_object == NULL)) { return pcmk_rc_no_input; } return rc; } int cib__signon_attempts(cib_t *cib, enum cib_conn_type type, int attempts) { int rc = pcmk_rc_ok; crm_trace("Attempting connection to CIB manager (up to %d time%s)", attempts, pcmk__plural_s(attempts)); for (int remaining = attempts - 1; remaining >= 0; --remaining) { rc = cib->cmds->signon(cib, crm_system_name, type); if ((rc == pcmk_rc_ok) || (remaining == 0) || ((errno != EAGAIN) && (errno != EALREADY))) { break; } // Retry after soft error (interrupted by signal, etc.) pcmk__sleep_ms((attempts - remaining) * 500); crm_debug("Re-attempting connection to CIB manager (%d attempt%s remaining)", remaining, pcmk__plural_s(remaining)); } return rc; } int cib__clean_up_connection(cib_t **cib) { int rc; if (*cib == NULL) { return pcmk_rc_ok; } rc = (*cib)->cmds->signoff(*cib); cib_delete(*cib); *cib = NULL; return pcmk_legacy2rc(rc); } diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c index 0212f8c1df..bbf1bd9ae4 100644 --- a/lib/pacemaker/pcmk_sched_constraints.c +++ b/lib/pacemaker/pcmk_sched_constraints.c @@ -1,443 +1,442 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include "libpacemaker_private.h" /*! * \internal * \brief Unpack constraints from XML * * Given scheduler data, unpack all constraints from its input XML into * data structures. 
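 *
 * As a summary of the dispatch below, each child element of the
 * PCMK_XE_CONSTRAINTS section is handled by name:
 *
 *   PCMK_XE_RSC_ORDER       -> pcmk__unpack_ordering()
 *   PCMK_XE_RSC_COLOCATION  -> pcmk__unpack_colocation()
 *   PCMK_XE_RSC_LOCATION    -> pcmk__unpack_location()
 *   PCMK_XE_RSC_TICKET      -> pcmk__unpack_rsc_ticket()
 *
 * A constraint without a PCMK_XA_ID, or of any other type, is ignored with
 * a configuration error.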
* * \param[in,out] scheduler Scheduler data */ void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler) { xmlNode *xml_constraints = pcmk_find_cib_element(scheduler->input, PCMK_XE_CONSTRAINTS); for (xmlNode *xml_obj = pcmk__xe_first_child(xml_constraints, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { const char *id = crm_element_value(xml_obj, PCMK_XA_ID); const char *tag = (const char *) xml_obj->name; if (id == NULL) { pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID, tag); continue; } crm_trace("Unpacking %s constraint '%s'", tag, id); if (pcmk__str_eq(PCMK_XE_RSC_ORDER, tag, pcmk__str_none)) { pcmk__unpack_ordering(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_COLOCATION, tag, pcmk__str_none)) { pcmk__unpack_colocation(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_LOCATION, tag, pcmk__str_none)) { pcmk__unpack_location(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_TICKET, tag, pcmk__str_none)) { pcmk__unpack_rsc_ticket(xml_obj, scheduler); } else { pcmk__config_err("Unsupported constraint type: %s", tag); } } } pcmk_resource_t * pcmk__find_constraint_resource(GList *rsc_list, const char *id) { if (id == NULL) { return NULL; } for (GList *iter = rsc_list; iter != NULL; iter = iter->next) { pcmk_resource_t *parent = iter->data; pcmk_resource_t *match = NULL; match = parent->priv->fns->find_rsc(parent, id, NULL, pcmk_rsc_match_history); if (match != NULL) { if (!pcmk__str_eq(match->id, id, pcmk__str_none)) { /* We found an instance of a clone instead */ match = uber_parent(match); crm_debug("Found %s for %s", match->id, id); } return match; } } crm_trace("No match for %s", id); return NULL; } /*! * \internal * \brief Check whether an ID references a resource tag * * \param[in] scheduler Scheduler data * \param[in] id Tag ID to search for * \param[out] tag Where to store tag, if found * * \return true if ID refers to a tagged resource or resource set template, * otherwise false */ static bool find_constraint_tag(const pcmk_scheduler_t *scheduler, const char *id, pcmk__idref_t **tag) { *tag = NULL; // Check whether id refers to a resource set template if (g_hash_table_lookup_extended(scheduler->priv->templates, id, NULL, (gpointer *) tag)) { if (*tag == NULL) { crm_notice("No resource is derived from template '%s'", id); return false; } return true; } // If not, check whether id refers to a tag if (g_hash_table_lookup_extended(scheduler->priv->tags, id, NULL, (gpointer *) tag)) { if (*tag == NULL) { crm_notice("No resource is tagged with '%s'", id); return false; } return true; } pcmk__config_warn("No resource, template, or tag named '%s'", id); return false; } /*! * \internal * \brief Parse a role attribute from a constraint * * This is like pcmk_parse_role() except that started is treated as * pcmk_role_unknown (indicating any role), and the return value is * pcmk_rc_unpack_error for invalid specifications. 
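 *
 * For example (an illustrative sketch):
 *
 *   enum rsc_role_e role = pcmk_role_unknown;
 *
 *   pcmk__parse_constraint_role("c1", NULL, &role);
 *   // pcmk_rc_ok, role == pcmk_role_unknown (constraint applies to any role)
 *
 *   pcmk__parse_constraint_role("c1", PCMK_ROLE_STARTED, &role);
 *   // pcmk_rc_ok, role == pcmk_role_unknown (started is treated as any role)
 *
 *   pcmk__parse_constraint_role("c1", "NoSuchRole", &role);
 *   // pcmk_rc_unpack_error, and a configuration error is logged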
 * * \param[in] id ID of constraint being parsed (for logging only) * \param[in] role_spec Role specification * \param[in] role Where to store parsed role * * \return Standard Pacemaker return code */ int pcmk__parse_constraint_role(const char *id, const char *role_spec, enum rsc_role_e *role) { *role = pcmk_parse_role(role_spec); switch (*role) { case pcmk_role_unknown: if (role_spec != NULL) { pcmk__config_err("Ignoring constraint %s: Invalid role '%s'", id, role_spec); return pcmk_rc_unpack_error; } break; case pcmk_role_started: *role = pcmk_role_unknown; break; default: break; } return pcmk_rc_ok; } /*! * \internal * \brief Check whether an ID refers to a valid resource or tag * * \param[in] scheduler Scheduler data * \param[in] id ID to search for * \param[out] rsc Where to store resource, if found * (or NULL to skip searching resources) * \param[out] tag Where to store tag, if found * (or NULL to skip searching tags) * * \return true if id refers to a resource (possibly indirectly via a tag) */ bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler, const char *id, pcmk_resource_t **rsc, pcmk__idref_t **tag) { if (rsc != NULL) { *rsc = pcmk__find_constraint_resource(scheduler->priv->resources, id); if (*rsc != NULL) { return true; } } if ((tag != NULL) && find_constraint_tag(scheduler, id, tag)) { return true; } return false; } /*! * \internal * \brief Replace any resource tags with equivalent \c PCMK_XE_RESOURCE_REF * entries * * If a given constraint has resource sets, check each set for * \c PCMK_XE_RESOURCE_REF entries that list tags rather than resource IDs, and * replace any found with \c PCMK_XE_RESOURCE_REF entries for the corresponding * resource IDs. * * \param[in,out] xml_obj Constraint XML * \param[in] scheduler Scheduler data * * \return Equivalent XML with resource tags replaced (or NULL if none) * \note It is the caller's responsibility to free the return value with * \c pcmk__xml_free().
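 *
 * Usage sketch (illustrative; compare unpack_location_tags() in
 * pcmk_sched_location.c):
 *
 *   xmlNode *expanded = pcmk__expand_tags_in_sets(constraint_xml, scheduler);
 *
 *   if (expanded != NULL) {
 *       // unpack resource sets from expanded rather than constraint_xml
 *       pcmk__xml_free(expanded);
 *   }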
*/ xmlNode * pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pcmk_scheduler_t *scheduler) { xmlNode *new_xml = NULL; bool any_refs = false; // Short-circuit if there are no sets if (pcmk__xe_first_child(xml_obj, PCMK_XE_RESOURCE_SET, NULL, NULL) == NULL) { return NULL; } new_xml = pcmk__xml_copy(NULL, xml_obj); for (xmlNode *set = pcmk__xe_first_child(new_xml, PCMK_XE_RESOURCE_SET, NULL, NULL); set != NULL; set = pcmk__xe_next(set, PCMK_XE_RESOURCE_SET)) { GList *tag_refs = NULL; GList *iter = NULL; for (xmlNode *xml_rsc = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF, NULL, NULL); xml_rsc != NULL; xml_rsc = pcmk__xe_next(xml_rsc, PCMK_XE_RESOURCE_REF)) { pcmk_resource_t *rsc = NULL; pcmk__idref_t *tag = NULL; if (!pcmk__valid_resource_or_tag(scheduler, pcmk__xe_id(xml_rsc), &rsc, &tag)) { pcmk__config_err("Ignoring resource sets for constraint '%s' " "because '%s' is not a valid resource or tag", pcmk__xe_id(xml_obj), pcmk__xe_id(xml_rsc)); pcmk__xml_free(new_xml); return NULL; } else if (rsc) { continue; } else if (tag) { /* PCMK_XE_RESOURCE_REF under PCMK_XE_RESOURCE_SET references * template or tag */ xmlNode *last_ref = xml_rsc; /* For example, given the original XML: * * * * * * * * If rsc2 and rsc3 are tagged with tag1, we add them after it: * * * * * * * * */ for (iter = tag->refs; iter != NULL; iter = iter->next) { const char *ref_id = iter->data; xmlNode *new_ref = pcmk__xe_create(set, PCMK_XE_RESOURCE_REF); crm_xml_add(new_ref, PCMK_XA_ID, ref_id); xmlAddNextSibling(last_ref, new_ref); last_ref = new_ref; } any_refs = true; /* Freeing the resource_ref now would break the XML child * iteration, so just remember it for freeing later. */ tag_refs = g_list_append(tag_refs, xml_rsc); } } /* Now free '', and finally get: */ for (iter = tag_refs; iter != NULL; iter = iter->next) { xmlNode *tag_ref = iter->data; pcmk__xml_free(tag_ref); } g_list_free(tag_refs); } if (!any_refs) { pcmk__xml_free(new_xml); new_xml = NULL; } return new_xml; } /*! * \internal * \brief Convert a tag into a resource set of tagged resources * * \param[in,out] xml_obj Constraint XML * \param[out] rsc_set Where to store resource set XML * \param[in] attr Name of XML attribute with resource or tag ID * \param[in] convert_rsc If true, convert to set even if \p attr * references a resource * \param[in] scheduler Scheduler data */ bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr, bool convert_rsc, const pcmk_scheduler_t *scheduler) { const char *cons_id = NULL; const char *id = NULL; pcmk_resource_t *rsc = NULL; pcmk__idref_t *tag = NULL; *rsc_set = NULL; CRM_CHECK((xml_obj != NULL) && (attr != NULL), return false); cons_id = pcmk__xe_id(xml_obj); if (cons_id == NULL) { pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID, xml_obj->name); return false; } id = crm_element_value(xml_obj, attr); if (id == NULL) { return true; } if (!pcmk__valid_resource_or_tag(scheduler, id, &rsc, &tag)) { pcmk__config_err("Ignoring constraint '%s' because '%s' is not a " "valid resource or tag", cons_id, id); return false; } else if (tag) { /* The "attr" attribute (for a resource in a constraint) specifies a * template or tag. Add the corresponding PCMK_XE_RESOURCE_SET * containing the resources derived from or tagged with it. 
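 *
 * Illustrative example: if the constraint's "attr" value is tag1, and tag1
 * tags rsc2 and rsc3, the code below creates (literal XML names shown):
 *
 *   resource_set id="tag1" sequential="false"
 *     resource_ref id="rsc2"
 *     resource_ref id="rsc3"
 *
 * and the original attribute is removed from the constraint further down.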
*/ *rsc_set = pcmk__xe_create(xml_obj, PCMK_XE_RESOURCE_SET); crm_xml_add(*rsc_set, PCMK_XA_ID, id); for (GList *iter = tag->refs; iter != NULL; iter = iter->next) { const char *obj_ref = iter->data; xmlNode *rsc_ref = NULL; rsc_ref = pcmk__xe_create(*rsc_set, PCMK_XE_RESOURCE_REF); crm_xml_add(rsc_ref, PCMK_XA_ID, obj_ref); } // Set PCMK_XA_SEQUENTIAL=PCMK_VALUE_FALSE for the PCMK_XE_RESOURCE_SET pcmk__xe_set_bool_attr(*rsc_set, PCMK_XA_SEQUENTIAL, false); } else if ((rsc != NULL) && convert_rsc) { /* Even if a regular resource is referenced by "attr", convert it into a * PCMK_XE_RESOURCE_SET, because the other resource reference in the * constraint could be a template or tag. */ xmlNode *rsc_ref = NULL; *rsc_set = pcmk__xe_create(xml_obj, PCMK_XE_RESOURCE_SET); crm_xml_add(*rsc_set, PCMK_XA_ID, id); rsc_ref = pcmk__xe_create(*rsc_set, PCMK_XE_RESOURCE_REF); crm_xml_add(rsc_ref, PCMK_XA_ID, id); } else { return true; } /* Remove the "attr" attribute referencing the template/tag */ if (*rsc_set != NULL) { pcmk__xe_remove_attr(xml_obj, attr); } return true; } /*! * \internal * \brief Create constraints inherent to resource types * * \param[in,out] scheduler Scheduler data */ void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler) { crm_trace("Create internal constraints"); for (GList *iter = scheduler->priv->resources; iter != NULL; iter = iter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data; rsc->priv->cmds->internal_constraints(rsc); } } diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c index 08e0a291cf..a3915b5435 100644 --- a/lib/pacemaker/pcmk_sched_location.c +++ b/lib/pacemaker/pcmk_sched_location.c @@ -1,782 +1,781 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include -#include #include #include "libpacemaker_private.h" /*! * \internal * \brief Parse a role configuration for a location constraint * * \param[in] role_spec Role specification * \param[out] role Where to store parsed role * * \return true if role specification is valid, otherwise false */ static bool parse_location_role(const char *role_spec, enum rsc_role_e *role) { if (role_spec == NULL) { *role = pcmk_role_unknown; return true; } *role = pcmk_parse_role(role_spec); switch (*role) { case pcmk_role_unknown: return false; case pcmk_role_started: case pcmk_role_unpromoted: /* Any promotable clone instance cannot be promoted without being in * the unpromoted role first. Therefore, any constraint for the * started or unpromoted role applies to every role. */ *role = pcmk_role_unknown; break; default: break; } return true; } /*! * \internal * \brief Get the score attribute name (if any) used for a rule * * \param[in] rule_xml Rule XML * \param[out] allocated If the score attribute name needs to be allocated, * this will be set to the non-const equivalent of the * return value (should be set to NULL when passed) * \param[in] rule_input Values used to evaluate rule criteria * * \return Score attribute name used for rule, or NULL if none * \note The caller is responsible for freeing \p *allocated if it is non-NULL. 
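 *
 * Usage sketch (illustrative; mirrors generate_location_rule() below):
 *
 *   char *allocated = NULL;
 *   const char *attr = score_attribute_name(rule_xml, &allocated, rule_input);
 *
 *   if (attr != NULL) {
 *       // look up attr as a node attribute on each candidate node
 *   }
 *   free(allocated);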
*/ static const char * score_attribute_name(const xmlNode *rule_xml, char **allocated, const pcmk_rule_input_t *rule_input) { const char *name = NULL; name = crm_element_value(rule_xml, PCMK_XA_SCORE_ATTRIBUTE); if (name == NULL) { return NULL; } /* A score attribute name may use submatches extracted from a * resource ID regular expression. For example, if score-attribute is * "loc-\1", rsc-pattern is "ip-(.*)", and the resource ID is "ip-db", then * the score attribute name is "loc-db". */ if ((rule_input->rsc_id != NULL) && (rule_input->rsc_id_nmatches > 0)) { *allocated = pcmk__replace_submatches(name, rule_input->rsc_id, rule_input->rsc_id_submatches, rule_input->rsc_id_nmatches); if (*allocated != NULL) { name = *allocated; } } return name; } /*! * \internal * \brief Parse a score from a rule without a score attribute * * \param[in] rule_xml Rule XML * \param[out] score Where to store parsed score * * \return Standard Pacemaker return code */ static int score_from_rule(const xmlNode *rule_xml, int *score) { int rc = pcmk_rc_ok; const char *score_s = crm_element_value(rule_xml, PCMK_XA_SCORE); if (score_s == NULL) { // Not possible with schema validation enabled pcmk__config_err("Ignoring location constraint rule %s because " "neither " PCMK_XA_SCORE " nor " PCMK_XA_SCORE_ATTRIBUTE " was specified", pcmk__xe_id(rule_xml)); return pcmk_rc_unpack_error; } rc = pcmk_parse_score(score_s, score, 0); if (rc != pcmk_rc_ok) { // Not possible with schema validation enabled pcmk__config_err("Ignoring location constraint rule %s because " "'%s' is not a valid " PCMK_XA_SCORE ": %s", pcmk__xe_id(rule_xml), score_s, pcmk_rc_str(rc)); return pcmk_rc_unpack_error; } return pcmk_rc_ok; } /*! * \internal * \brief Get a rule score from a node attribute * * \param[in] constraint_id Location constraint ID (for logging only) * \param[in] attr_name Name of node attribute with score * \param[in] node Node to get attribute for * \param[in] rsc Resource being located * \param[out] score Where to store parsed score * * \return Standard Pacemaker return code (pcmk_rc_ok if a valid score was * parsed, ENXIO if the node attribute was unset, and some other value * if the node attribute value was invalid) */ static int score_from_attr(const char *constraint_id, const char *attr_name, const pcmk_node_t *node, const pcmk_resource_t *rsc, int *score) { int rc = pcmk_rc_ok; const char *target = NULL; const char *score_s = NULL; target = g_hash_table_lookup(rsc->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET); score_s = pcmk__node_attr(node, attr_name, target, pcmk__rsc_node_current); if (pcmk__str_empty(score_s)) { crm_info("Ignoring location %s for %s on %s " "because it has no node attribute %s", constraint_id, rsc->id, pcmk__node_name(node), attr_name); return ENXIO; } rc = pcmk_parse_score(score_s, score, 0); if (rc != pcmk_rc_ok) { crm_warn("Ignoring location %s for node %s because node " "attribute %s value '%s' is not a valid score: %s", constraint_id, pcmk__node_name(node), attr_name, score_s, pcmk_rc_str(rc)); return rc; } return pcmk_rc_ok; } /*! 
* \internal * \brief Generate a location constraint from a rule * * \param[in,out] rsc Resource that constraint is for * \param[in] rule_xml Rule XML (sub-element of location constraint) * \param[in] discovery Value of \c PCMK_XA_RESOURCE_DISCOVERY for * constraint * \param[out] next_change Where to set when rule evaluation will change * \param[in,out] rule_input Values used to evaluate rule criteria * (node-specific values will be overwritten by * this function) * \param[in] constraint_id ID of location constraint (for logging only) * * \return true if rule is valid, otherwise false */ static bool generate_location_rule(pcmk_resource_t *rsc, xmlNode *rule_xml, const char *discovery, crm_time_t *next_change, pcmk_rule_input_t *rule_input, const char *constraint_id) { const char *rule_id = NULL; const char *score_attr = NULL; const char *boolean = NULL; const char *role_spec = NULL; GList *iter = NULL; int score = 0; char *local_score_attr = NULL; pcmk__location_t *location_rule = NULL; enum rsc_role_e role = pcmk_role_unknown; enum pcmk__combine combine = pcmk__combine_unknown; rule_xml = pcmk__xe_resolve_idref(rule_xml, rsc->priv->scheduler->input); if (rule_xml == NULL) { return false; // Error already logged } rule_id = crm_element_value(rule_xml, PCMK_XA_ID); if (rule_id == NULL) { pcmk__config_err("Ignoring location constraint '%s' because its rule " "has no " PCMK_XA_ID, constraint_id); return false; } boolean = crm_element_value(rule_xml, PCMK_XA_BOOLEAN_OP); role_spec = crm_element_value(rule_xml, PCMK_XA_ROLE); if (parse_location_role(role_spec, &role)) { crm_trace("Setting rule %s role filter to %s", rule_id, role_spec); } else { pcmk__config_err("Ignoring location constraint '%s' because rule '%s' " "has invalid " PCMK_XA_ROLE " '%s'", constraint_id, rule_id, role_spec); return false; } combine = pcmk__parse_combine(boolean); switch (combine) { case pcmk__combine_and: case pcmk__combine_or: break; default: // Not possible with schema validation enabled pcmk__config_err("Ignoring location constraint '%s' because rule " "'%s' has invalid " PCMK_XA_BOOLEAN_OP " '%s'", constraint_id, rule_id, boolean); return false; } /* Users may configure the rule with either a score or the name of a * node attribute whose value should be used as the constraint score for * that node. 
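 *
 * In other words: a rule with PCMK_XA_SCORE contributes the same fixed score
 * to every node it matches, while a rule with PCMK_XA_SCORE_ATTRIBUTE takes
 * the score from that node attribute on each matched node (see
 * score_from_rule() and score_from_attr() above).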
*/ score_attr = score_attribute_name(rule_xml, &local_score_attr, rule_input); if ((score_attr == NULL) && (score_from_rule(rule_xml, &score) != pcmk_rc_ok)) { return false; } location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL); CRM_CHECK(location_rule != NULL, return NULL); location_rule->role_filter = role; for (iter = rsc->priv->scheduler->nodes; iter != NULL; iter = iter->next) { pcmk_node_t *node = iter->data; pcmk_node_t *local = NULL; rule_input->node_attrs = node->priv->attrs; rule_input->rsc_params = pe_rsc_params(rsc, node, rsc->priv->scheduler); if (pcmk_evaluate_rule(rule_xml, rule_input, next_change) != pcmk_rc_ok) { continue; } if ((score_attr != NULL) && (score_from_attr(constraint_id, score_attr, node, rsc, &score) != pcmk_rc_ok)) { continue; // Message already logged } local = pe__copy_node(node); location_rule->nodes = g_list_prepend(location_rule->nodes, local); local->assign->score = score; pcmk__rsc_trace(rsc, "Location %s score for %s on %s is %s via rule %s", constraint_id, rsc->id, pcmk__node_name(node), pcmk_readable_score(score), rule_id); } free(local_score_attr); if (location_rule->nodes == NULL) { crm_trace("No matching nodes for location constraint rule %s", rule_id); } else { crm_trace("Location constraint rule %s matched %d nodes", rule_id, g_list_length(location_rule->nodes)); } return true; } static void unpack_rsc_location(xmlNode *xml_obj, pcmk_resource_t *rsc, const char *role_spec, const char *score, char *rsc_id_match, int rsc_id_nmatches, regmatch_t *rsc_id_submatches) { const char *rsc_id = crm_element_value(xml_obj, PCMK_XA_RSC); const char *id = crm_element_value(xml_obj, PCMK_XA_ID); const char *node = crm_element_value(xml_obj, PCMK_XA_NODE); const char *discovery = crm_element_value(xml_obj, PCMK_XA_RESOURCE_DISCOVERY); if (rsc == NULL) { pcmk__config_warn("Ignoring constraint '%s' because resource '%s' " "does not exist", id, rsc_id); return; } if (score == NULL) { score = crm_element_value(xml_obj, PCMK_XA_SCORE); } if ((node != NULL) && (score != NULL)) { int score_i = 0; int rc = pcmk_rc_ok; pcmk_node_t *match = pcmk_find_node(rsc->priv->scheduler, node); enum rsc_role_e role = pcmk_role_unknown; pcmk__location_t *location = NULL; if (match == NULL) { crm_info("Ignoring location constraint %s " "because '%s' is not a known node", pcmk__s(id, "without ID"), node); return; } rc = pcmk_parse_score(score, &score_i, 0); if (rc != pcmk_rc_ok) { // Not possible with schema validation enabled pcmk__config_err("Ignoring location constraint %s " "because '%s' is not a valid score", id, score); return; } if (role_spec == NULL) { role_spec = crm_element_value(xml_obj, PCMK_XA_ROLE); } if (parse_location_role(role_spec, &role)) { crm_trace("Setting location constraint %s role filter: %s", id, role_spec); } else { // Not possible with schema validation enabled pcmk__config_err("Ignoring location constraint %s " "because '%s' is not a valid " PCMK_XA_ROLE, id, role_spec); return; } location = pcmk__new_location(id, rsc, score_i, discovery, match); if (location == NULL) { return; // Error already logged } location->role_filter = role; } else { crm_time_t *next_change = crm_time_new_undefined(); xmlNode *rule_xml = pcmk__xe_first_child(xml_obj, PCMK_XE_RULE, NULL, NULL); pcmk_rule_input_t rule_input = { .now = rsc->priv->scheduler->priv->now, .rsc_meta = rsc->priv->meta, .rsc_id = rsc_id_match, .rsc_id_submatches = rsc_id_submatches, .rsc_id_nmatches = rsc_id_nmatches, }; generate_location_rule(rsc, rule_xml, discovery, next_change, 
&rule_input, id); /* If there is a point in the future when the evaluation of a rule will * change, make sure the scheduler is re-run by that time. */ if (crm_time_is_defined(next_change)) { time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(t, rsc->priv->scheduler, "location rule evaluation"); } crm_time_free(next_change); } } static void unpack_simple_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler) { const char *id = crm_element_value(xml_obj, PCMK_XA_ID); const char *value = crm_element_value(xml_obj, PCMK_XA_RSC); if (value) { pcmk_resource_t *rsc; rsc = pcmk__find_constraint_resource(scheduler->priv->resources, value); unpack_rsc_location(xml_obj, rsc, NULL, NULL, NULL, 0, NULL); } value = crm_element_value(xml_obj, PCMK_XA_RSC_PATTERN); if (value) { regex_t regex; bool invert = false; if (value[0] == '!') { value++; invert = true; } if (regcomp(®ex, value, REG_EXTENDED) != 0) { pcmk__config_err("Ignoring constraint '%s' because " PCMK_XA_RSC_PATTERN " has invalid value '%s'", id, value); return; } for (GList *iter = scheduler->priv->resources; iter != NULL; iter = iter->next) { pcmk_resource_t *r = iter->data; int nregs = 0; regmatch_t *pmatch = NULL; int status; if (regex.re_nsub > 0) { nregs = regex.re_nsub + 1; } else { nregs = 1; } pmatch = pcmk__assert_alloc(nregs, sizeof(regmatch_t)); status = regexec(®ex, r->id, nregs, pmatch, 0); if (!invert && (status == 0)) { crm_debug("'%s' matched '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, r->id, nregs, pmatch); } else if (invert && (status != 0)) { crm_debug("'%s' is an inverted match of '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, NULL, 0, NULL); } else { crm_trace("'%s' does not match '%s' for %s", r->id, value, id); } free(pmatch); } regfree(®ex); } } // \return Standard Pacemaker return code static int unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml, pcmk_scheduler_t *scheduler) { const char *id = NULL; const char *rsc_id = NULL; const char *state = NULL; pcmk_resource_t *rsc = NULL; pcmk__idref_t *tag = NULL; xmlNode *rsc_set = NULL; *expanded_xml = NULL; CRM_CHECK(xml_obj != NULL, return EINVAL); id = pcmk__xe_id(xml_obj); if (id == NULL) { pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID, xml_obj->name); return pcmk_rc_unpack_error; } // Check whether there are any resource sets with template or tag references *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler); if (*expanded_xml != NULL) { crm_log_xml_trace(*expanded_xml, "Expanded " PCMK_XE_RSC_LOCATION); return pcmk_rc_ok; } rsc_id = crm_element_value(xml_obj, PCMK_XA_RSC); if (rsc_id == NULL) { return pcmk_rc_ok; } if (!pcmk__valid_resource_or_tag(scheduler, rsc_id, &rsc, &tag)) { pcmk__config_err("Ignoring constraint '%s' because '%s' is not a " "valid resource or tag", id, rsc_id); return pcmk_rc_unpack_error; } else if (rsc != NULL) { // No template is referenced return pcmk_rc_ok; } state = crm_element_value(xml_obj, PCMK_XA_ROLE); *expanded_xml = pcmk__xml_copy(NULL, xml_obj); /* Convert any template or tag reference into constraint * PCMK_XE_RESOURCE_SET */ if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, PCMK_XA_RSC, false, scheduler)) { pcmk__xml_free(*expanded_xml); *expanded_xml = NULL; return pcmk_rc_unpack_error; } if (rsc_set != NULL) { if (state != NULL) { /* Move PCMK_XA_RSC_ROLE into converted PCMK_XE_RESOURCE_SET as * PCMK_XA_ROLE attribute */ crm_xml_add(rsc_set, PCMK_XA_ROLE, state); 
pcmk__xe_remove_attr(*expanded_xml, PCMK_XA_ROLE); } crm_log_xml_trace(*expanded_xml, "Expanded " PCMK_XE_RSC_LOCATION); } else { // No sets pcmk__xml_free(*expanded_xml); *expanded_xml = NULL; } return pcmk_rc_ok; } // \return Standard Pacemaker return code static int unpack_location_set(xmlNode *location, xmlNode *set, pcmk_scheduler_t *scheduler) { xmlNode *xml_rsc = NULL; pcmk_resource_t *resource = NULL; const char *set_id; const char *role; const char *local_score; CRM_CHECK(set != NULL, return EINVAL); set_id = pcmk__xe_id(set); if (set_id == NULL) { pcmk__config_err("Ignoring " PCMK_XE_RESOURCE_SET " without " PCMK_XA_ID " in constraint '%s'", pcmk__s(pcmk__xe_id(location), "(missing ID)")); return pcmk_rc_unpack_error; } role = crm_element_value(set, PCMK_XA_ROLE); local_score = crm_element_value(set, PCMK_XA_SCORE); for (xml_rsc = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF, NULL, NULL); xml_rsc != NULL; xml_rsc = pcmk__xe_next(xml_rsc, PCMK_XE_RESOURCE_REF)) { resource = pcmk__find_constraint_resource(scheduler->priv->resources, pcmk__xe_id(xml_rsc)); if (resource == NULL) { pcmk__config_err("%s: No resource found for %s", set_id, pcmk__xe_id(xml_rsc)); return pcmk_rc_unpack_error; } unpack_rsc_location(location, resource, role, local_score, NULL, 0, NULL); } return pcmk_rc_ok; } void pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler) { xmlNode *set = NULL; bool any_sets = false; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; if (unpack_location_tags(xml_obj, &expanded_xml, scheduler) != pcmk_rc_ok) { return; } if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } for (set = pcmk__xe_first_child(xml_obj, PCMK_XE_RESOURCE_SET, NULL, NULL); set != NULL; set = pcmk__xe_next(set, PCMK_XE_RESOURCE_SET)) { any_sets = true; set = pcmk__xe_resolve_idref(set, scheduler->input); if ((set == NULL) // Configuration error, message already logged || (unpack_location_set(xml_obj, set, scheduler) != pcmk_rc_ok)) { if (expanded_xml) { pcmk__xml_free(expanded_xml); } return; } } if (expanded_xml) { pcmk__xml_free(expanded_xml); xml_obj = orig_xml; } if (!any_sets) { unpack_simple_location(xml_obj, scheduler); } } /*! * \internal * \brief Add a new location constraint to scheduler data * * \param[in] id XML ID of location constraint * \param[in,out] rsc Resource in location constraint * \param[in] node_score Constraint score * \param[in] probe_mode When resource should be probed on node * \param[in] node Node in constraint (or NULL if rule-based) * * \return Newly allocated location constraint on success, otherwise NULL * \note The result will be added to the cluster (via \p rsc) and should not be * freed separately. 
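 *
 * Usage sketch (illustrative; compare unpack_rsc_location() above):
 *
 *   pcmk__location_t *loc = pcmk__new_location(id, rsc, score_i, discovery,
 *                                              match);
 *   if (loc != NULL) {
 *       loc->role_filter = role;  // optionally restrict to a role
 *   }
 *   // loc is owned by the scheduler data; do not free it directly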
*/ pcmk__location_t * pcmk__new_location(const char *id, pcmk_resource_t *rsc, int node_score, const char *probe_mode, pcmk_node_t *node) { pcmk__location_t *new_con = NULL; CRM_CHECK((node != NULL) || (node_score == 0), return NULL); if (id == NULL) { pcmk__config_err("Invalid constraint: no ID specified"); return NULL; } if (rsc == NULL) { pcmk__config_err("Invalid constraint %s: no resource specified", id); return NULL; } new_con = pcmk__assert_alloc(1, sizeof(pcmk__location_t)); new_con->id = pcmk__str_copy(id); new_con->rsc = rsc; new_con->nodes = NULL; new_con->role_filter = pcmk_role_unknown; if (pcmk__str_eq(probe_mode, PCMK_VALUE_ALWAYS, pcmk__str_null_matches|pcmk__str_casei)) { new_con->probe_mode = pcmk__probe_always; } else if (pcmk__str_eq(probe_mode, PCMK_VALUE_NEVER, pcmk__str_casei)) { new_con->probe_mode = pcmk__probe_never; } else if (pcmk__str_eq(probe_mode, PCMK_VALUE_EXCLUSIVE, pcmk__str_casei)) { new_con->probe_mode = pcmk__probe_exclusive; pcmk__set_rsc_flags(rsc, pcmk__rsc_exclusive_probes); } else { pcmk__config_err("Invalid " PCMK_XA_RESOURCE_DISCOVERY " value %s " "in location constraint", probe_mode); } if (node != NULL) { pcmk_node_t *copy = pe__copy_node(node); copy->assign->score = node_score; new_con->nodes = g_list_prepend(NULL, copy); } rsc->priv->scheduler->priv->location_constraints = g_list_prepend(rsc->priv->scheduler->priv->location_constraints, new_con); rsc->priv->location_constraints = g_list_prepend(rsc->priv->location_constraints, new_con); return new_con; } /*! * \internal * \brief Apply all location constraints * * \param[in,out] scheduler Scheduler data */ void pcmk__apply_locations(pcmk_scheduler_t *scheduler) { for (GList *iter = scheduler->priv->location_constraints; iter != NULL; iter = iter->next) { pcmk__location_t *location = iter->data; location->rsc->priv->cmds->apply_location(location->rsc, location); } } /*! * \internal * \brief Apply a location constraint to a resource's allowed node scores * * \param[in,out] rsc Resource to apply constraint to * \param[in,out] location Location constraint to apply * * \note This does not consider the resource's children, so the resource's * apply_location() method should be used instead in most cases. */ void pcmk__apply_location(pcmk_resource_t *rsc, pcmk__location_t *location) { bool need_role = false; pcmk__assert((rsc != NULL) && (location != NULL)); // If a role was specified, ensure constraint is applicable need_role = (location->role_filter > pcmk_role_unknown); if (need_role && (location->role_filter != rsc->priv->next_role)) { pcmk__rsc_trace(rsc, "Not applying %s to %s because role will be %s not %s", location->id, rsc->id, pcmk_role_text(rsc->priv->next_role), pcmk_role_text(location->role_filter)); return; } if (location->nodes == NULL) { pcmk__rsc_trace(rsc, "Not applying %s to %s because no nodes match", location->id, rsc->id); return; } for (GList *iter = location->nodes; iter != NULL; iter = iter->next) { pcmk_node_t *node = iter->data; pcmk_node_t *allowed_node = NULL; allowed_node = g_hash_table_lookup(rsc->priv->allowed_nodes, node->priv->id); pcmk__rsc_trace(rsc, "Applying %s%s%s to %s score on %s: %c %s", location->id, (need_role? " for role " : ""), (need_role? pcmk_role_text(location->role_filter) : ""), rsc->id, pcmk__node_name(node), ((allowed_node == NULL)? 
'=' : '+'), pcmk_readable_score(node->assign->score)); if (allowed_node == NULL) { allowed_node = pe__copy_node(node); g_hash_table_insert(rsc->priv->allowed_nodes, (gpointer) allowed_node->priv->id, allowed_node); } else { allowed_node->assign->score = pcmk__add_scores(allowed_node->assign->score, node->assign->score); } if (allowed_node->assign->probe_mode < location->probe_mode) { if (location->probe_mode == pcmk__probe_exclusive) { pcmk__set_rsc_flags(rsc, pcmk__rsc_exclusive_probes); } /* exclusive > never > always... always is default */ allowed_node->assign->probe_mode = location->probe_mode; } } } diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index dd2752ebca..3459197776 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,2093 +1,2092 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include -#include #include #include #include #include #include #include enum pe__bundle_mount_flags { pe__bundle_mount_none = 0x00, // mount instance-specific subdirectory rather than source directly pe__bundle_mount_subdir = 0x01 }; typedef struct { char *source; char *target; char *options; uint32_t flags; // bitmask of pe__bundle_mount_flags } pe__bundle_mount_t; typedef struct { char *source; char *target; } pe__bundle_port_t; enum pe__container_agent { PE__CONTAINER_AGENT_UNKNOWN, PE__CONTAINER_AGENT_DOCKER, PE__CONTAINER_AGENT_PODMAN, }; #define PE__CONTAINER_AGENT_UNKNOWN_S "unknown" #define PE__CONTAINER_AGENT_DOCKER_S "docker" #define PE__CONTAINER_AGENT_PODMAN_S "podman" typedef struct pe__bundle_variant_data_s { int promoted_max; int nreplicas; int nreplicas_per_host; char *prefix; char *image; const char *ip_last; char *host_network; char *host_netmask; char *control_port; char *container_network; char *ip_range_start; gboolean add_host; gchar *container_host_options; char *container_command; char *launcher_options; const char *attribute_target; pcmk_resource_t *child; GList *replicas; // pcmk__bundle_replica_t * GList *ports; // pe__bundle_port_t * GList *mounts; // pe__bundle_mount_t * enum pe__container_agent agent_type; } pe__bundle_variant_data_t; #define get_bundle_variant_data(data, rsc) do { \ pcmk__assert(pcmk__is_bundle(rsc)); \ data = rsc->priv->variant_opaque; \ } while (0) /*! * \internal * \brief Get maximum number of bundle replicas allowed to run * * \param[in] rsc Bundle or bundled resource to check * * \return Maximum replicas for bundle corresponding to \p rsc */ int pe__bundle_max(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->nreplicas; } /*! * \internal * \brief Get the resource inside a bundle * * \param[in] bundle Bundle to check * * \return Resource inside \p bundle if any, otherwise NULL */ pcmk_resource_t * pe__bundled_resource(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->child; } /*! 
* \internal * \brief Get containerized resource corresponding to a given bundle container * * \param[in] instance Collective instance that might be a bundle container * * \return Bundled resource instance inside \p instance if it is a bundle * container instance, otherwise NULL */ const pcmk_resource_t * pe__get_rsc_in_container(const pcmk_resource_t *instance) { const pe__bundle_variant_data_t *data = NULL; const pcmk_resource_t *top = pe__const_top_resource(instance, true); if (!pcmk__is_bundle(top)) { return NULL; } get_bundle_variant_data(data, top); for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) { const pcmk__bundle_replica_t *replica = iter->data; if (instance == replica->container) { return replica->child; } } return NULL; } /*! * \internal * \brief Check whether a given node is created by a bundle * * \param[in] bundle Bundle resource to check * \param[in] node Node to check * * \return true if \p node is an instance of \p bundle, otherwise false */ bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (pcmk__same_node(node, replica->node)) { return true; } } return false; } /*! * \internal * \brief Get the container of a bundle's first replica * * \param[in] bundle Bundle resource to get container for * * \return Container resource from first replica of \p bundle if any, * otherwise NULL */ pcmk_resource_t * pe__first_container(const pcmk_resource_t *bundle) { const pe__bundle_variant_data_t *bundle_data = NULL; const pcmk__bundle_replica_t *replica = NULL; get_bundle_variant_data(bundle_data, bundle); if (bundle_data->replicas == NULL) { return NULL; } replica = bundle_data->replicas->data; return replica->container; } /*! * \internal * \brief Iterate over bundle replicas * * \param[in,out] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_bundle_replica(pcmk_resource_t *bundle, bool (*fn)(pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } /*! 
* \internal * \brief Iterate over const bundle replicas * * \param[in] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle, bool (*fn)(const pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (const GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((const pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static void allocate_ip(pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica, GString *buffer) { if(data->ip_range_start == NULL) { return; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { g_string_append_printf(buffer, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } else { g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); } break; default: // PE__CONTAINER_AGENT_UNKNOWN break; } } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE); crm_xml_add(rsc, PCMK_XA_ID, name); crm_xml_add(rsc, PCMK_XA_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, PCMK_XA_PROVIDER, provider); crm_xml_add(rsc, PCMK_XA_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in,out] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
*/ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pcmk__config_err("Specifying the '" PCMK_XA_CONTROL_PORT "' for %s " "requires '" PCMK_XA_REPLICAS_PER_HOST "=1'", data->prefix); data->nreplicas_per_host = 1; // @TODO to be sure: // pcmk__clear_rsc_flags(rsc, pcmk__rsc_unique); } return TRUE; } return FALSE; } static int create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); pcmk__xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_INSTANCE_ATTRIBUTES); pcmk__xe_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_ip), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_ip, &replica->ip, parent, parent->priv->scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->priv->children = g_list_append(parent->priv->children, replica->ip); } return pcmk_rc_ok; } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static int create_container_resource(pcmk_resource_t *parent, const pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; // Agent-specific const char *hostname_opt = NULL; const char *env_opt = NULL; const char *agent_str = NULL; GString *buffer = NULL; GString *dbuffer = NULL; // Where syntax differences are drop-in replacements, set them now switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: hostname_opt = "-h "; env_opt = "-e "; break; default: // PE__CONTAINER_AGENT_UNKNOWN return pcmk_rc_unpack_error; } agent_str = container_agent_str(data->agent_type); buffer = g_string_sized_new(4096); id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str, replica->offset); pcmk__xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", agent_str); free(id); xml_obj = pcmk__xe_create(xml_container, PCMK_XE_INSTANCE_ATTRIBUTES); pcmk__xe_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", PCMK_VALUE_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", PCMK_VALUE_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", PCMK_VALUE_FALSE); if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) { g_string_append(buffer, " --restart=no"); } /* Set a container hostname only if we have an IP to map it to. 
The user can * set -h or --uts=host themselves if they want a nicer name for logs, but * this makes applications happy who need their hostname to match the IP * they bind to. */ if (data->ip_range_start != NULL) { g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix, replica->offset); } pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL); if (data->container_network != NULL) { pcmk__g_strcat(buffer, " --net=", data->container_network, NULL); } if (data->control_port != NULL) { pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=", data->control_port, NULL); } else { g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d", env_opt, DEFAULT_REMOTE_PORT); } for (GList *iter = data->mounts; iter != NULL; iter = iter->next) { pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data; char *source = NULL; if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) { source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix, replica->offset); pcmk__add_separated_word(&dbuffer, 1024, source, ","); } switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: pcmk__g_strcat(buffer, " -v ", pcmk__s(source, mount->source), ":", mount->target, NULL); if (mount->options != NULL) { pcmk__g_strcat(buffer, ":", mount->options, NULL); } break; default: break; } free(source); } for (GList *iter = data->ports; iter != NULL; iter = iter->next) { pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " -p ", replica->ipaddr, ":", port->source, ":", port->target, NULL); } else if (!pcmk__str_eq(data->container_network, PCMK_VALUE_HOST, pcmk__str_none)) { // No need to do port mapping if net == host pcmk__g_strcat(buffer, " -p ", port->source, ":", port->target, NULL); } break; default: break; } } /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because * it would cause restarts during rolling upgrades. * * In a previous version of the container resource creation logic, if * data->launcher_options is not NULL, we append * (" %s", data->launcher_options) even if data->launcher_options is an * empty string. Likewise for data->container_host_options. Using * * pcmk__add_word(buffer, 0, data->launcher_options) * * removes that extra trailing space, causing a resource definition change. */ if (data->launcher_options != NULL) { pcmk__g_strcat(buffer, " ", data->launcher_options, NULL); } if (data->container_host_options != NULL) { pcmk__g_strcat(buffer, " ", data->container_host_options, NULL); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", (const char *) buffer->str); g_string_free(buffer, TRUE); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", (dbuffer != NULL)? (const char *) dbuffer->str : ""); if (dbuffer != NULL) { g_string_free(dbuffer, TRUE); } if (replica->child != NULL) { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/" PCMK__SERVER_REMOTED); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive; we'll monitor the * child independently. 
*/ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); #if 0 /* @TODO Consider supporting the use case where we can start and stop * resources, but not proxy local commands (such as setting node * attributes), by running the local executor in stand-alone mode. * However, this would probably be better done via ACLs as with other * Pacemaker Remote nodes. */ } else if ((child != NULL) && data->untrusted) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", CRM_DAEMON_DIR "/" PCMK__SERVER_EXECD); crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke"); #endif } else { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want to know if it * is alive. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = pcmk__xe_create(xml_container, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_container), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_container, &replica->container, parent, parent->priv->scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } pcmk__set_rsc_flags(replica->container, pcmk__rsc_replica_container); parent->priv->children = g_list_append(parent->priv->children, replica->container); return pcmk_rc_ok; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(pcmk_resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->priv->allowed_nodes, uname); if (match) { ((pcmk_node_t *) match)->assign->score = -PCMK_SCORE_INFINITY; ((pcmk_node_t *) match)->assign->probe_mode = pcmk__probe_never; } g_list_foreach(rsc->priv->children, (GFunc) disallow_node, (gpointer) uname); } static int create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if (replica->child && valid_network(data)) { GHashTableIter gIter; pcmk_node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; pcmk_scheduler_t *scheduler = parent->priv->scheduler; if (pe_find_resource(scheduler->priv->resources, id) != NULL) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); //@TODO return error instead of asserting? pcmk__assert(pe_find_resource(scheduler->priv->resources, id) == NULL); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = pcmk__itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? 
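/* As a concrete (hypothetical) illustration of the connection resource being
 * built by this call: with ip-range-start configured, replica 0 of a bundle
 * named "httpd-bundle" gets a remote connection whose addr is its own IP
 * (e.g. 192.168.122.131); without an IP range, addr is the literal "#uname",
 * which pe__add_bundle_remote_name() later resolves to the name of the node
 * hosting the container. The port is the configured control-port if any,
 * otherwise DEFAULT_REMOTE_PORT (3121 in a default build).
 */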
data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during scheduler data cleanup to * use as the node ID and uname. */ free(id); id = NULL; uname = pcmk__xe_id(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pcmk_find_node(scheduler, uname); if (node == NULL) { node = pe_create_node(uname, uname, PCMK_VALUE_REMOTE, -PCMK_SCORE_INFINITY, scheduler); } else { node->assign->score = -PCMK_SCORE_INFINITY; } node->assign->probe_mode = pcmk__probe_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pcmk_node_t entry. Ideally, it would do the same for bundle * nodes. Unfortunately, a bundle has to be mostly unpacked before it's * obvious what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when pe__unpack_resource() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ g_list_foreach(scheduler->priv->resources, (GFunc) disallow_node, (gpointer) uname); replica->node = pe__copy_node(node); replica->node->assign->score = 500; replica->node->assign->probe_mode = pcmk__probe_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->priv->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->priv->allowed_nodes); } replica->child->priv->allowed_nodes = pcmk__strkey_table(NULL, free); g_hash_table_insert(replica->child->priv->allowed_nodes, (gpointer) replica->node->priv->id, pe__copy_node(replica->node)); { const pcmk_resource_t *parent = replica->child->priv->parent; pcmk_node_t *copy = pe__copy_node(replica->node); copy->assign->score = -PCMK_SCORE_INFINITY; g_hash_table_insert(parent->priv->allowed_nodes, (gpointer) replica->node->priv->id, copy); } if (pe__unpack_resource(xml_remote, &replica->remote, parent, scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } g_hash_table_iter_init(&gIter, replica->remote->priv->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pcmk__is_pacemaker_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->assign->score = -PCMK_SCORE_INFINITY; } } replica->node->priv->remote = replica->remote; // Ensure pcmk__is_guest_or_bundle_node() functions correctly replica->remote->priv->launcher = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). */ pcmk__insert_dup(replica->node->priv->attrs, CRM_ATTR_KIND, "container"); /* One effect of this is that unpack_launcher() will add * replica->remote to replica->container's launched resources, which * will make pe__resource_contains_guest_node() true for * replica->container. * * replica->child does NOT get added to replica->container's launched * resources. 
The only noticeable effect if it did would be for its * fail count to be taken into account when checking * replica->container's migration threshold. */ parent->priv->children = g_list_append(parent->priv->children, replica->remote); } return pcmk_rc_ok; } static int create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { int rc = pcmk_rc_ok; rc = create_container_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_ip_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_remote_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } if ((replica->child != NULL) && (replica->ipaddr != NULL)) { pcmk__insert_meta(replica->child->priv, "external-ip", replica->ipaddr); } if (replica->remote != NULL) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with the remote executor inside in order to start * services inside those containers. */ pcmk__set_rsc_flags(replica->remote, pcmk__rsc_remote_nesting_allowed); } return rc; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = pcmk__assert_alloc(1, sizeof(pe__bundle_mount_t)); mount->source = pcmk__str_copy(source); mount->target = pcmk__str_copy(target); mount->options = pcmk__str_copy(options); mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pcmk__bundle_replica_t * replica_for_remote(pcmk_resource_t *remote) { pcmk_resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->priv->parent != NULL) { top = top->priv->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc) { const char *value; GHashTable *params = NULL; if (rsc == NULL) { return false; } // Use NULL node since pcmk__bundle_expand() uses that to set value params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler); value = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR); return pcmk__str_eq(value, "#uname", pcmk__str_casei) && xml_contains_remote_node(rsc->priv->xml); } const char * pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml, const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pcmk_node_t *node = NULL; pcmk__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc)) { return NULL; } replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->priv->assigned_node; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. 
*/ node = pcmk__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, pcmk__node_name(node)); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->priv->name); } return node->priv->name; } #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Bundle mount", pcmk__xe_id(mount_xml), \ flags, (flags_to_set), #flags_to_set); \ } while (0) gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *value = NULL; xmlNode *xml_obj = NULL; const xmlNode *xml_child = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; pcmk__assert(rsc != NULL); pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = pcmk__assert_alloc(1, sizeof(pe__bundle_variant_data_t)); rsc->priv->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_DOCKER, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } if (xml_obj == NULL) { xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_PODMAN, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } } if (xml_obj == NULL) { return FALSE; } // Use 0 for default, minimum, and invalid PCMK_XA_PROMOTED_MAX value = crm_element_value(xml_obj, PCMK_XA_PROMOTED_MAX); pcmk__scan_min_int(value, &bundle_data->promoted_max, 0); /* Default replicas to PCMK_XA_PROMOTED_MAX if it was specified and 1 * otherwise */ value = crm_element_value(xml_obj, PCMK_XA_REPLICAS); if ((value == NULL) && (bundle_data->promoted_max > 0)) { bundle_data->nreplicas = bundle_data->promoted_max; } else { pcmk__scan_min_int(value, &bundle_data->nreplicas, 1); } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, PCMK_XA_REPLICAS_PER_HOST); pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1); if (bundle_data->nreplicas_per_host == 1) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_unique); } bundle_data->container_command = crm_element_value_copy(xml_obj, PCMK_XA_RUN_COMMAND); bundle_data->launcher_options = crm_element_value_copy(xml_obj, PCMK_XA_OPTIONS); bundle_data->image = crm_element_value_copy(xml_obj, PCMK_XA_IMAGE); bundle_data->container_network = crm_element_value_copy(xml_obj, PCMK_XA_NETWORK); xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_NETWORK, NULL, NULL); if(xml_obj) { bundle_data->ip_range_start = crm_element_value_copy(xml_obj, PCMK_XA_IP_RANGE_START); bundle_data->host_netmask = crm_element_value_copy(xml_obj, PCMK_XA_HOST_NETMASK); bundle_data->host_network = crm_element_value_copy(xml_obj, PCMK_XA_HOST_INTERFACE); bundle_data->control_port = crm_element_value_copy(xml_obj, PCMK_XA_CONTROL_PORT); value = crm_element_value(xml_obj, PCMK_XA_ADD_HOST); if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) { bundle_data->add_host = TRUE; } for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_PORT_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next(xml_child, PCMK_XE_PORT_MAPPING)) { pe__bundle_port_t *port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); port->source = crm_element_value_copy(xml_child, 
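/* A minimal sketch of the configuration parsed here, using the bundle
 * schema's attribute names (the values themselves are hypothetical):
 *
 *   <network ip-range-start="192.168.122.131" host-interface="eth0">
 *     <port-mapping id="httpd-port" port="80"/>
 *     <port-mapping id="httpd-range" range="5000-5010"/>
 *   </network>
 *
 * A "port" entry with no "internal-port" maps the same number inside and
 * outside the container; a "range" entry is passed through as given. When a
 * replica IP is available, a port entry becomes a run option such as
 * "-p 192.168.122.131:80:80".
 */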
PCMK_XA_PORT); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, PCMK_XA_RANGE); } else { port->target = crm_element_value_copy(xml_child, PCMK_XA_INTERNAL_PORT); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pcmk__config_err("Invalid " PCMK_XA_PORT " directive %s", pcmk__xe_id(xml_child)); port_free(port); } } } xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_STORAGE, NULL, NULL); for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_STORAGE_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next(xml_child, PCMK_XE_STORAGE_MAPPING)) { const char *source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR); const char *target = crm_element_value(xml_child, PCMK_XA_TARGET_DIR); const char *options = crm_element_value(xml_child, PCMK_XA_OPTIONS); int flags = pe__bundle_mount_none; if (source == NULL) { source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR_ROOT); pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pcmk__config_err("Invalid mount directive %s", pcmk__xe_id(xml_child)); } } xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_PRIMITIVE, NULL, NULL); if (xml_obj && valid_network(bundle_data)) { const char *suffix = NULL; char *value = NULL; xmlNode *xml_set = NULL; xml_resource = pcmk__xe_create(NULL, PCMK_XE_CLONE); /* @COMPAT We no longer use the tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) */ suffix = (const char *) xml_resource->name; if (bundle_data->promoted_max > 0) { suffix = "master"; } pcmk__xe_set_id(xml_resource, "%s-%s", bundle_data->prefix, suffix); xml_set = pcmk__xe_create(xml_resource, PCMK_XE_META_ATTRIBUTES); pcmk__xe_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_ORDERED, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value); free(value); value = pcmk__itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_GLOBALLY_UNIQUE, pcmk__btoa(bundle_data->nreplicas_per_host > 1)); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTABLE, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, PCMK_XA_ID, bundle_data->prefix); pcmk__xml_copy(xml_resource, xml_obj); } else if(xml_obj) { pcmk__config_err("Cannot control %s inside %s without either " PCMK_XA_IP_RANGE_START " or " PCMK_XA_CONTROL_PORT, rsc->id, pcmk__xe_id(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GList *childIter = NULL; pe__bundle_port_t *port = NULL; GString *buffer = NULL; if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc, scheduler) != pcmk_rc_ok) { return FALSE; } /* Currently, we always map the default authentication key location * into the same location inside the container. 
* * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting * PCMK_XA_CONTROL_PORT. */ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = g_string_sized_new(1024); for (childIter = bundle_data->child->priv->children; childIter != NULL; childIter = childIter->next) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->child = childIter->data; pcmk__set_rsc_flags(replica->child, pcmk__rsc_exclusive_probes); replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (pcmk_is_set(replica->child->flags, pcmk__rsc_notify)) { pcmk__set_rsc_flags(bundle_data->child, pcmk__rsc_notify); } allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET); } bundle_data->container_host_options = g_string_free(buffer, FALSE); if (bundle_data->attribute_target) { pcmk__insert_dup(rsc->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); pcmk__insert_dup(bundle_data->child->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); } } else { // Just a naked container, no pacemaker-remote GString *buffer = g_string_sized_new(1024); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->offset = lpc; allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = g_string_free(buffer, FALSE); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (create_replica_resources(rsc, bundle_data, replica) != 
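/* For context, a small worked example of the per-replica IP allocation done
 * above by allocate_ip() and next_ip() (addresses are hypothetical): with
 * ip-range-start="192.168.122.131" and replicas="3", the replicas are given
 * 192.168.122.131, .132 and .133. next_ip() simply increments the last
 * octet, bumping the third octet and resetting the last to 1 once it passes
 * 253, and returns NULL for anything that is not a dotted-quad IPv4 address.
 */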
pcmk_rc_ok) { pcmk__config_err("Failed unpacking resource %s", rsc->id); rsc->priv->fns->free(rsc); return FALSE; } /* Utilization needs special handling for bundles. It makes no sense for * the inner primitive to have utilization, because it is tied * one-to-one to the guest node created by the container resource -- and * there's no way to set capacities for that guest node anyway. * * What the user really wants is to configure utilization for the * container. However, the schema only allows utilization for * primitives, and the container resource is implicit anyway, so the * user can *only* configure utilization for the inner primitive. If * they do, move the primitive's utilization values to the container. * * @TODO This means that bundles without an inner primitive can't have * utilization. An alternative might be to allow utilization values in * the top-level bundle XML in the schema, and copy those to each * container. */ if (replica->child != NULL) { GHashTable *empty = replica->container->priv->utilization; replica->container->priv->utilization = replica->child->priv->utilization; replica->child->priv->utilization = empty; } } if (bundle_data->child) { rsc->priv->children = g_list_append(rsc->priv->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pcmk_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->priv->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all) { pe__bundle_variant_data_t *bundle_data = NULL; GList *iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! 
* \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pcmk_resource_t * pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; pcmk__assert((bundle != NULL) && (node != NULL)); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk__assert((replica != NULL) && (replica->node != NULL)); if (pcmk__same_node(replica->node, node)) { return replica->child; } } return NULL; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; const char *desc = NULL; pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; char *id = NULL; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote = (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) { continue; } if (!printed_header) { const char *type = container_agent_str(bundle_data->agent_type); const char *unique = pcmk__flag_text(rsc->flags, pcmk__rsc_unique); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk__rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk__rsc_managed); const char *failed = pcmk__flag_text(rsc->flags, pcmk__rsc_failed); printed_header = TRUE; desc = pe__resource_description(rsc, show_opts); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_BUNDLE, PCMK_XA_ID, rsc->id, PCMK_XA_TYPE, type, PCMK_XA_IMAGE, bundle_data->image, PCMK_XA_UNIQUE, unique, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_FAILED, failed, PCMK_XA_DESCRIPTION, desc, NULL); pcmk__assert(rc == pcmk_rc_ok); } id = pcmk__itoa(replica->offset); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_REPLICA, PCMK_XA_ID, id, NULL); free(id); pcmk__assert(rc == pcmk_rc_ok); if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, show_opts, child, only_node, only_rsc); } if (print_ctnr) { 
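/* Rough shape of the XML this formatter produces (element and attribute
 * names follow the PCMK_XE_/PCMK_XA_ constants used above; the values are
 * hypothetical):
 *
 *   <bundle id="httpd-bundle" type="podman" image="pcmk:http" unique="false"
 *           maintenance="false" managed="true" failed="false">
 *     <replica id="0">
 *       ... one child element per printed member (ip, child, container,
 *           remote) ...
 *     </replica>
 *   </bundle>
 */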
out->message(out, (const char *) container->priv->xml->name, show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, show_opts, remote, only_node, only_rsc); } pcmk__output_xml_pop_parent(out); // replica } if (printed_header) { pcmk__output_xml_pop_parent(out); // bundle } return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_html(out, rsc, buffer, node, show_opts); } /*! * \internal * \brief Get a string describing a resource's unmanaged state or lack thereof * * \param[in] rsc Resource to describe * * \return A string indicating that a resource is in maintenance mode or * otherwise unmanaged, or an empty string otherwise */ static const char * get_unmanaged_str(const pcmk_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)) { return " (maintenance)"; } if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { return " (unmanaged)"; } return ""; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); desc = pe__resource_description(rsc, show_opts); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote = (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? 
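/* Rendered, the list header built here looks something like the following
 * (resource and image names are hypothetical):
 *
 *   Container bundle set: httpd-bundle [pcmk:http] (unique)
 *
 * " set" appears only when more than one replica is configured, and the
 * trailing description, "(unmanaged)" or "(maintenance)" suffixes are added
 * only when applicable.
 */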
" set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset); } if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, new_show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, new_show_opts, child, only_node, only_rsc); } if (print_ctnr) { out->message(out, (const char *) container->priv->xml->name, new_show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, new_show_opts, remote, only_node, only_rsc); } if (pcmk__list_of_multiple(bundle_data->replicas)) { out->end_list(out); } } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_html(out, replica, pcmk__current_node(container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { const pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_text(out, rsc, buffer, node, show_opts); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; desc = pe__resource_description(rsc, show_opts); pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote 
= (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, new_show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, new_show_opts, child, only_node, only_rsc); } if (print_ctnr) { out->message(out, (const char *) container->priv->xml->name, new_show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, new_show_opts, remote, only_node, only_rsc); } out->end_list(out); } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_text(out, replica, pcmk__current_node(container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void free_bundle_replica(pcmk__bundle_replica_t *replica) { if (replica == NULL) { return; } if (replica->node) { free(replica->node); replica->node = NULL; } if (replica->ip) { pcmk__xml_free(replica->ip->priv->xml); replica->ip->priv->xml = NULL; replica->ip->priv->fns->free(replica->ip); } if (replica->container) { pcmk__xml_free(replica->container->priv->xml); replica->container->priv->xml = NULL; replica->container->priv->fns->free(replica->container); } if (replica->remote) { pcmk__xml_free(replica->remote->priv->xml); replica->remote->priv->xml = NULL; replica->remote->priv->fns->free(replica->remote); } free(replica->ipaddr); free(replica); } void pe__free_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); g_free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->priv->children); if(bundle_data->child) { pcmk__xml_free(bundle_data->child->priv->xml); bundle_data->child->priv->xml = NULL; bundle_data->child->priv->fns->free(bundle_data->child); } 
common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current) { enum rsc_role_e container_role = pcmk_role_unknown; return container_role; } /*! * \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const pcmk_resource_t *rsc) { if (pcmk__is_bundle(rsc)) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } return 0; } void pe__count_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pcmk__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->priv->fns->count(replica->ip); } if (replica->child) { replica->child->priv->fns->count(replica->child); } if (replica->container) { replica->container->priv->fns->count(replica->container); } if (replica->remote) { replica->remote->priv->fns->count(replica->remote); } } } gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; pe__bundle_variant_data_t *bundle_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; if ((ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, FALSE)) { passes = TRUE; break; } if ((child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, FALSE)) { passes = TRUE; break; } if (!container->priv->fns->is_filtered(container, only_rsc, FALSE)) { passes = TRUE; break; } if ((remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } /*! * \internal * \brief Get a list of a bundle's containers * * \param[in] bundle Bundle resource * * \return Newly created list of \p bundle's containers * \note It is the caller's responsibility to free the result with * g_list_free(). */ GList * pe__bundle_containers(const pcmk_resource_t *bundle) { GList *containers = NULL; const pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, bundle); for (GList *iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; containers = g_list_append(containers, replica->container); } return containers; } // Bundle implementation of pcmk__rsc_methods_t:active_node() pcmk_node_t * pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pcmk_node_t *active = NULL; pcmk_node_t *node = NULL; pcmk_resource_t *container = NULL; GList *containers = NULL; GList *iter = NULL; GHashTable *nodes = NULL; const pe__bundle_variant_data_t *data = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } /* For the purposes of this method, we only care about where the bundle's * containers are active, so build a list of active containers. 
*/ get_bundle_variant_data(data, rsc); for (iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (replica->container->priv->active_nodes != NULL) { containers = g_list_append(containers, replica->container); } } if (containers == NULL) { return NULL; } /* If the bundle has only a single active container, just use that * container's method. If live migration is ever supported for bundle * containers, this will allow us to prefer the migration source when there * is only one container and it is migrating. For now, this just lets us * avoid creating the nodes table. */ if (pcmk__list_of_1(containers)) { container = containers->data; node = container->priv->fns->active_node(container, count_all, count_clean); g_list_free(containers); return node; } // Add all containers' active nodes to a hash table (for uniqueness) nodes = g_hash_table_new(NULL, NULL); for (iter = containers; iter != NULL; iter = iter->next) { container = iter->data; for (GList *node_iter = container->priv->active_nodes; node_iter != NULL; node_iter = node_iter->next) { node = node_iter->data; // If insert returns true, we haven't counted this node yet if (g_hash_table_insert(nodes, (gpointer) node->details, (gpointer) node) && !pe__count_active_node(rsc, node, &active, count_all, count_clean)) { goto done; } } } done: g_list_free(containers); g_hash_table_destroy(nodes); return active; } /*! * \internal * \brief Get maximum bundle resource instances per node * * \param[in] rsc Bundle resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); pcmk__assert(bundle_data->nreplicas_per_host >= 0); return (unsigned int) bundle_data->nreplicas_per_host; } diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c index c1d59d61e3..24953cd131 100644 --- a/lib/pengine/clone.c +++ b/lib/pengine/clone.c @@ -1,1257 +1,1256 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include -#include #include #include #include #include #include #include #include typedef struct clone_variant_data_s { int clone_max; int clone_node_max; int promoted_max; int promoted_node_max; int total_clones; uint32_t flags; // Group of enum pcmk__clone_flags notify_data_t *stop_notify; notify_data_t *start_notify; notify_data_t *demote_notify; notify_data_t *promote_notify; xmlNode *xml_obj_child; } clone_variant_data_t; #define get_clone_variant_data(data, rsc) do { \ pcmk__assert(pcmk__is_clone(rsc)); \ data = rsc->priv->variant_opaque; \ } while (0) /*! * \internal * \brief Return the maximum number of clone instances allowed to be run * * \param[in] clone Clone or clone instance to check * * \return Maximum instances for \p clone */ int pe__clone_max(const pcmk_resource_t *clone) { const clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, pe__const_top_resource(clone, false)); return clone_data->clone_max; } /*! 
* \internal * \brief Return the maximum number of clone instances allowed per node * * \param[in] clone Promotable clone or clone instance to check * * \return Maximum allowed instances per node for \p clone */ int pe__clone_node_max(const pcmk_resource_t *clone) { const clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, pe__const_top_resource(clone, false)); return clone_data->clone_node_max; } /*! * \internal * \brief Return the maximum number of clone instances allowed to be promoted * * \param[in] clone Promotable clone or clone instance to check * * \return Maximum promoted instances for \p clone */ int pe__clone_promoted_max(const pcmk_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, pe__const_top_resource(clone, false)); return clone_data->promoted_max; } /*! * \internal * \brief Return the maximum number of clone instances allowed to be promoted * * \param[in] clone Promotable clone or clone instance to check * * \return Maximum promoted instances for \p clone */ int pe__clone_promoted_node_max(const pcmk_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, pe__const_top_resource(clone, false)); return clone_data->promoted_node_max; } static GList * sorted_hash_table_values(GHashTable *table) { GList *retval = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, table); while (g_hash_table_iter_next(&iter, &key, &value)) { if (!g_list_find_custom(retval, value, (GCompareFunc) strcmp)) { retval = g_list_prepend(retval, (char *) value); } } retval = g_list_sort(retval, (GCompareFunc) strcmp); return retval; } static GList * nodes_with_status(GHashTable *table, const char *status) { GList *retval = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, table); while (g_hash_table_iter_next(&iter, &key, &value)) { if (!strcmp((char *) value, status)) { retval = g_list_prepend(retval, key); } } retval = g_list_sort(retval, (GCompareFunc) pcmk__numeric_strcasecmp); return retval; } static GString * node_list_to_str(const GList *list) { GString *retval = NULL; for (const GList *iter = list; iter != NULL; iter = iter->next) { pcmk__add_word(&retval, 1024, (const char *) iter->data); } return retval; } static void clone_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc, clone_variant_data_t *clone_data, const char *desc) { GString *attrs = NULL; if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { pcmk__add_separated_word(&attrs, 64, "promotable", ", "); } if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { pcmk__add_separated_word(&attrs, 64, "unique", ", "); } if (pe__resource_is_disabled(rsc)) { pcmk__add_separated_word(&attrs, 64, "disabled", ", "); } if (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)) { pcmk__add_separated_word(&attrs, 64, "maintenance", ", "); } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); } if (attrs != NULL) { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)%s%s%s", rsc->id, pcmk__xe_id(clone_data->xml_obj_child), (const char *) attrs->str, desc ? " (" : "", desc ? desc : "", desc ? ")" : ""); g_string_free(attrs, TRUE); } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]%s%s%s", rsc->id, pcmk__xe_id(clone_data->xml_obj_child), desc ? " (" : "", desc ? desc : "", desc ? 
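/* Rendered, the header assembled by clone_header() looks something like the
 * following (resource names are hypothetical):
 *
 *   Clone Set: ping-clone [ping] (promotable, unique)
 *
 * The parenthesised attribute list is emitted only when at least one of
 * promotable, unique, disabled, maintenance or unmanaged applies; otherwise
 * the plain "Clone Set: id [child-id]" form is used.
 */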
")" : ""); } } void pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid, pcmk_scheduler_t *scheduler) { if (pcmk__is_clone(rsc)) { clone_variant_data_t *clone_data = rsc->priv->variant_opaque; pcmk__config_warn("Ignoring " PCMK_META_GLOBALLY_UNIQUE " for %s " "because %s resources such as %s can be used only as " "anonymous clones", rsc->id, standard, rid); clone_data->clone_node_max = 1; clone_data->clone_max = QB_MIN(clone_data->clone_max, g_list_length(scheduler->nodes)); } } pcmk_resource_t * pe__create_clone_child(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { gboolean as_orphan = FALSE; char *inc_num = NULL; char *inc_max = NULL; pcmk_resource_t *child_rsc = NULL; xmlNode *child_copy = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE); if (clone_data->total_clones >= clone_data->clone_max) { // If we've already used all available instances, this is an orphan as_orphan = TRUE; } // Allocate instance numbers in numerical order (starting at 0) inc_num = pcmk__itoa(clone_data->total_clones); inc_max = pcmk__itoa(clone_data->clone_max); child_copy = pcmk__xml_copy(NULL, clone_data->xml_obj_child); crm_xml_add(child_copy, PCMK__META_CLONE, inc_num); if (pe__unpack_resource(child_copy, &child_rsc, rsc, scheduler) != pcmk_rc_ok) { goto bail; } /* child_rsc->globally_unique = rsc->globally_unique; */ pcmk__assert(child_rsc != NULL); clone_data->total_clones += 1; pcmk__rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id); rsc->priv->children = g_list_append(rsc->priv->children, child_rsc); if (as_orphan) { pe__set_resource_flags_recursive(child_rsc, pcmk__rsc_removed); } pcmk__insert_meta(child_rsc->priv, PCMK_META_CLONE_MAX, inc_max); pcmk__rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id); bail: free(inc_num); free(inc_max); return child_rsc; } /*! * \internal * \brief Unpack a nonnegative integer value from a resource meta-attribute * * \param[in] rsc Resource with meta-attribute * \param[in] meta_name Name of meta-attribute to unpack * \param[in] deprecated_name If not NULL, try unpacking this * if \p meta_name is unset * \param[in] default_value Value to use if unset * * \return Integer parsed from resource's specified meta-attribute if a valid * nonnegative integer, \p default_value if unset, or 0 if invalid */ static int unpack_meta_int(const pcmk_resource_t *rsc, const char *meta_name, const char *deprecated_name, int default_value) { int integer = default_value; const char *value = g_hash_table_lookup(rsc->priv->meta, meta_name); if ((value == NULL) && (deprecated_name != NULL)) { value = g_hash_table_lookup(rsc->priv->meta, deprecated_name); if (value != NULL) { if (pcmk__str_eq(deprecated_name, PCMK__META_PROMOTED_MAX_LEGACY, pcmk__str_none)) { pcmk__warn_once(pcmk__wo_clone_master_max, "Support for the " PCMK__META_PROMOTED_MAX_LEGACY " meta-attribute (such as in %s) is deprecated " "and will be removed in a future release. Use the " PCMK_META_PROMOTED_MAX " meta-attribute instead.", rsc->id); } else if (pcmk__str_eq(deprecated_name, PCMK__META_PROMOTED_NODE_MAX_LEGACY, pcmk__str_none)) { pcmk__warn_once(pcmk__wo_clone_master_node_max, "Support for the " PCMK__META_PROMOTED_NODE_MAX_LEGACY " meta-attribute (such as in %s) is deprecated " "and will be removed in a future release. 
Use the " PCMK_META_PROMOTED_NODE_MAX " meta-attribute instead.", rsc->id); } } } if (value != NULL) { pcmk__scan_min_int(value, &integer, 0); } return integer; } gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { int lpc = 0; xmlNode *a_child = NULL; xmlNode *xml_obj = rsc->priv->xml; clone_variant_data_t *clone_data = NULL; pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); clone_data = pcmk__assert_alloc(1, sizeof(clone_variant_data_t)); rsc->priv->variant_opaque = clone_data; if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { // Use 1 as default but 0 for minimum and invalid // @COMPAT PCMK__META_PROMOTED_MAX_LEGACY deprecated since 2.0.0 clone_data->promoted_max = unpack_meta_int(rsc, PCMK_META_PROMOTED_MAX, PCMK__META_PROMOTED_MAX_LEGACY, 1); // Use 1 as default but 0 for minimum and invalid // @COMPAT PCMK__META_PROMOTED_NODE_MAX_LEGACY deprecated since 2.0.0 clone_data->promoted_node_max = unpack_meta_int(rsc, PCMK_META_PROMOTED_NODE_MAX, PCMK__META_PROMOTED_NODE_MAX_LEGACY, 1); } // Use 1 as default but 0 for minimum and invalid clone_data->clone_node_max = unpack_meta_int(rsc, PCMK_META_CLONE_NODE_MAX, NULL, 1); /* Use number of nodes (but always at least 1, which is handy for crm_verify * for a CIB without nodes) as default, but 0 for minimum and invalid */ clone_data->clone_max = unpack_meta_int(rsc, PCMK_META_CLONE_MAX, NULL, QB_MAX(1, g_list_length(scheduler->nodes))); if (crm_is_true(g_hash_table_lookup(rsc->priv->meta, PCMK_META_ORDERED))) { clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Clone", rsc->id, clone_data->flags, pcmk__clone_ordered, "pcmk__clone_ordered"); } if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique) && (clone_data->clone_node_max > 1)) { pcmk__config_err("Ignoring " PCMK_META_CLONE_NODE_MAX " of %d for %s " "because anonymous clones support only one instance " "per node", clone_data->clone_node_max, rsc->id); clone_data->clone_node_max = 1; } pcmk__rsc_trace(rsc, "Options for %s", rsc->id); pcmk__rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max); pcmk__rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max); pcmk__rsc_trace(rsc, "\tClone is unique: %s", pcmk__flag_text(rsc->flags, pcmk__rsc_unique)); pcmk__rsc_trace(rsc, "\tClone is promotable: %s", pcmk__flag_text(rsc->flags, pcmk__rsc_promotable)); // Clones may contain a single group or primitive for (a_child = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL); a_child != NULL; a_child = pcmk__xe_next(a_child, NULL)) { if (pcmk__str_any_of((const char *) a_child->name, PCMK_XE_PRIMITIVE, PCMK_XE_GROUP, NULL)) { clone_data->xml_obj_child = a_child; break; } } if (clone_data->xml_obj_child == NULL) { pcmk__config_err("%s has nothing to clone", rsc->id); return FALSE; } /* * Make clones ever so slightly sticky by default * * This helps ensure clone instances are not shuffled around the cluster * for no benefit in situations when pre-allocation is not appropriate */ if (g_hash_table_lookup(rsc->priv->meta, PCMK_META_RESOURCE_STICKINESS) == NULL) { pcmk__insert_meta(rsc->priv, PCMK_META_RESOURCE_STICKINESS, "1"); } /* This ensures that the PCMK_META_GLOBALLY_UNIQUE value always exists for * children to inherit when being unpacked, as well as in resource agents' * environment. */ pcmk__insert_meta(rsc->priv, PCMK_META_GLOBALLY_UNIQUE, pcmk__flag_text(rsc->flags, pcmk__rsc_unique)); if (clone_data->clone_max <= 0) { /* Create one child instance so that unpack_find_resource() will hook up * any orphans up to the parent correctly. 
*/ if (pe__create_clone_child(rsc, scheduler) == NULL) { return FALSE; } } else { // Create a child instance for each available instance number for (lpc = 0; lpc < clone_data->clone_max; lpc++) { if (pe__create_clone_child(rsc, scheduler) == NULL) { return FALSE; } } } pcmk__rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id); return TRUE; } gboolean clone_active(pcmk_resource_t * rsc, gboolean all) { for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; gboolean child_active = child_rsc->priv->fns->active(child_rsc, all); if (all == FALSE && child_active) { return TRUE; } else if (all && child_active == FALSE) { return FALSE; } } if (all) { return TRUE; } else { return FALSE; } } static const char * configured_role_str(pcmk_resource_t * rsc) { const char *target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); if ((target_role == NULL) && (rsc->priv->children != NULL)) { // Any instance will do pcmk_resource_t *instance = rsc->priv->children->data; target_role = g_hash_table_lookup(instance->priv->meta, PCMK_META_TARGET_ROLE); } return target_role; } static enum rsc_role_e configured_role(pcmk_resource_t *rsc) { enum rsc_role_e role = pcmk_role_unknown; const char *target_role = configured_role_str(rsc); if (target_role != NULL) { role = pcmk_parse_role(target_role); if (role == pcmk_role_unknown) { pcmk__config_err("Invalid " PCMK_META_TARGET_ROLE " for resource %s", rsc->id); } } return role; } bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any) { bool all = !any; if (pcmk_is_set(rsc->flags, flag)) { if(any) { return TRUE; } } else if(all) { return FALSE; } for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { if(is_set_recursive(gIter->data, flag, any)) { if(any) { return TRUE; } } else if(all) { return FALSE; } } if(all) { return TRUE; } return FALSE; } PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__clone_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GList *all = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); all = g_list_prepend(all, (gpointer) "*"); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) { continue; } if (child_rsc->priv->fns->is_filtered(child_rsc, only_rsc, print_everything)) { continue; } if (!printed_header) { const char *multi_state = pcmk__flag_text(rsc->flags, pcmk__rsc_promotable); const char *unique = pcmk__flag_text(rsc->flags, pcmk__rsc_unique); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk__rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk__rsc_managed); const char *disabled = pcmk__btoa(pe__resource_is_disabled(rsc)); const char *failed = pcmk__flag_text(rsc->flags, pcmk__rsc_failed); const char *ignored = pcmk__flag_text(rsc->flags, pcmk__rsc_ignore_failure); const 
char *target_role = configured_role_str(rsc); const char *desc = pe__resource_description(rsc, show_opts); printed_header = TRUE; rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_CLONE, PCMK_XA_ID, rsc->id, PCMK_XA_MULTI_STATE, multi_state, PCMK_XA_UNIQUE, unique, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_DISABLED, disabled, PCMK_XA_FAILED, failed, PCMK_XA_FAILURE_IGNORED, ignored, PCMK_XA_TARGET_ROLE, target_role, PCMK_XA_DESCRIPTION, desc, NULL); pcmk__assert(rc == pcmk_rc_ok); } out->message(out, (const char *) child_rsc->priv->xml->name, show_opts, child_rsc, only_node, all); } if (printed_header) { pcmk__output_xml_pop_parent(out); } g_list_free(all); return rc; } PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__clone_default(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GHashTable *stopped = NULL; GString *list_text = NULL; GList *promoted_list = NULL; GList *started_list = NULL; GList *gIter = NULL; const char *desc = NULL; clone_variant_data_t *clone_data = NULL; int active_instances = 0; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; desc = pe__resource_description(rsc, show_opts); get_clone_variant_data(clone_data, rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); for (gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; gboolean partially_active = child_rsc->priv->fns->active(child_rsc, FALSE); if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) { continue; } if (child_rsc->priv->fns->is_filtered(child_rsc, only_rsc, print_everything)) { continue; } if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) { print_full = TRUE; } if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || !pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { print_full = TRUE; } // Everything else in this block is for anonymous clones } else if (pcmk_is_set(show_opts, pcmk_show_pending) && (child_rsc->priv->pending_action != NULL) && (strcmp(child_rsc->priv->pending_action, "probe") != 0)) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (!pcmk_is_set(child_rsc->flags, pcmk__rsc_removed) && !pcmk_is_set(show_opts, pcmk_show_clone_detail) && pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { if (stopped == NULL) { stopped = pcmk__strkey_table(free, free); } pcmk__insert_dup(stopped, child_rsc->id, "Stopped"); } } else if (is_set_recursive(child_rsc, pcmk__rsc_removed, TRUE) || !is_set_recursive(child_rsc, pcmk__rsc_managed, FALSE) || is_set_recursive(child_rsc, pcmk__rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->priv->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone pcmk_node_t *location = NULL; location = child_rsc->priv->fns->location(child_rsc, NULL, pcmk__rsc_node_current); if (location) { // Instance is active on a 
single node enum rsc_role_e a_role; a_role = child_rsc->priv->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > pcmk_role_unpromoted) { promoted_list = g_list_append(promoted_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { GList *all = NULL; clone_header(out, &rc, rsc, clone_data, desc); /* Print every resource that's a child of this clone. */ all = g_list_prepend(all, (gpointer) "*"); out->message(out, (const char *) child_rsc->priv->xml->name, show_opts, child_rsc, only_node, all); g_list_free(all); } } if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) { PCMK__OUTPUT_LIST_FOOTER(out, rc); return pcmk_rc_ok; } /* Promoted */ promoted_list = g_list_sort(promoted_list, pe__cmp_node_name); for (gIter = promoted_list; gIter; gIter = gIter->next) { pcmk_node_t *host = gIter->data; if (!pcmk__str_in_list(host->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } pcmk__add_word(&list_text, 1024, host->priv->name); active_instances++; } g_list_free(promoted_list); if ((list_text != NULL) && (list_text->len > 0)) { clone_header(out, &rc, rsc, clone_data, desc); out->list_item(out, NULL, PCMK_ROLE_PROMOTED ": [ %s ]", (const char *) list_text->str); g_string_truncate(list_text, 0); } /* Started/Unpromoted */ started_list = g_list_sort(started_list, pe__cmp_node_name); for (gIter = started_list; gIter; gIter = gIter->next) { pcmk_node_t *host = gIter->data; if (!pcmk__str_in_list(host->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } pcmk__add_word(&list_text, 1024, host->priv->name); active_instances++; } g_list_free(started_list); if ((list_text != NULL) && (list_text->len > 0)) { clone_header(out, &rc, rsc, clone_data, desc); if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if (role == pcmk_role_unpromoted) { out->list_item(out, NULL, PCMK_ROLE_UNPROMOTED " (" PCMK_META_TARGET_ROLE "): [ %s ]", (const char *) list_text->str); } else { out->list_item(out, NULL, PCMK_ROLE_UNPROMOTED ": [ %s ]", (const char *) list_text->str); } } else { out->list_item(out, NULL, "Started: [ %s ]", (const char *) list_text->str); } } if (list_text != NULL) { g_string_free(list_text, TRUE); } if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique) && (clone_data->clone_max > active_instances)) { GList *nIter; GList *list = g_hash_table_get_values(rsc->priv->allowed_nodes); /* Custom stopped table for non-unique clones */ if (stopped != NULL) { g_hash_table_destroy(stopped); stopped = NULL; } if (list == NULL) { /* Clusters with PCMK_OPT_SYMMETRIC_CLUSTER=false haven't * calculated allowed nodes yet. If we've not probed for them * yet, the Stopped list will be empty. 
*/ list = g_hash_table_get_values(rsc->priv->probed_nodes); } list = g_list_sort(list, pe__cmp_node_name); for (nIter = list; nIter != NULL; nIter = nIter->next) { pcmk_node_t *node = (pcmk_node_t *) nIter->data; if ((pcmk__find_node_in_list(rsc->priv->active_nodes, node->priv->name) == NULL) && pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { xmlNode *probe_op = NULL; const char *state = "Stopped"; if (configured_role(rsc) == pcmk_role_stopped) { state = "Stopped (disabled)"; } if (stopped == NULL) { stopped = pcmk__strkey_table(free, free); } probe_op = pe__failed_probe_for_rsc(rsc, node->priv->name); if (probe_op != NULL) { int rc; pcmk__scan_min_int(crm_element_value(probe_op, PCMK__XA_RC_CODE), &rc, 0); g_hash_table_insert(stopped, strdup(node->priv->name), crm_strdup_printf("Stopped (%s)", crm_exit_str(rc))); } else { pcmk__insert_dup(stopped, node->priv->name, state); } } } g_list_free(list); } if (stopped != NULL) { GList *list = sorted_hash_table_values(stopped); clone_header(out, &rc, rsc, clone_data, desc); for (GList *status_iter = list; status_iter != NULL; status_iter = status_iter->next) { const char *status = status_iter->data; GList *nodes = nodes_with_status(stopped, status); GString *nodes_str = node_list_to_str(nodes); if (nodes_str != NULL) { if (nodes_str->len > 0) { out->list_item(out, NULL, "%s: [ %s ]", status, (const char *) nodes_str->str); } g_string_free(nodes_str, TRUE); } g_list_free(nodes); } g_list_free(list); g_hash_table_destroy(stopped); /* If there are no instances of this clone (perhaps because there are no * nodes configured), simply output the clone header by itself. This can * come up in PCS testing. */ } else if (active_instances == 0) { clone_header(out, &rc, rsc, clone_data, desc); PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } void clone_free(pcmk_resource_t * rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; pcmk__assert(child_rsc != NULL); pcmk__rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); pcmk__xml_free(child_rsc->priv->xml); child_rsc->priv->xml = NULL; /* There could be a saved unexpanded xml */ pcmk__xml_free(child_rsc->priv->orig_xml); child_rsc->priv->orig_xml = NULL; child_rsc->priv->fns->free(child_rsc); } g_list_free(rsc->priv->children); if (clone_data) { pcmk__assert((clone_data->demote_notify == NULL) && (clone_data->stop_notify == NULL) && (clone_data->start_notify == NULL) && (clone_data->promote_notify == NULL)); } common_free(rsc); } enum rsc_role_e clone_resource_state(const pcmk_resource_t * rsc, gboolean current) { enum rsc_role_e clone_role = pcmk_role_unknown; for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; enum rsc_role_e a_role = child_rsc->priv->fns->state(child_rsc, current); if (a_role > clone_role) { clone_role = a_role; } } pcmk__rsc_trace(rsc, "%s role: %s", rsc->id, pcmk_role_text(clone_role)); return clone_role; } /*! 
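 * \note A clone qualifies only when its effective PCMK_META_CLONE_MAX
 *       (which defaults to the number of cluster nodes) equals the current
 *       node count; see the check below.
 *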
* \internal * \brief Check whether a clone has an instance for every node * * \param[in] rsc Clone to check * \param[in] scheduler Scheduler data */ bool pe__is_universal_clone(const pcmk_resource_t *rsc, const pcmk_scheduler_t *scheduler) { if (pcmk__is_clone(rsc)) { clone_variant_data_t *clone_data = rsc->priv->variant_opaque; if (clone_data->clone_max == g_list_length(scheduler->nodes)) { return TRUE; } } return FALSE; } gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; clone_variant_data_t *clone_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_clone_variant_data(clone_data, rsc); passes = pcmk__str_in_list(pcmk__xe_id(clone_data->xml_obj_child), only_rsc, pcmk__str_star_matches); if (!passes) { for (const GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { const pcmk_resource_t *child_rsc = NULL; child_rsc = (const pcmk_resource_t *) iter->data; if (!child_rsc->priv->fns->is_filtered(child_rsc, only_rsc, FALSE)) { passes = TRUE; break; } } } } return !passes; } const char * pe__clone_child_id(const pcmk_resource_t *rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); return pcmk__xe_id(clone_data->xml_obj_child); } /*! * \internal * \brief Check whether a clone is ordered * * \param[in] clone Clone resource to check * * \return true if clone is ordered, otherwise false */ bool pe__clone_is_ordered(const pcmk_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); return pcmk_is_set(clone_data->flags, pcmk__clone_ordered); } /*! * \internal * \brief Set a clone flag * * \param[in,out] clone Clone resource to set flag for * \param[in] flag Clone flag to set * * \return Standard Pacemaker return code (either pcmk_rc_ok if flag was not * already set or pcmk_rc_already if it was) */ int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); if (pcmk_is_set(clone_data->flags, flag)) { return pcmk_rc_already; } clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Clone", clone->id, clone_data->flags, flag, "flag"); return pcmk_rc_ok; } /*! * \internal * \brief Check whether a clone flag is set * * \param[in] group Clone resource to check * \param[in] flags Flag or flags to check * * \return \c true if all \p flags are set for \p clone, otherwise \c false */ bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); pcmk__assert(clone_data != NULL); return pcmk_all_flags_set(clone_data->flags, flags); } /*! 
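 * \note Besides the promote/demote pseudo-actions themselves, this also
 *       orders the stop, start, and demote notification completions ahead
 *       of the promotion's pre-notification (see the order_actions() calls
 *       below).
 *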
* \internal * \brief Create pseudo-actions needed for promotable clones * * \param[in,out] clone Promotable clone to create actions for * \param[in] any_promoting Whether any instances will be promoted * \param[in] any_demoting Whether any instance will be demoted */ void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting, bool any_demoting) { pcmk_action_t *action = NULL; pcmk_action_t *action_complete = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); // Create a "promote" action for the clone itself action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTE, !any_promoting, true); // Create a "promoted" action for when all promotions are done action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTED, !any_promoting, true); action_complete->priority = PCMK_SCORE_INFINITY; // Create notification pseudo-actions for promotion if (clone_data->promote_notify == NULL) { clone_data->promote_notify = pe__action_notif_pseudo_ops(clone, PCMK_ACTION_PROMOTE, action, action_complete); } // Create a "demote" action for the clone itself action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTE, !any_demoting, true); // Create a "demoted" action for when all demotions are done action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTED, !any_demoting, true); action_complete->priority = PCMK_SCORE_INFINITY; // Create notification pseudo-actions for demotion if (clone_data->demote_notify == NULL) { clone_data->demote_notify = pe__action_notif_pseudo_ops(clone, PCMK_ACTION_DEMOTE, action, action_complete); if (clone_data->promote_notify != NULL) { order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre, pcmk__ar_ordered); order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre, pcmk__ar_ordered); order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre, pcmk__ar_ordered); order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre, pcmk__ar_ordered); order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre, pcmk__ar_ordered); } } } /*! * \internal * \brief Create all notification data and actions for a clone * * \param[in,out] clone Clone to create notifications for */ void pe__create_clone_notifications(pcmk_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); pe__create_action_notifications(clone, clone_data->start_notify); pe__create_action_notifications(clone, clone_data->stop_notify); pe__create_action_notifications(clone, clone_data->promote_notify); pe__create_action_notifications(clone, clone_data->demote_notify); } /*! * \internal * \brief Free all notification data for a clone * * \param[in,out] clone Clone to free notification data for */ void pe__free_clone_notification_data(pcmk_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); pe__free_action_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; pe__free_action_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; pe__free_action_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; pe__free_action_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } /*! 
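 * \note When both notification sets exist, the completion of the stop
 *       notifications is ordered before the start notifications begin (see
 *       the order_actions() call below).
 *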
* \internal * \brief Create pseudo-actions for clone start/stop notifications * * \param[in,out] clone Clone to create pseudo-actions for * \param[in,out] start Start action for \p clone * \param[in,out] stop Stop action for \p clone * \param[in,out] started Started action for \p clone * \param[in,out] stopped Stopped action for \p clone */ void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone, pcmk_action_t *start, pcmk_action_t *started, pcmk_action_t *stop, pcmk_action_t *stopped) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); if (clone_data->start_notify == NULL) { clone_data->start_notify = pe__action_notif_pseudo_ops(clone, PCMK_ACTION_START, start, started); } if (clone_data->stop_notify == NULL) { clone_data->stop_notify = pe__action_notif_pseudo_ops(clone, PCMK_ACTION_STOP, stop, stopped); if ((clone_data->start_notify != NULL) && (clone_data->stop_notify != NULL)) { order_actions(clone_data->stop_notify->post_done, clone_data->start_notify->pre, pcmk__ar_ordered); } } } /*! * \internal * \brief Get maximum clone resource instances per node * * \param[in] rsc Clone resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__clone_max_per_node(const pcmk_resource_t *rsc) { const clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); return clone_data->clone_node_max; } diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c index cbc76e30eb..b0c044c032 100644 --- a/lib/pengine/complex.c +++ b/lib/pengine/complex.c @@ -1,1259 +1,1258 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include -#include #include #include #include #include #include "pe_status_private.h" void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length); static pcmk_node_t *active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); static pcmk__rsc_methods_t resource_class_functions[] = { { native_unpack, native_find_rsc, native_parameter, native_active, native_resource_state, native_location, native_free, pe__count_common, pe__native_is_filtered, active_node, pe__primitive_max_per_node, }, { group_unpack, native_find_rsc, native_parameter, group_active, group_resource_state, native_location, group_free, pe__count_common, pe__group_is_filtered, active_node, pe__group_max_per_node, }, { clone_unpack, native_find_rsc, native_parameter, clone_active, clone_resource_state, native_location, clone_free, pe__count_common, pe__clone_is_filtered, active_node, pe__clone_max_per_node, }, { pe__unpack_bundle, native_find_rsc, native_parameter, pe__bundle_active, pe__bundle_resource_state, native_location, pe__free_bundle, pe__count_bundle, pe__bundle_is_filtered, pe__bundle_active_node, pe__bundle_max_per_node, } }; static enum pcmk__rsc_variant get_resource_type(const char *name) { if (pcmk__str_eq(name, PCMK_XE_PRIMITIVE, pcmk__str_casei)) { return pcmk__rsc_variant_primitive; } else if (pcmk__str_eq(name, PCMK_XE_GROUP, pcmk__str_casei)) { return pcmk__rsc_variant_group; } else if (pcmk__str_eq(name, PCMK_XE_CLONE, pcmk__str_casei)) { return pcmk__rsc_variant_clone; } else if (pcmk__str_eq(name, PCMK_XE_BUNDLE, pcmk__str_casei)) { return pcmk__rsc_variant_bundle; } return pcmk__rsc_variant_unknown; } /*! * \internal * \brief Insert a meta-attribute if not already present * * \param[in] key Meta-attribute name * \param[in] value Meta-attribute value to add if not already present * \param[in,out] table Meta-attribute hash table to insert into * * \note This is like pcmk__insert_meta() except it won't overwrite existing * values. */ static void dup_attr(gpointer key, gpointer value, gpointer user_data) { GHashTable *table = user_data; CRM_CHECK((key != NULL) && (table != NULL), return); if (pcmk__str_eq((const char *) value, "#default", pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting meta-attributes (such as %s) to " "the explicit value '#default' is deprecated and " "will be removed in a future release", (const char *) key); } else if ((value != NULL) && (g_hash_table_lookup(table, key) == NULL)) { pcmk__insert_dup(table, (const char *) key, (const char *) value); } } static void expand_parents_fixed_nvpairs(pcmk_resource_t *rsc, const pcmk_rule_input_t *rule_input, GHashTable *meta_hash, pcmk_scheduler_t *scheduler) { GHashTable *parent_orig_meta = pcmk__strkey_table(free, free); pcmk_resource_t *p = rsc->priv->parent; if (p == NULL) { return ; } /* Search all parent resources, get the fixed value of * PCMK_XE_META_ATTRIBUTES set only in the original xml, and stack it in the * hash table. The fixed value of the lower parent resource takes precedence * and is not overwritten. */ while(p != NULL) { /* A hash table for comparison is generated, including the id-ref. 
*/ pe__unpack_dataset_nvpairs(p->priv->xml, PCMK_XE_META_ATTRIBUTES, rule_input, parent_orig_meta, NULL, scheduler); p = p->priv->parent; } if (parent_orig_meta != NULL) { // This will not overwrite any values already existing for child g_hash_table_foreach(parent_orig_meta, dup_attr, meta_hash); } if (parent_orig_meta != NULL) { g_hash_table_destroy(parent_orig_meta); } return ; } /* * \brief Get fully evaluated resource meta-attributes * * \param[in,out] meta_hash Where to store evaluated meta-attributes * \param[in] rsc Resource to get meta-attributes for * \param[in] node Ignored * \param[in,out] scheduler Scheduler data */ void get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, .rsc_standard = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS), .rsc_provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER), .rsc_agent = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE) }; for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->priv->xml); a != NULL; a = a->next) { if (a->children != NULL) { dup_attr((gpointer) a->name, (gpointer) a->children->content, meta_hash); } } pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_META_ATTRIBUTES, &rule_input, meta_hash, NULL, scheduler); /* Set the PCMK_XE_META_ATTRIBUTES explicitly set in the parent resource to * the hash table of the child resource. If it is already explicitly set as * a child, it will not be overwritten. */ if (rsc->priv->parent != NULL) { expand_parents_fixed_nvpairs(rsc, &rule_input, meta_hash, scheduler); } /* check the defaults */ pe__unpack_dataset_nvpairs(scheduler->priv->rsc_defaults, PCMK_XE_META_ATTRIBUTES, &rule_input, meta_hash, NULL, scheduler); /* If there is PCMK_XE_META_ATTRIBUTES that the parent resource has not * explicitly set, set a value that is not set from PCMK_XE_RSC_DEFAULTS * either. The values already set up to this point will not be overwritten. */ if (rsc->priv->parent != NULL) { g_hash_table_foreach(rsc->priv->parent->priv->meta, dup_attr, meta_hash); } } /*! 
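 * (Illustrative usage sketch, not part of the upstream change; callers
 *  normally go through pe_rsc_params() below, which caches the evaluated
 *  table per node:)
 *
 *     GHashTable *attrs = pcmk__strkey_table(free, free);
 *
 *     get_rsc_attributes(attrs, rsc, node, scheduler);
 *     // ... look up parameter values in attrs ...
 *     g_hash_table_destroy(attrs);
 *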
* \brief Get final values of a resource's instance attributes * * \param[in,out] instance_attrs Where to store the instance attributes * \param[in] rsc Resource to get instance attributes for * \param[in] node If not NULL, evaluate rules for this node * \param[in,out] scheduler Scheduler data */ void get_rsc_attributes(GHashTable *instance_attrs, const pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_rule_input_t rule_input = { .now = NULL, }; CRM_CHECK((instance_attrs != NULL) && (rsc != NULL) && (scheduler != NULL), return); rule_input.now = scheduler->priv->now; if (node != NULL) { rule_input.node_attrs = node->priv->attrs; } // Evaluate resource's own values, then its ancestors' values pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_input, instance_attrs, NULL, scheduler); if (rsc->priv->parent != NULL) { get_rsc_attributes(instance_attrs, rsc->priv->parent, node, scheduler); } } static char * template_op_key(xmlNode * op) { const char *name = crm_element_value(op, PCMK_XA_NAME); const char *role = crm_element_value(op, PCMK_XA_ROLE); char *key = NULL; if ((role == NULL) || pcmk__strcase_any_of(role, PCMK_ROLE_STARTED, PCMK_ROLE_UNPROMOTED, PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) { role = PCMK__ROLE_UNKNOWN; } key = crm_strdup_printf("%s-%s", name, role); return key; } static gboolean unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml, pcmk_scheduler_t *scheduler) { xmlNode *cib_resources = NULL; xmlNode *template = NULL; xmlNode *new_xml = NULL; xmlNode *child_xml = NULL; xmlNode *rsc_ops = NULL; xmlNode *template_ops = NULL; const char *template_ref = NULL; const char *id = NULL; if (xml_obj == NULL) { pcmk__config_err("No resource object for template unpacking"); return FALSE; } template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = pcmk__xe_id(xml_obj); if (id == NULL) { pcmk__config_err("'%s' object must have a id", xml_obj->name); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pcmk__config_err("The resource object '%s' should not reference itself", id); return FALSE; } cib_resources = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input, LOG_TRACE); if (cib_resources == NULL) { pcmk__config_err("No resources configured"); return FALSE; } template = pcmk__xe_first_child(cib_resources, PCMK_XE_TEMPLATE, PCMK_XA_ID, template_ref); if (template == NULL) { pcmk__config_err("No template named '%s'", template_ref); return FALSE; } new_xml = pcmk__xml_copy(NULL, template); xmlNodeSetName(new_xml, xml_obj->name); crm_xml_add(new_xml, PCMK_XA_ID, id); crm_xml_add(new_xml, PCMK__META_CLONE, crm_element_value(xml_obj, PCMK__META_CLONE)); template_ops = pcmk__xe_first_child(new_xml, PCMK_XE_OPERATIONS, NULL, NULL); for (child_xml = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL); child_xml != NULL; child_xml = pcmk__xe_next(child_xml, NULL)) { xmlNode *new_child = pcmk__xml_copy(new_xml, child_xml); if (pcmk__xe_is(new_child, PCMK_XE_OPERATIONS)) { rsc_ops = new_child; } } if (template_ops && rsc_ops) { xmlNode *op = NULL; GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL); for (op = pcmk__xe_first_child(rsc_ops, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op, NULL)) { char *key = template_op_key(op); g_hash_table_insert(rsc_ops_hash, key, op); } for (op = pcmk__xe_first_child(template_ops, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op, NULL)) { char *key = template_op_key(op); if (g_hash_table_lookup(rsc_ops_hash, key) == 
NULL) { pcmk__xml_copy(rsc_ops, op); } free(key); } if (rsc_ops_hash) { g_hash_table_destroy(rsc_ops_hash); } pcmk__xml_free(template_ops); } /*pcmk__xml_free(*expanded_xml); */ *expanded_xml = new_xml; #if 0 /* Disable multi-level templates for now */ if (!unpack_template(new_xml, expanded_xml, scheduler)) { pcmk__xml_free(*expanded_xml); *expanded_xml = NULL; return FALSE; } #endif return TRUE; } static gboolean add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler) { const char *template_ref = NULL; const char *id = NULL; if (xml_obj == NULL) { pcmk__config_err("No resource object for processing resource list " "of template"); return FALSE; } template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = pcmk__xe_id(xml_obj); if (id == NULL) { pcmk__config_err("'%s' object must have a id", xml_obj->name); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pcmk__config_err("The resource object '%s' should not reference itself", id); return FALSE; } pcmk__add_idref(scheduler->priv->templates, template_ref, id); return TRUE; } /*! * \internal * \brief Check whether a clone or instance being unpacked is globally unique * * \param[in] rsc Clone or clone instance to check * * \return \c true if \p rsc is globally unique according to its * meta-attributes, otherwise \c false */ static bool detect_unique(const pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_GLOBALLY_UNIQUE); if (value == NULL) { // Default to true if clone-node-max > 1 value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_CLONE_NODE_MAX); if (value != NULL) { int node_max = 1; if ((pcmk__scan_min_int(value, &node_max, 0) == pcmk_rc_ok) && (node_max > 1)) { return true; } } return false; } return crm_is_true(value); } static void free_params_table(gpointer data) { g_hash_table_destroy((GHashTable *) data); } /*! * \brief Get a table of resource parameters * * \param[in,out] rsc Resource to query * \param[in] node Node for evaluating rules (NULL for defaults) * \param[in,out] scheduler Scheduler data * * \return Hash table containing resource parameter names and values * (or NULL if \p rsc or \p scheduler is NULL) * \note The returned table will be destroyed when the resource is freed, so * callers should not destroy it. */ GHashTable * pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { GHashTable *params_on_node = NULL; /* A NULL node is used to request the resource's default parameters * (not evaluated for node), but we always want something non-NULL * as a hash table key. */ const char *node_name = ""; // Sanity check if ((rsc == NULL) || (scheduler == NULL)) { return NULL; } if ((node != NULL) && (node->priv->name != NULL)) { node_name = node->priv->name; } // Find the parameter table for given node if (rsc->priv->parameter_cache == NULL) { rsc->priv->parameter_cache = pcmk__strikey_table(free, free_params_table); } else { params_on_node = g_hash_table_lookup(rsc->priv->parameter_cache, node_name); } // If none exists yet, create one with parameters evaluated for node if (params_on_node == NULL) { params_on_node = pcmk__strkey_table(free, free); get_rsc_attributes(params_on_node, rsc, node, scheduler); g_hash_table_insert(rsc->priv->parameter_cache, strdup(node_name), params_on_node); } return params_on_node; } /*! 
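 * \note Recognized values are PCMK_VALUE_NOTHING, PCMK_VALUE_QUORUM,
 *       PCMK_VALUE_FENCING, and PCMK_VALUE_UNFENCING; any other explicit
 *       value is logged as a configuration error and replaced with a
 *       cluster-appropriate default (see the final else branch below).
 *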
* \internal * \brief Unpack a resource's \c PCMK_META_REQUIRES meta-attribute * * \param[in,out] rsc Resource being unpacked * \param[in] value Value of \c PCMK_META_REQUIRES meta-attribute * \param[in] is_default Whether \p value was selected by default */ static void unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default) { const pcmk_scheduler_t *scheduler = rsc->priv->scheduler; if (pcmk__str_eq(value, PCMK_VALUE_NOTHING, pcmk__str_casei)) { } else if (pcmk__str_eq(value, PCMK_VALUE_QUORUM, pcmk__str_casei)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_quorum); } else if (pcmk__str_eq(value, PCMK_VALUE_FENCING, pcmk__str_casei)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing); if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("%s requires fencing but fencing is disabled", rsc->id); } } else if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s " "to \"" PCMK_VALUE_QUORUM "\" because fencing " "devices cannot require unfencing", rsc->id); unpack_requires(rsc, PCMK_VALUE_QUORUM, true); return; } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s " "to \"" PCMK_VALUE_QUORUM "\" because fencing is " "disabled", rsc->id); unpack_requires(rsc, PCMK_VALUE_QUORUM, true); return; } else { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing |pcmk__rsc_needs_unfencing); } } else { const char *orig_value = value; if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { value = PCMK_VALUE_QUORUM; } else if (pcmk__is_primitive(rsc) && xml_contains_remote_node(rsc->priv->xml)) { value = PCMK_VALUE_QUORUM; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_enable_unfencing)) { value = PCMK_VALUE_UNFENCING; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { value = PCMK_VALUE_FENCING; } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) { value = PCMK_VALUE_NOTHING; } else { value = PCMK_VALUE_QUORUM; } if (orig_value != NULL) { pcmk__config_err("Resetting '" PCMK_META_REQUIRES "' for %s " "to '%s' because '%s' is not valid", rsc->id, value, orig_value); } unpack_requires(rsc, value, true); return; } pcmk__rsc_trace(rsc, "\tRequired to start: %s%s", value, (is_default? " (default)" : "")); } /*! * \internal * \brief Parse resource priority from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_priority(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_PRIORITY); int rc = pcmk_parse_score(value, &(rsc->priv->priority), 0); if (rc != pcmk_rc_ok) { pcmk__config_warn("Using default (0) for resource %s " PCMK_META_PRIORITY " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); } } /*! 
* \internal * \brief Parse resource stickiness from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_stickiness(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_RESOURCE_STICKINESS); if (pcmk__str_eq(value, PCMK_VALUE_DEFAULT, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_RESOURCE_STICKINESS " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); } else { int rc = pcmk_parse_score(value, &(rsc->priv->stickiness), 0); if (rc != pcmk_rc_ok) { pcmk__config_warn("Using default (0) for resource %s " PCMK_META_RESOURCE_STICKINESS " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); } } } /*! * \internal * \brief Parse resource migration threshold from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_migration_threshold(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_MIGRATION_THRESHOLD); if (pcmk__str_eq(value, PCMK_VALUE_DEFAULT, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_MIGRATION_THRESHOLD " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); rsc->priv->ban_after_failures = PCMK_SCORE_INFINITY; } else { int rc = pcmk_parse_score(value, &(rsc->priv->ban_after_failures), PCMK_SCORE_INFINITY); if ((rc != pcmk_rc_ok) || (rsc->priv->ban_after_failures < 0)) { pcmk__config_warn("Using default (" PCMK_VALUE_INFINITY ") for resource %s meta-attribute " PCMK_META_MIGRATION_THRESHOLD " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); rsc->priv->ban_after_failures = PCMK_SCORE_INFINITY; } } } /*! * \internal * \brief Unpack configuration XML for a given resource * * Unpack the XML object containing a resource's configuration into a new * \c pcmk_resource_t object. * * \param[in] xml_obj XML node containing the resource's configuration * \param[out] rsc Where to store the unpacked resource information * \param[in] parent Resource's parent, if any * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and * the caller is responsible for freeing it using its variant-specific * free() method. Otherwise, \p *rsc is guaranteed to be NULL. 
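 *
 * (Illustrative call pattern, not part of the upstream change; `xml` is
 *  assumed to be a resource configuration element and `scheduler` an
 *  initialized scheduler object:)
 *
 *     pcmk_resource_t *new_rsc = NULL;
 *
 *     if (pe__unpack_resource(xml, &new_rsc, NULL, scheduler) == pcmk_rc_ok) {
 *         // ... use new_rsc ...
 *         new_rsc->priv->fns->free(new_rsc);
 *     }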
*/ int pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc, pcmk_resource_t *parent, pcmk_scheduler_t *scheduler) { xmlNode *expanded_xml = NULL; xmlNode *ops = NULL; const char *value = NULL; const char *id = NULL; bool guest_node = false; bool remote_node = false; pcmk__resource_private_t *rsc_private = NULL; pcmk_rule_input_t rule_input = { .now = NULL, }; CRM_CHECK(rsc != NULL, return EINVAL); CRM_CHECK((xml_obj != NULL) && (scheduler != NULL), *rsc = NULL; return EINVAL); rule_input.now = scheduler->priv->now; crm_log_xml_trace(xml_obj, "[raw XML]"); id = crm_element_value(xml_obj, PCMK_XA_ID); if (id == NULL) { pcmk__config_err("Ignoring <%s> configuration without " PCMK_XA_ID, xml_obj->name); return pcmk_rc_unpack_error; } if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) { return pcmk_rc_unpack_error; } *rsc = calloc(1, sizeof(pcmk_resource_t)); if (*rsc == NULL) { pcmk__sched_err(scheduler, "Unable to allocate memory for resource '%s'", id); return ENOMEM; } (*rsc)->priv = calloc(1, sizeof(pcmk__resource_private_t)); if ((*rsc)->priv == NULL) { pcmk__sched_err(scheduler, "Unable to allocate memory for resource '%s'", id); free(*rsc); return ENOMEM; } rsc_private = (*rsc)->priv; rsc_private->scheduler = scheduler; if (expanded_xml) { crm_log_xml_trace(expanded_xml, "[expanded XML]"); rsc_private->xml = expanded_xml; rsc_private->orig_xml = xml_obj; } else { rsc_private->xml = xml_obj; rsc_private->orig_xml = NULL; } /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */ rsc_private->parent = parent; ops = pcmk__xe_first_child(rsc_private->xml, PCMK_XE_OPERATIONS, NULL, NULL); rsc_private->ops_xml = pcmk__xe_resolve_idref(ops, scheduler->input); rsc_private->variant = get_resource_type((const char *) rsc_private->xml->name); if (rsc_private->variant == pcmk__rsc_variant_unknown) { pcmk__config_err("Ignoring resource '%s' of unknown type '%s'", id, rsc_private->xml->name); common_free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } rsc_private->meta = pcmk__strkey_table(free, free); rsc_private->utilization = pcmk__strkey_table(free, free); rsc_private->probed_nodes = pcmk__strkey_table(NULL, free); rsc_private->allowed_nodes = pcmk__strkey_table(NULL, free); value = crm_element_value(rsc_private->xml, PCMK__META_CLONE); if (value) { (*rsc)->id = crm_strdup_printf("%s:%s", id, value); pcmk__insert_meta(rsc_private, PCMK__META_CLONE, value); } else { (*rsc)->id = strdup(id); } rsc_private->fns = &resource_class_functions[rsc_private->variant]; get_meta_attributes(rsc_private->meta, *rsc, NULL, scheduler); (*rsc)->flags = 0; pcmk__set_rsc_flags(*rsc, pcmk__rsc_unassigned); if (!pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed); } rsc_private->orig_role = pcmk_role_stopped; rsc_private->next_role = pcmk_role_unknown; unpack_priority(*rsc); value = g_hash_table_lookup(rsc_private->meta, PCMK_META_CRITICAL); if ((value == NULL) || crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_critical); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_NOTIFY); if (crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_notify); } if (xml_contains_remote_node(rsc_private->xml)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_is_remote_connection); if (g_hash_table_lookup(rsc_private->meta, PCMK__META_CONTAINER)) { guest_node = true; } else { remote_node = true; } } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_ALLOW_MIGRATE); if (crm_is_true(value)) { 
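// PCMK_META_ALLOW_MIGRATE was explicitly set to a true value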
pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable); } else if ((value == NULL) && remote_node) { /* By default, we want remote nodes to be able * to float around the cluster without having to stop all the * resources within the remote-node before moving. Allowing * migration support enables this feature. If this ever causes * problems, migration support can be explicitly turned off with * PCMK_META_ALLOW_MIGRATE=false. */ pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_IS_MANAGED); if (value != NULL) { if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_IS_MANAGED " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); } else if (crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed); } else { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); } } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MAINTENANCE); if (crm_is_true(value)) { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance); } if (pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance); } if (pcmk__is_clone(pe__const_top_resource(*rsc, false))) { if (detect_unique(*rsc)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique); } if (crm_is_true(g_hash_table_lookup((*rsc)->priv->meta, PCMK_META_PROMOTABLE))) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_promotable); } } else { pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MULTIPLE_ACTIVE); if (pcmk__str_eq(value, PCMK_VALUE_STOP_ONLY, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_stop; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop only", (*rsc)->id); } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_block; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: block", (*rsc)->id); } else if (pcmk__str_eq(value, PCMK_VALUE_STOP_UNEXPECTED, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_unexpected; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: " "stop unexpected instances", (*rsc)->id); } else { // PCMK_VALUE_STOP_START if (!pcmk__str_eq(value, PCMK_VALUE_STOP_START, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_warn("%s is not a valid value for " PCMK_META_MULTIPLE_ACTIVE ", using default of " "\"" PCMK_VALUE_STOP_START "\"", value); } rsc_private->multiply_active_policy = pcmk__multiply_active_restart; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop/start", (*rsc)->id); } unpack_stickiness(*rsc); unpack_migration_threshold(*rsc); if (pcmk__str_eq(crm_element_value(rsc_private->xml, PCMK_XA_CLASS), PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_fencing); pcmk__set_rsc_flags(*rsc, pcmk__rsc_fence_device); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_REQUIRES); unpack_requires(*rsc, value, false); value = g_hash_table_lookup(rsc_private->meta, PCMK_META_FAILURE_TIMEOUT); if (value != NULL) { pcmk_parse_interval_spec(value, &(rsc_private->failure_expiration_ms)); } if (remote_node) { GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler); /* Grabbing the value now means 
that any rules based on node attributes * will evaluate to false, so such rules should not be used with * PCMK_REMOTE_RA_RECONNECT_INTERVAL. * * @TODO Evaluate per node before using */ value = g_hash_table_lookup(params, PCMK_REMOTE_RA_RECONNECT_INTERVAL); if (value) { /* reconnect delay works by setting failure_timeout and preventing the * connection from starting until the failure is cleared. */ pcmk_parse_interval_spec(value, &(rsc_private->remote_reconnect_ms)); /* We want to override any default failure_timeout in use when remote * PCMK_REMOTE_RA_RECONNECT_INTERVAL is in use. */ rsc_private->failure_expiration_ms = rsc_private->remote_reconnect_ms; } } get_target_role(*rsc, &(rsc_private->next_role)); pcmk__rsc_trace(*rsc, "%s desired next state: %s", (*rsc)->id, (rsc_private->next_role == pcmk_role_unknown)? "default" : pcmk_role_text(rsc_private->next_role)); if (rsc_private->fns->unpack(*rsc, scheduler) == FALSE) { rsc_private->fns->free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } if (pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { // This tag must stay exactly the same because it is tested elsewhere resource_location(*rsc, NULL, 0, "symmetric_default", scheduler); } else if (guest_node) { /* remote resources tied to a container resource must always be allowed * to opt-in to the cluster. Whether the connection resource is actually * allowed to be placed on a node is dependent on the container resource */ resource_location(*rsc, NULL, 0, "remote_connection_default", scheduler); } pcmk__rsc_trace(*rsc, "%s action notification: %s", (*rsc)->id, pcmk_is_set((*rsc)->flags, pcmk__rsc_notify)? "required" : "not required"); pe__unpack_dataset_nvpairs(rsc_private->xml, PCMK_XE_UTILIZATION, &rule_input, rsc_private->utilization, NULL, scheduler); if (expanded_xml) { if (add_template_rsc(xml_obj, scheduler) == FALSE) { rsc_private->fns->free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } } return pcmk_rc_ok; } gboolean is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc) { pcmk_resource_t *parent = child; if (parent == NULL || rsc == NULL) { return FALSE; } while (parent->priv->parent != NULL) { if (parent->priv->parent == rsc) { return TRUE; } parent = parent->priv->parent; } return FALSE; } pcmk_resource_t * uber_parent(pcmk_resource_t *rsc) { pcmk_resource_t *parent = rsc; if (parent == NULL) { return NULL; } while ((parent->priv->parent != NULL) && !pcmk__is_bundle(parent->priv->parent)) { parent = parent->priv->parent; } return parent; } /*! 
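 * \note This is the const counterpart of uber_parent() above, with the
 *       additional option of continuing up into a containing bundle.
 *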
* \internal * \brief Get the topmost parent of a resource as a const pointer * * \param[in] rsc Resource to check * \param[in] include_bundle If true, go all the way to bundle * * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent, * the bundle if \p rsc is bundled and \p include_bundle is true, * otherwise the topmost parent of \p rsc up to a clone */ const pcmk_resource_t * pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle) { const pcmk_resource_t *parent = rsc; if (parent == NULL) { return NULL; } while (parent->priv->parent != NULL) { if (!include_bundle && pcmk__is_bundle(parent->priv->parent)) { break; } parent = parent->priv->parent; } return parent; } void common_free(pcmk_resource_t * rsc) { if (rsc == NULL) { return; } pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); if (rsc->priv->parameter_cache != NULL) { g_hash_table_destroy(rsc->priv->parameter_cache); } if ((rsc->priv->parent == NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { pcmk__xml_free(rsc->priv->xml); rsc->priv->xml = NULL; pcmk__xml_free(rsc->priv->orig_xml); rsc->priv->orig_xml = NULL; } else if (rsc->priv->orig_xml != NULL) { // rsc->private->xml was expanded from a template pcmk__xml_free(rsc->priv->xml); rsc->priv->xml = NULL; } free(rsc->id); free(rsc->priv->variant_opaque); free(rsc->priv->history_id); free(rsc->priv->pending_action); free(rsc->priv->assigned_node); g_list_free(rsc->priv->actions); g_list_free(rsc->priv->active_nodes); g_list_free(rsc->priv->launched); g_list_free(rsc->priv->dangling_migration_sources); g_list_free(rsc->priv->with_this_colocations); g_list_free(rsc->priv->this_with_colocations); g_list_free(rsc->priv->location_constraints); g_list_free(rsc->priv->ticket_constraints); if (rsc->priv->meta != NULL) { g_hash_table_destroy(rsc->priv->meta); } if (rsc->priv->utilization != NULL) { g_hash_table_destroy(rsc->priv->utilization); } if (rsc->priv->probed_nodes != NULL) { g_hash_table_destroy(rsc->priv->probed_nodes); } if (rsc->priv->allowed_nodes != NULL) { g_hash_table_destroy(rsc->priv->allowed_nodes); } free(rsc->priv); free(rsc); } /*! 
* \internal * \brief Count a node and update most preferred to it as appropriate * * \param[in] rsc An active resource * \param[in] node A node that \p rsc is active on * \param[in,out] active This will be set to \p node if \p node is more * preferred than the current value * \param[in,out] count_all If not NULL, this will be incremented * \param[in,out] count_clean If not NULL, this will be incremented if \p node * is online and clean * * \return true if the count should continue, or false if sufficiently known */ bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_node_t **active, unsigned int *count_all, unsigned int *count_clean) { bool keep_looking = false; bool is_happy = false; CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL), return false); is_happy = node->details->online && !node->details->unclean; if (count_all != NULL) { ++*count_all; } if ((count_clean != NULL) && is_happy) { ++*count_clean; } if ((count_all != NULL) || (count_clean != NULL)) { keep_looking = true; // We're counting, so go through entire list } if (rsc->priv->partial_migration_source != NULL) { if (pcmk__same_node(node, rsc->priv->partial_migration_source)) { *active = node; // This is the migration source } else { keep_looking = true; } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) { if (is_happy && ((*active == NULL) || !(*active)->details->online || (*active)->details->unclean)) { *active = node; // This is the first clean node } else { keep_looking = true; } } if (*active == NULL) { *active = node; // This is the first node checked } return keep_looking; } // Shared implementation of pcmk__rsc_methods_t:active_node() static pcmk_node_t * active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pcmk_node_t *active = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } for (GList *iter = rsc->priv->active_nodes; iter != NULL; iter = iter->next) { if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active, count_all, count_clean)) { break; // Don't waste time iterating if we don't have to } } return active; } /*! * \internal * \brief Find and count active nodes according to \c PCMK_META_REQUIRES * * \param[in] rsc Resource to check * \param[out] count If not NULL, will be set to count of active nodes * * \return An active node (or NULL if resource is not active anywhere) * * \note This is a convenience wrapper for active_node() where the count of all * active nodes or only clean active nodes is desired according to the * \c PCMK_META_REQUIRES meta-attribute.
*/ pcmk_node_t * pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count) { if (rsc == NULL) { if (count != NULL) { *count = 0; } return NULL; } if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) { return rsc->priv->fns->active_node(rsc, count, NULL); } else { return rsc->priv->fns->active_node(rsc, NULL, count); } } void pe__count_common(pcmk_resource_t *rsc) { if (rsc->priv->children != NULL) { for (GList *item = rsc->priv->children; item != NULL; item = item->next) { pcmk_resource_t *child = item->data; child->priv->fns->count(item->data); } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed) || (rsc->priv->orig_role > pcmk_role_stopped)) { rsc->priv->scheduler->priv->ninstances++; if (pe__resource_is_disabled(rsc)) { rsc->priv->scheduler->priv->disabled_resources++; } if (pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) { rsc->priv->scheduler->priv->blocked_resources++; } } } /*! * \internal * \brief Update a resource's next role * * \param[in,out] rsc Resource to be updated * \param[in] role Resource's new next role * \param[in] why Human-friendly reason why role is changing (for logs) */ void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why) { pcmk__assert((rsc != NULL) && (why != NULL)); if (rsc->priv->next_role != role) { pcmk__rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)", rsc->id, pcmk_role_text(rsc->priv->next_role), pcmk_role_text(role), why); rsc->priv->next_role = role; } } diff --git a/lib/pengine/group.c b/lib/pengine/group.c index 0bd9cb1b30..2f3793da6c 100644 --- a/lib/pengine/group.c +++ b/lib/pengine/group.c @@ -1,470 +1,469 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include -#include #include #include #include #include #include #include #include typedef struct group_variant_data_s { pcmk_resource_t *last_child; // Last group member uint32_t flags; // Group of enum pcmk__group_flags } group_variant_data_t; /*! * \internal * \brief Get a group's last member * * \param[in] group Group resource to check * * \return Last member of \p group if any, otherwise NULL */ pcmk_resource_t * pe__last_group_member(const pcmk_resource_t *group) { if (group != NULL) { const group_variant_data_t *group_data = NULL; CRM_CHECK(pcmk__is_group(group), return NULL); group_data = group->priv->variant_opaque; return group_data->last_child; } return NULL; } /*! * \internal * \brief Check whether a group flag is set * * \param[in] group Group resource to check * \param[in] flags Flag or flags to check * * \return true if all \p flags are set for \p group, otherwise false */ bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags) { const group_variant_data_t *group_data = NULL; CRM_CHECK(pcmk__is_group(group), return false); group_data = group->priv->variant_opaque; return pcmk_all_flags_set(group_data->flags, flags); } /*! 
* \internal * \brief Set a (deprecated) group flag * * \param[in,out] group Group resource to check * \param[in] option Name of boolean configuration option * \param[in] flag Flag to set if \p option is true (which is default) * \param[in] wo_bit "Warn once" flag to use for deprecation warning */ static void set_group_flag(pcmk_resource_t *group, const char *option, uint32_t flag, uint32_t wo_bit) { const char *value_s = NULL; int value = 0; value_s = g_hash_table_lookup(group->priv->meta, option); // We don't actually need the null check but it speeds up the common case if ((value_s == NULL) || (crm_str_to_boolean(value_s, &value) < 0) || (value != 0)) { group_variant_data_t *group_data = group->priv->variant_opaque; group_data->flags |= flag; } else { pcmk__warn_once(wo_bit, "Support for the '%s' group meta-attribute is " "deprecated and will be removed in a future release " "(use a resource set instead)", option); } } static int inactive_resources(pcmk_resource_t *rsc) { int retval = 0; for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; if (!child_rsc->priv->fns->active(child_rsc, TRUE)) { retval++; } } return retval; } static void group_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc, int n_inactive, bool show_inactive, const char *desc) { GString *attrs = NULL; if (n_inactive > 0 && !show_inactive) { attrs = g_string_sized_new(64); g_string_append_printf(attrs, "%d member%s inactive", n_inactive, pcmk__plural_s(n_inactive)); } if (pe__resource_is_disabled(rsc)) { pcmk__add_separated_word(&attrs, 64, "disabled", ", "); } if (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)) { pcmk__add_separated_word(&attrs, 64, "maintenance", ", "); } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); } if (attrs != NULL) { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)%s%s%s", rsc->id, (const char *) attrs->str, desc ? " (" : "", desc ? desc : "", desc ? ")" : ""); g_string_free(attrs, TRUE); } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s%s%s%s", rsc->id, desc ? " (" : "", desc ? desc : "", desc ? ")" : ""); } } static bool skip_child_rsc(pcmk_resource_t *rsc, pcmk_resource_t *child, gboolean parent_passes, GList *only_rsc, uint32_t show_opts) { bool star_list = pcmk__list_of_1(only_rsc) && pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none); bool child_filtered = child->priv->fns->is_filtered(child, only_rsc, FALSE); bool child_active = child->priv->fns->active(child, FALSE); bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs); /* If the resource is in only_rsc by name (so, ignoring "*") then allow * it regardless of if it's active or not. 
*/ if (!star_list && !child_filtered) { return false; } else if (!child_filtered && (child_active || show_inactive)) { return false; } else if (parent_passes && (child_active || show_inactive)) { return false; } return true; } gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = rsc->priv->xml; xmlNode *xml_native_rsc = NULL; group_variant_data_t *group_data = NULL; const char *clone_id = NULL; pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); group_data = pcmk__assert_alloc(1, sizeof(group_variant_data_t)); group_data->last_child = NULL; rsc->priv->variant_opaque = group_data; // @COMPAT These are deprecated since 2.1.5 set_group_flag(rsc, PCMK_META_ORDERED, pcmk__group_ordered, pcmk__wo_group_order); set_group_flag(rsc, "collocated", pcmk__group_colocated, pcmk__wo_group_coloc); clone_id = crm_element_value(rsc->priv->xml, PCMK__META_CLONE); for (xml_native_rsc = pcmk__xe_first_child(xml_obj, PCMK_XE_PRIMITIVE, NULL, NULL); xml_native_rsc != NULL; xml_native_rsc = pcmk__xe_next(xml_native_rsc, PCMK_XE_PRIMITIVE)) { pcmk_resource_t *new_rsc = NULL; crm_xml_add(xml_native_rsc, PCMK__META_CLONE, clone_id); if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc, scheduler) != pcmk_rc_ok) { continue; } rsc->priv->children = g_list_append(rsc->priv->children, new_rsc); group_data->last_child = new_rsc; pcmk__rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id); } if (rsc->priv->children == NULL) { // Not possible with schema validation enabled free(group_data); rsc->priv->variant_opaque = NULL; pcmk__config_err("Group %s has no members", rsc->id); return FALSE; } return TRUE; } gboolean group_active(pcmk_resource_t *rsc, gboolean all) { gboolean c_all = TRUE; gboolean c_any = FALSE; for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; if (child_rsc->priv->fns->active(child_rsc, all)) { c_any = TRUE; } else { c_all = FALSE; } } if (c_any == FALSE) { return FALSE; } else if (all && c_all == FALSE) { return FALSE; } return TRUE; } PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__group_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; int rc = pcmk_rc_no_output; gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); desc = pe__resource_description(rsc, show_opts); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } if (rc == pcmk_rc_no_output) { char *count = pcmk__itoa(g_list_length(gIter)); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk__rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk__rsc_managed); const char *disabled = pcmk__btoa(pe__resource_is_disabled(rsc)); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_GROUP, PCMK_XA_ID, rsc->id, PCMK_XA_NUMBER_RESOURCES, count, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_DISABLED, disabled, PCMK_XA_DESCRIPTION, desc, NULL); 
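/* Passing true as the second argument makes this new PCMK_XE_GROUP element the current XML parent, so each member emitted via out->message() below is nested inside it; the parent is popped again after the loop (pcmk__output_xml_pop_parent). */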
free(count); pcmk__assert(rc == pcmk_rc_ok); } out->message(out, (const char *) child_rsc->priv->xml->name, show_opts, child_rsc, only_node, only_rsc); } if (rc == pcmk_rc_ok) { pcmk__output_xml_pop_parent(out); } return rc; } PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__group_default(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; int rc = pcmk_rc_no_output; gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); gboolean active = rsc->priv->fns->active(rsc, TRUE); gboolean partially_active = rsc->priv->fns->active(rsc, FALSE); desc = pe__resource_description(rsc, show_opts); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } if (pcmk_is_set(show_opts, pcmk_show_brief)) { GList *rscs = pe__filter_rsc_list(rsc->priv->children, only_rsc); if (rscs != NULL) { group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0, pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc); pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs); rc = pcmk_rc_ok; g_list_free(rscs); } } else { for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0, pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc); out->message(out, (const char *) child_rsc->priv->xml->name, show_opts, child_rsc, only_node, only_rsc); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } void group_free(pcmk_resource_t * rsc) { CRM_CHECK(rsc != NULL, return); pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; pcmk__assert(child_rsc != NULL); pcmk__rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); child_rsc->priv->fns->free(child_rsc); } pcmk__rsc_trace(rsc, "Freeing child list"); g_list_free(rsc->priv->children); common_free(rsc); } enum rsc_role_e group_resource_state(const pcmk_resource_t * rsc, gboolean current) { enum rsc_role_e group_role = pcmk_role_unknown; for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; enum rsc_role_e role = child_rsc->priv->fns->state(child_rsc, current); if (role > group_role) { group_role = role; } } pcmk__rsc_trace(rsc, "%s role: %s", rsc->id, pcmk_role_text(group_role)); return group_role; } gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; if (check_parent && pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { for (const GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { const pcmk_resource_t 
*child_rsc = iter->data; if (!child_rsc->priv->fns->is_filtered(child_rsc, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } /*! * \internal * \brief Get maximum group resource instances per node * * \param[in] rsc Group resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__group_max_per_node(const pcmk_resource_t *rsc) { pcmk__assert(pcmk__is_group(rsc)); return 1U; } diff --git a/lib/pengine/native.c b/lib/pengine/native.c index e2b8b32a95..3404949775 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -1,1178 +1,1177 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include -#include #include #include #include #include #include /*! * \internal * \brief Check whether a resource is active on multiple nodes */ static bool is_multiply_active(const pcmk_resource_t *rsc) { unsigned int count = 0; if (pcmk__is_primitive(rsc)) { pe__find_active_requires(rsc, &count); } return count > 1; } static void native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean failed) { int priority = 0; const bool promoted = (rsc->priv->orig_role == pcmk_role_promoted); if ((rsc->priv->priority == 0) || failed) { return; } if (promoted) { // Promoted instance takes base priority + 1 priority = rsc->priv->priority + 1; } else { priority = rsc->priv->priority; } node->priv->priority += priority; pcmk__rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)", pcmk__node_name(node), node->priv->priority, (promoted? "promoted " : ""), rsc->id, rsc->priv->priority, (promoted? " + 1" : "")); /* Priority of a resource running on a guest node is added to the cluster * node as well. */ if ((node->priv->remote != NULL) && (node->priv->remote->priv->launcher != NULL)) { const pcmk_resource_t *launcher = NULL; launcher = node->priv->remote->priv->launcher; for (GList *gIter = launcher->priv->active_nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *a_node = gIter->data; a_node->priv->priority += priority; pcmk__rsc_trace(rsc, "%s now has priority %d with %s'%s' " "(priority: %d%s) from guest node %s", pcmk__node_name(a_node), a_node->priv->priority, (promoted? "promoted " : ""), rsc->id, rsc->priv->priority, (promoted? " + 1" : ""), pcmk__node_name(node)); } } } void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler, gboolean failed) { pcmk_resource_t *parent = rsc->priv->parent; CRM_CHECK(node != NULL, return); for (GList *gIter = rsc->priv->active_nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *a_node = (pcmk_node_t *) gIter->data; if (pcmk__same_node(a_node, node)) { return; } } pcmk__rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pcmk__node_name(node), pcmk_is_set(rsc->flags, pcmk__rsc_managed)? 
"" : "(unmanaged)"); rsc->priv->active_nodes = g_list_append(rsc->priv->active_nodes, node); if (pcmk__is_primitive(rsc)) { node->details->running_rsc = g_list_append(node->details->running_rsc, rsc); native_priority_to_node(rsc, node, failed); if (node->details->maintenance) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(rsc, pcmk__rsc_maintenance); } } if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { pcmk_resource_t *p = parent; pcmk__rsc_info(rsc, "resource %s isn't managed", rsc->id); resource_location(rsc, node, PCMK_SCORE_INFINITY, "not_managed_default", scheduler); while(p && node->details->online) { /* add without the additional location constraint */ p->priv->active_nodes = g_list_append(p->priv->active_nodes, node); p = p->priv->parent; } return; } if (is_multiply_active(rsc)) { switch (rsc->priv->multiply_active_policy) { case pcmk__multiply_active_stop: { GHashTableIter gIter; pcmk_node_t *local_node = NULL; /* make sure it doesn't come up again */ if (rsc->priv->allowed_nodes != NULL) { g_hash_table_destroy(rsc->priv->allowed_nodes); } rsc->priv->allowed_nodes = pe__node_list2table(scheduler->nodes); g_hash_table_iter_init(&gIter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) { local_node->assign->score = -PCMK_SCORE_INFINITY; } } break; case pcmk__multiply_active_block: pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(rsc, pcmk__rsc_blocked); /* If the resource belongs to a group or bundle configured with * PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_BLOCK, block the entire * entity. */ if ((pcmk__is_group(parent) || pcmk__is_bundle(parent)) && (parent->priv->multiply_active_policy == pcmk__multiply_active_block)) { for (GList *gIter = parent->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child = gIter->data; pcmk__clear_rsc_flags(child, pcmk__rsc_managed); pcmk__set_rsc_flags(child, pcmk__rsc_blocked); } } break; // pcmk__multiply_active_restart, pcmk__multiply_active_unexpected default: /* The scheduler will do the right thing because the relevant * variables and flags are set when unpacking the history. */ break; } crm_debug("%s is active on multiple nodes including %s: %s", rsc->id, pcmk__node_name(node), pcmk__multiply_active_text(rsc)); } else { pcmk__rsc_trace(rsc, "Resource %s is active on %s", rsc->id, pcmk__node_name(node)); } if (parent != NULL) { native_add_running(parent, node, scheduler, FALSE); } } static void recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_unique); pcmk__insert_meta(rsc->priv, PCMK_META_GLOBALLY_UNIQUE, PCMK_VALUE_FALSE); g_list_foreach(rsc->priv->children, (GFunc) recursive_clear_unique, NULL); } gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { pcmk_resource_t *parent = uber_parent(rsc); const char *standard = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); uint32_t ra_caps = pcmk_get_ra_caps(standard); pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); // Only some agent standards support unique and promotable clones if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique) && pcmk_is_set(rsc->flags, pcmk__rsc_unique) && pcmk__is_clone(parent)) { /* @COMPAT We should probably reject this situation as an error (as we * do for promotable below) rather than warn and convert, but that would * be a backward-incompatible change that we should probably do with a * transform at a schema major version bump. 
*/ pe__force_anon(standard, parent, rsc->id, scheduler); /* Clear PCMK_META_GLOBALLY_UNIQUE on the parent and all its descendants * unpacked so far (clearing the parent should make any future children * unpacking correct). We have to clear this resource explicitly because * it isn't hooked into the parent's children yet. */ recursive_clear_unique(parent, NULL); recursive_clear_unique(rsc, NULL); } if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable) && pcmk_is_set(parent->flags, pcmk__rsc_promotable)) { pcmk__config_err("Resource %s is of type %s and therefore " "cannot be used as a promotable clone resource", rsc->id, standard); return FALSE; } return TRUE; } static bool rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags) { pcmk__rsc_trace(rsc, "Checking whether %s is on %s", rsc->id, pcmk__node_name(node)); if (pcmk_is_set(flags, pcmk_rsc_match_current_node) && (rsc->priv->active_nodes != NULL)) { for (GList *iter = rsc->priv->active_nodes; iter != NULL; iter = iter->next) { if (pcmk__same_node((pcmk_node_t *) iter->data, node)) { return true; } } } else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node) && (rsc->priv->assigned_node != NULL) && pcmk__same_node(rsc->priv->assigned_node, node)) { return true; } return false; } pcmk_resource_t * native_find_rsc(pcmk_resource_t *rsc, const char *id, const pcmk_node_t *on_node, int flags) { bool match = false; pcmk_resource_t *result = NULL; CRM_CHECK(id && rsc && rsc->id, return NULL); if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) { const char *rid = pcmk__xe_id(rsc->priv->xml); if (!pcmk__is_clone(pe__const_top_resource(rsc, false))) { match = false; } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) { match = true; } } else if (!strcmp(id, rsc->id)) { match = true; } else if (pcmk_is_set(flags, pcmk_rsc_match_history) && pcmk__str_eq(rsc->priv->history_id, id, pcmk__str_none)) { match = true; } else if (pcmk_is_set(flags, pcmk_rsc_match_basename) || (pcmk_is_set(flags, pcmk_rsc_match_anon_basename) && !pcmk_is_set(rsc->flags, pcmk__rsc_unique))) { match = pe_base_name_eq(rsc, id); } if (match && on_node) { if (!rsc_is_on_node(rsc, on_node, flags)) { match = false; } } if (match) { return rsc; } for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child = (pcmk_resource_t *) gIter->data; result = rsc->priv->fns->find_rsc(child, id, on_node, flags); if (result) { return result; } } return NULL; } // create is ignored char * native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create, const char *name, pcmk_scheduler_t *scheduler) { const char *value = NULL; GHashTable *params = NULL; CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); pcmk__rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); params = pe_rsc_params(rsc, node, scheduler); value = g_hash_table_lookup(params, name); if (value == NULL) { /* try meta attributes instead */ value = g_hash_table_lookup(rsc->priv->meta, name); } return pcmk__str_copy(value); } gboolean native_active(pcmk_resource_t * rsc, gboolean all) { for (GList *gIter = rsc->priv->active_nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *a_node = (pcmk_node_t *) gIter->data; if (a_node->details->unclean) { pcmk__rsc_trace(rsc, "Resource %s: %s is unclean", rsc->id, pcmk__node_name(a_node)); return TRUE; } else if (!a_node->details->online && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { pcmk__rsc_trace(rsc, "Resource %s: %s is offline", rsc->id, 
pcmk__node_name(a_node)); } else { pcmk__rsc_trace(rsc, "Resource %s active on %s", rsc->id, pcmk__node_name(a_node)); return TRUE; } } return FALSE; } struct print_data_s { long options; void *print_data; }; static const char * native_pending_state(const pcmk_resource_t *rsc) { const char *pending_state = NULL; if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_START, pcmk__str_none)) { pending_state = "Starting"; } else if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_STOP, pcmk__str_none)) { pending_state = "Stopping"; } else if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)) { pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_MIGRATE_FROM, pcmk__str_none)) { /* Work might be done in here. */ pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_PROMOTE, pcmk__str_none)) { pending_state = "Promoting"; } else if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_DEMOTE, pcmk__str_none)) { pending_state = "Demoting"; } return pending_state; } static const char * native_pending_action(const pcmk_resource_t *rsc) { const char *pending_action = NULL; if (pcmk__str_eq(rsc->priv->pending_action, PCMK_ACTION_MONITOR, pcmk__str_none)) { pending_action = "Monitoring"; /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * unpack.c:unpack_rsc_op(). */ #if 0 } else if (pcmk__str_eq(rsc->private->pending_action, "probe", pcmk__str_none)) { pending_action = "Checking"; #endif } return pending_action; } static enum rsc_role_e native_displayable_role(const pcmk_resource_t *rsc) { enum rsc_role_e role = rsc->priv->orig_role; if ((role == pcmk_role_started) && pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk__rsc_promotable)) { role = pcmk_role_unpromoted; } return role; } static const char * native_displayable_state(const pcmk_resource_t *rsc, bool print_pending) { const char *rsc_state = NULL; if (print_pending) { rsc_state = native_pending_state(rsc); } if (rsc_state == NULL) { rsc_state = pcmk_role_text(native_displayable_role(rsc)); } return rsc_state; } // Append a flag to resource description string's flags list static bool add_output_flag(GString *s, const char *flag_desc, bool have_flags) { g_string_append(s, (have_flags? ", " : " (")); g_string_append(s, flag_desc); return true; } // Append a node name to resource description string's node list static bool add_output_node(GString *s, const char *node, bool have_nodes) { g_string_append(s, (have_nodes? " " : " [ ")); g_string_append(s, node); return true; } /*! * \internal * \brief Create a string description of a resource * * \param[in] rsc Resource to describe * \param[in] name Desired identifier for the resource * \param[in] node If not NULL, node that resource is "on" * \param[in] show_opts Bitmask of pcmk_show_opt_e. * \param[in] target_role Resource's target role * \param[in] show_nodes Whether to display nodes when multiply active * * \return Newly allocated string description of resource * \note Caller must free the result with g_free(). 
*/ gchar * pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes) { const char *class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); const char *provider = NULL; const char *kind = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); GString *outstr = NULL; bool have_flags = false; if (!pcmk__is_primitive(rsc)) { return NULL; } CRM_CHECK(name != NULL, name = "unknown"); CRM_CHECK(kind != NULL, kind = "unknown"); CRM_CHECK(class != NULL, class = "unknown"); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); } if ((node == NULL) && (rsc->priv->lock_node != NULL)) { node = rsc->priv->lock_node; } if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only) || pcmk__list_of_multiple(rsc->priv->active_nodes)) { node = NULL; } outstr = g_string_sized_new(128); // Resource name and agent pcmk__g_strcat(outstr, name, "\t(", class, ((provider == NULL)? "" : ":"), pcmk__s(provider, ""), ":", kind, "):\t", NULL); // State on node if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { g_string_append(outstr, " ORPHANED"); } if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { enum rsc_role_e role = native_displayable_role(rsc); g_string_append(outstr, " FAILED"); if (role > pcmk_role_unpromoted) { pcmk__add_word(&outstr, 0, pcmk_role_text(role)); } } else { bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending); pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending)); } if (node) { pcmk__add_word(&outstr, 0, pcmk__node_name(node)); } // Failed probe operation if (native_displayable_role(rsc) == pcmk_role_stopped) { xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->priv->name : NULL); if (probe_op != NULL) { int rc; pcmk__scan_min_int(crm_element_value(probe_op, PCMK__XA_RC_CODE), &rc, 0); pcmk__g_strcat(outstr, " (", crm_exit_str(rc), ") ", NULL); } } // Flags, as: ( [...]) if (node && !(node->details->online) && node->details->unclean) { have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); } if ((node != NULL) && pcmk__same_node(node, rsc->priv->lock_node)) { have_flags = add_output_flag(outstr, "LOCKED", have_flags); } if (pcmk_is_set(show_opts, pcmk_show_pending)) { const char *pending_action = native_pending_action(rsc); if (pending_action != NULL) { have_flags = add_output_flag(outstr, pending_action, have_flags); } } if (target_role != NULL) { switch (pcmk_parse_role(target_role)) { case pcmk_role_unknown: pcmk__config_err("Invalid " PCMK_META_TARGET_ROLE " %s for resource %s", target_role, rsc->id); break; case pcmk_role_stopped: have_flags = add_output_flag(outstr, "disabled", have_flags); break; case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk__rsc_promotable)) { have_flags = add_output_flag(outstr, PCMK_META_TARGET_ROLE ":", have_flags); g_string_append(outstr, target_role); } break; default: /* Only show target role if it limits our abilities (i.e. ignore * Started, as it is the default anyways, and doesn't prevent * the resource from becoming promoted). 
*/ break; } } // Blocked or maintenance implies unmanaged if (pcmk_any_flags_set(rsc->flags, pcmk__rsc_blocked|pcmk__rsc_maintenance)) { if (pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) { have_flags = add_output_flag(outstr, "blocked", have_flags); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)) { have_flags = add_output_flag(outstr, "maintenance", have_flags); } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { have_flags = add_output_flag(outstr, "unmanaged", have_flags); } if (pcmk_is_set(rsc->flags, pcmk__rsc_ignore_failure)) { have_flags = add_output_flag(outstr, "failure ignored", have_flags); } if (have_flags) { g_string_append_c(outstr, ')'); } // User-supplied description if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description) || pcmk__list_of_multiple(rsc->priv->active_nodes)) { const char *desc = crm_element_value(rsc->priv->xml, PCMK_XA_DESCRIPTION); if (desc) { g_string_append(outstr, " ("); g_string_append(outstr, desc); g_string_append(outstr, ")"); } } if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only) && pcmk__list_of_multiple(rsc->priv->active_nodes)) { bool have_nodes = false; for (GList *iter = rsc->priv->active_nodes; iter != NULL; iter = iter->next) { pcmk_node_t *n = (pcmk_node_t *) iter->data; have_nodes = add_output_node(outstr, n->priv->name, have_nodes); } if (have_nodes) { g_string_append(outstr, " ]"); } } return g_string_free(outstr, FALSE); } int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, uint32_t show_opts) { const char *kind = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); const char *target_role = NULL; const char *cl = NULL; xmlNode *child = NULL; gchar *content = NULL; pcmk__assert((kind != NULL) && pcmk__is_primitive(rsc)); if (crm_is_true(g_hash_table_lookup(rsc->priv->meta, PCMK__META_INTERNAL_RSC)) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { cl = PCMK__VALUE_RSC_MANAGED; } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { cl = PCMK__VALUE_RSC_FAILED; } else if (pcmk__is_primitive(rsc) && (rsc->priv->active_nodes == NULL)) { cl = PCMK__VALUE_RSC_FAILED; } else if (pcmk__list_of_multiple(rsc->priv->active_nodes)) { cl = PCMK__VALUE_RSC_MULTIPLE; } else if (pcmk_is_set(rsc->flags, pcmk__rsc_ignore_failure)) { cl = PCMK__VALUE_RSC_FAILURE_IGNORED; } else { cl = PCMK__VALUE_RSC_OK; } child = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); child = pcmk__html_create(child, PCMK__XE_SPAN, NULL, cl); content = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); pcmk__xe_set_content(child, "%s", content); g_free(content); return pcmk_rc_ok; } int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, uint32_t show_opts) { const char *target_role = NULL; pcmk__assert(pcmk__is_primitive(rsc)); if (crm_is_true(g_hash_table_lookup(rsc->priv->meta, PCMK__META_INTERNAL_RSC)) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); { gchar *s = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); out->list_item(out, NULL, "%s", s); g_free(s); } 
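/* The text output is just the one-line summary built by pcmk__native_output_string(); the HTML variant above emits the same string but wraps it in a span whose CSS class reflects the resource state. */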
return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__resource_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); int rc = pcmk_rc_no_output; bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending); const char *class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); const char *prov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); char ra_name[LINE_MAX]; const char *rsc_state = native_displayable_state(rsc, print_pending); const char *target_role = NULL; const char *active = pcmk__btoa(rsc->priv->fns->active(rsc, TRUE)); const char *orphaned = pcmk__flag_text(rsc->flags, pcmk__rsc_removed); const char *blocked = pcmk__flag_text(rsc->flags, pcmk__rsc_blocked); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk__rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk__rsc_managed); const char *failed = pcmk__flag_text(rsc->flags, pcmk__rsc_failed); const char *ignored = pcmk__flag_text(rsc->flags, pcmk__rsc_ignore_failure); char *nodes_running_on = NULL; const char *pending = print_pending? native_pending_action(rsc) : NULL; const char *locked_to = NULL; const char *desc = pe__resource_description(rsc, show_opts); pcmk__assert(pcmk__is_primitive(rsc)); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } // Resource information snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class, ((prov == NULL)? "" : ":"), ((prov == NULL)? "" : prov), crm_element_value(rsc->priv->xml, PCMK_XA_TYPE)); target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); nodes_running_on = pcmk__itoa(g_list_length(rsc->priv->active_nodes)); if (rsc->priv->lock_node != NULL) { locked_to = rsc->priv->lock_node->priv->name; } rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_RESOURCE, PCMK_XA_ID, rsc_printable_id(rsc), PCMK_XA_RESOURCE_AGENT, ra_name, PCMK_XA_ROLE, rsc_state, PCMK_XA_TARGET_ROLE, target_role, PCMK_XA_ACTIVE, active, PCMK_XA_ORPHANED, orphaned, PCMK_XA_BLOCKED, blocked, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_FAILED, failed, PCMK_XA_FAILURE_IGNORED, ignored, PCMK_XA_NODES_RUNNING_ON, nodes_running_on, PCMK_XA_PENDING, pending, PCMK_XA_LOCKED_TO, locked_to, PCMK_XA_DESCRIPTION, desc, NULL); free(nodes_running_on); pcmk__assert(rc == pcmk_rc_ok); for (GList *gIter = rsc->priv->active_nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; const char *cached = pcmk__btoa(node->details->online); rc = pe__name_and_nvpairs_xml(out, false, PCMK_XE_NODE, PCMK_XA_NAME, node->priv->name, PCMK_XA_ID, node->priv->id, PCMK_XA_CACHED, cached, NULL); pcmk__assert(rc == pcmk_rc_ok); } pcmk__output_xml_pop_parent(out); return rc; } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__resource_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const pcmk_node_t *node = pcmk__current_node(rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } pcmk__assert(pcmk__is_primitive(rsc)); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = 
rsc->priv->pending_node; } return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts); } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__resource_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const pcmk_node_t *node = pcmk__current_node(rsc); pcmk__assert(pcmk__is_primitive(rsc)); if (rsc->priv->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->priv->pending_node; } return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts); } void native_free(pcmk_resource_t * rsc) { pcmk__rsc_trace(rsc, "Freeing resource action list (not the data)"); common_free(rsc); } enum rsc_role_e native_resource_state(const pcmk_resource_t * rsc, gboolean current) { enum rsc_role_e role = rsc->priv->next_role; if (current) { role = rsc->priv->orig_role; } pcmk__rsc_trace(rsc, "%s state: %s", rsc->id, pcmk_role_text(role)); return role; } /*! * \internal * \brief List nodes where a resource (or any of its children) is * * \param[in] rsc Resource to check * \param[out] list List to add result to * \param[in] target Which resource conditions to target (group of * enum pcmk__rsc_node flags) * * \return If list contains only one node, that node, or NULL otherwise */ pcmk_node_t * native_location(const pcmk_resource_t *rsc, GList **list, uint32_t target) { pcmk_node_t *one = NULL; GList *result = NULL; if (rsc->priv->children != NULL) { for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child = (pcmk_resource_t *) gIter->data; child->priv->fns->location(child, &result, target); } } else { if (pcmk_is_set(target, pcmk__rsc_node_current)) { result = g_list_copy(rsc->priv->active_nodes); } if (pcmk_is_set(target, pcmk__rsc_node_pending) && (rsc->priv->pending_node != NULL) && !pe_find_node_id(result, rsc->priv->pending_node->priv->id)) { result = g_list_append(result, (gpointer) rsc->priv->pending_node); } if (pcmk_is_set(target, pcmk__rsc_node_assigned) && (rsc->priv->assigned_node != NULL)) { result = g_list_append(result, rsc->priv->assigned_node); } } if (result && (result->next == NULL)) { one = result->data; } if (list) { GList *gIter = result; for (; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; if ((*list == NULL) || (pe_find_node_id(*list, node->priv->id) == NULL)) { *list = g_list_append(*list, node); } } } g_list_free(result); return one; } static void get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table) { GList *gIter = rsc_list; for (; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; const char *class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); const char *kind = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); int offset = 0; char buffer[LINE_MAX]; int *rsc_counter = NULL; int *active_counter = NULL; if (!pcmk__is_primitive(rsc)) { continue; } offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { const char *prov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); if (prov != NULL) { offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", prov); } } offset += 
snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind); CRM_LOG_ASSERT(offset > 0); if (rsc_table) { rsc_counter = g_hash_table_lookup(rsc_table, buffer); if (rsc_counter == NULL) { rsc_counter = pcmk__assert_alloc(1, sizeof(int)); *rsc_counter = 0; g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter); } (*rsc_counter)++; } if (active_table) { for (GList *gIter2 = rsc->priv->active_nodes; gIter2 != NULL; gIter2 = gIter2->next) { pcmk_node_t *node = (pcmk_node_t *) gIter2->data; GHashTable *node_table = NULL; if (node->details->unclean == FALSE && node->details->online == FALSE && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { continue; } node_table = g_hash_table_lookup(active_table, node->priv->name); if (node_table == NULL) { node_table = pcmk__strkey_table(free, free); g_hash_table_insert(active_table, strdup(node->priv->name), node_table); } active_counter = g_hash_table_lookup(node_table, buffer); if (active_counter == NULL) { active_counter = pcmk__assert_alloc(1, sizeof(int)); *active_counter = 0; g_hash_table_insert(node_table, strdup(buffer), active_counter); } (*active_counter)++; } } } } static void destroy_node_table(gpointer data) { GHashTable *node_table = data; if (node_table) { g_hash_table_destroy(node_table); } } int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts) { GHashTable *rsc_table = pcmk__strkey_table(free, free); GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table); GList *sorted_rscs; int rc = pcmk_rc_no_output; get_rscs_brief(rsc_list, rsc_table, active_table); /* Make a list of the rsc_table keys so that it can be sorted. This is to make sure * output order stays consistent between systems. */ sorted_rscs = g_hash_table_get_keys(rsc_table); sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp); for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) { char *type = (char *) gIter->data; int *rsc_counter = g_hash_table_lookup(rsc_table, type); GList *sorted_nodes = NULL; int active_counter_all = 0; /* Also make a list of the active_table keys so it can be sorted. If there's * more than one instance of a type of resource running, we need the nodes to * be sorted to make sure output order stays consistent between systems. */ sorted_nodes = g_hash_table_get_keys(active_table); sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp); for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) { char *node_name = (char *) gIter2->data; GHashTable *node_table = g_hash_table_lookup(active_table, node_name); int *active_counter = NULL; if (node_table == NULL) { continue; } active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) { node_name = NULL; } if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s", *active_counter, rsc_counter ? *rsc_counter : 0, type, (*active_counter > 0) && node_name ? node_name : ""); } else { out->list_item(out, NULL, "%d\t(%s):\tActive %s", *active_counter, type, (*active_counter > 0) && node_name ? node_name : ""); } rc = pcmk_rc_ok; } if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) { out->list_item(out, NULL, "%d/%d\t(%s):\tActive", active_counter_all, rsc_counter ? 
*rsc_counter : 0, type); rc = pcmk_rc_ok; } if (sorted_nodes) { g_list_free(sorted_nodes); } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } if (sorted_rscs) { g_list_free(sorted_rscs); } return rc; } gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent) { if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) { return FALSE; } else if (check_parent && (rsc->priv->parent != NULL)) { const pcmk_resource_t *up = pe__const_top_resource(rsc, true); return up->priv->fns->is_filtered(up, only_rsc, FALSE); } return TRUE; } /*! * \internal * \brief Get maximum primitive resource instances per node * * \param[in] rsc Primitive resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__primitive_max_per_node(const pcmk_resource_t *rsc) { pcmk__assert(pcmk__is_primitive(rsc)); return 1U; } diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c index 4d7fa39fe2..b5c49b801c 100644 --- a/lib/pengine/rules_alerts.c +++ b/lib/pengine/rules_alerts.c @@ -1,312 +1,311 @@ /* * Copyright 2015-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include -#include #include #include #include /*! * \internal * \brief Unpack an alert's or alert recipient's meta attributes * * \param[in,out] basenode Alert or recipient XML * \param[in,out] entry Where to store unpacked values * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far * * \return Standard Pacemaker return code */ static int get_meta_attrs_from_cib(xmlNode *basenode, pcmk__alert_t *entry, guint *max_timeout) { GHashTable *config_hash = pcmk__strkey_table(free, free); crm_time_t *now = crm_time_new(NULL); const char *value = NULL; int rc = pcmk_rc_ok; pcmk_rule_input_t rule_input = { .now = now, }; pcmk_unpack_nvpair_blocks(basenode, PCMK_XE_META_ATTRIBUTES, NULL, &rule_input, config_hash, NULL); crm_time_free(now); value = g_hash_table_lookup(config_hash, PCMK_META_ENABLED); if ((value != NULL) && !crm_is_true(value)) { // No need to continue unpacking rc = pcmk_rc_disabled; goto done; } value = g_hash_table_lookup(config_hash, PCMK_META_TIMEOUT); if (value) { long long timeout_ms = crm_get_msec(value); entry->timeout = (int) QB_MIN(timeout_ms, INT_MAX); if (entry->timeout <= 0) { if (entry->timeout == 0) { crm_trace("Alert %s uses default timeout of %dmsec", entry->id, PCMK__ALERT_DEFAULT_TIMEOUT_MS); } else { pcmk__config_warn("Alert %s has invalid timeout value '%s', " "using default (%d ms)", entry->id, value, PCMK__ALERT_DEFAULT_TIMEOUT_MS); } entry->timeout = PCMK__ALERT_DEFAULT_TIMEOUT_MS; } else { crm_trace("Alert %s uses timeout of %dmsec", entry->id, entry->timeout); } if (entry->timeout > *max_timeout) { *max_timeout = entry->timeout; } } value = g_hash_table_lookup(config_hash, PCMK_META_TIMESTAMP_FORMAT); if (value) { /* hard to do any checks here as nearly anything can * be a valid time-format-string */ entry->tstamp_format = strdup(value); crm_trace("Alert %s uses timestamp format '%s'", entry->id, entry->tstamp_format); } done: g_hash_table_destroy(config_hash); return rc; } static void
get_envvars_from_cib(xmlNode *basenode, pcmk__alert_t *entry) { xmlNode *child; if ((basenode == NULL) || (entry == NULL)) { return; } child = pcmk__xe_first_child(basenode, PCMK_XE_INSTANCE_ATTRIBUTES, NULL, NULL); if (child == NULL) { return; } if (entry->envvars == NULL) { entry->envvars = pcmk__strkey_table(free, free); } for (child = pcmk__xe_first_child(child, PCMK_XE_NVPAIR, NULL, NULL); child != NULL; child = pcmk__xe_next(child, PCMK_XE_NVPAIR)) { const char *name = crm_element_value(child, PCMK_XA_NAME); const char *value = crm_element_value(child, PCMK_XA_VALUE); if (value == NULL) { value = ""; } pcmk__insert_dup(entry->envvars, name, value); crm_trace("Alert %s: added environment variable %s='%s'", entry->id, name, value); } } static void unpack_alert_filter(xmlNode *basenode, pcmk__alert_t *entry) { xmlNode *select = pcmk__xe_first_child(basenode, PCMK_XE_SELECT, NULL, NULL); xmlNode *event_type = NULL; uint32_t flags = pcmk__alert_none; for (event_type = pcmk__xe_first_child(select, NULL, NULL, NULL); event_type != NULL; event_type = pcmk__xe_next(event_type, NULL)) { if (pcmk__xe_is(event_type, PCMK_XE_SELECT_FENCING)) { flags |= pcmk__alert_fencing; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_NODES)) { flags |= pcmk__alert_node; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_RESOURCES)) { flags |= pcmk__alert_resource; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_ATTRIBUTES)) { xmlNode *attr; const char *attr_name; int nattrs = 0; flags |= pcmk__alert_attribute; for (attr = pcmk__xe_first_child(event_type, PCMK_XE_ATTRIBUTE, NULL, NULL); attr != NULL; attr = pcmk__xe_next(attr, PCMK_XE_ATTRIBUTE)) { attr_name = crm_element_value(attr, PCMK_XA_NAME); if (attr_name) { if (nattrs == 0) { g_strfreev(entry->select_attribute_name); entry->select_attribute_name = NULL; } ++nattrs; entry->select_attribute_name = pcmk__realloc(entry->select_attribute_name, (nattrs + 1) * sizeof(char*)); entry->select_attribute_name[nattrs - 1] = strdup(attr_name); entry->select_attribute_name[nattrs] = NULL; } } } } if (flags != pcmk__alert_none) { entry->flags = flags; crm_debug("Alert %s receives events: attributes:%s%s%s%s", entry->id, (pcmk_is_set(flags, pcmk__alert_attribute)? (entry->select_attribute_name? "some" : "all") : "none"), (pcmk_is_set(flags, pcmk__alert_fencing)? " fencing" : ""), (pcmk_is_set(flags, pcmk__alert_node)? " nodes" : ""), (pcmk_is_set(flags, pcmk__alert_resource)? " resources" : "")); } } /*! * \internal * \brief Unpack an alert or an alert recipient * * \param[in,out] alert Alert or recipient XML * \param[in,out] entry Where to store unpacked values * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far * * \return Standard Pacemaker return code */ static int unpack_alert(xmlNode *alert, pcmk__alert_t *entry, guint *max_timeout) { int rc = pcmk_rc_ok; get_envvars_from_cib(alert, entry); rc = get_meta_attrs_from_cib(alert, entry, max_timeout); if (rc == pcmk_rc_ok) { unpack_alert_filter(alert, entry); } return rc; } /*! * \internal * \brief Unpack a CIB alerts section * * \param[in] alerts XML of alerts section * * \return List of unpacked alert entries * * \note Unlike most unpack functions, this is not used by the scheduler itself, * but is supplied for use by daemons that need to send alerts. 
*/ GList * pe_unpack_alerts(const xmlNode *alerts) { xmlNode *alert; pcmk__alert_t *entry; guint max_timeout = 0; GList *alert_list = NULL; if (alerts == NULL) { return alert_list; } for (alert = pcmk__xe_first_child(alerts, PCMK_XE_ALERT, NULL, NULL); alert != NULL; alert = pcmk__xe_next(alert, PCMK_XE_ALERT)) { xmlNode *recipient; int recipients = 0; const char *alert_id = pcmk__xe_id(alert); const char *alert_path = crm_element_value(alert, PCMK_XA_PATH); /* The schema should enforce this, but to be safe ... */ if (alert_id == NULL) { pcmk__config_warn("Ignoring invalid alert without " PCMK_XA_ID); crm_log_xml_info(alert, "missing-id"); continue; } if (alert_path == NULL) { pcmk__config_warn("Ignoring alert %s: No " PCMK_XA_PATH, alert_id); continue; } entry = pcmk__alert_new(alert_id, alert_path); if (unpack_alert(alert, entry, &max_timeout) != pcmk_rc_ok) { // Don't allow recipients to override if entire alert is disabled crm_debug("Alert %s is disabled", entry->id); pcmk__free_alert(entry); continue; } if (entry->tstamp_format == NULL) { entry->tstamp_format = strdup(PCMK__ALERT_DEFAULT_TSTAMP_FORMAT); } crm_debug("Alert %s: path=%s timeout=%dms tstamp-format='%s' %u vars", entry->id, entry->path, entry->timeout, entry->tstamp_format, (entry->envvars? g_hash_table_size(entry->envvars) : 0)); for (recipient = pcmk__xe_first_child(alert, PCMK_XE_RECIPIENT, NULL, NULL); recipient != NULL; recipient = pcmk__xe_next(recipient, PCMK_XE_RECIPIENT)) { pcmk__alert_t *recipient_entry = pcmk__dup_alert(entry); recipients++; recipient_entry->recipient = crm_element_value_copy(recipient, PCMK_XA_VALUE); if (unpack_alert(recipient, recipient_entry, &max_timeout) != pcmk_rc_ok) { crm_debug("Alert %s: recipient %s is disabled", entry->id, recipient_entry->id); pcmk__free_alert(recipient_entry); continue; } alert_list = g_list_prepend(alert_list, recipient_entry); crm_debug("Alert %s has recipient %s with value %s and %d envvars", entry->id, pcmk__xe_id(recipient), recipient_entry->recipient, (recipient_entry->envvars? g_hash_table_size(recipient_entry->envvars) : 0)); } if (recipients == 0) { alert_list = g_list_prepend(alert_list, entry); } else { pcmk__free_alert(entry); } } return alert_list; } /*! * \internal * \brief Free an alert list generated by pe_unpack_alerts() * * \param[in,out] alert_list Alert list to free */ void pe_free_alert_list(GList *alert_list) { if (alert_list) { g_list_free_full(alert_list, (GDestroyNotify) pcmk__free_alert); } } diff --git a/lib/pengine/rules_compat.c b/lib/pengine/rules_compat.c index d8f7da897e..eceee75833 100644 --- a/lib/pengine/rules_compat.c +++ b/lib/pengine/rules_compat.c @@ -1,121 +1,121 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include // NULL #include // gboolean, GList, GHashTable #include // xmlNode #include #include // crm_time_t #include // enum rsc_role_e #include -#include // pcmk_rule_input_t, etc. +#include // pcmk_rule_input_t, etc. #include // pcmk__nvpair_unpack_t, etc. 
CRM_TRACE_INIT_DATA(pe_rules); // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include #include gboolean test_rule(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now) { pcmk_rule_input_t rule_input = { .node_attrs = node_hash, .now = now, }; return pcmk_evaluate_rule(rule, &rule_input, NULL) == pcmk_rc_ok; } /*! * \internal * \brief Map pe_rule_eval_data_t to pcmk_rule_input_t * * \param[out] new New data struct * \param[in] old Old data struct */ static void map_rule_input(pcmk_rule_input_t *new, const pe_rule_eval_data_t *old) { if (old == NULL) { return; } new->now = old->now; new->node_attrs = old->node_hash; if (old->rsc_data != NULL) { new->rsc_standard = old->rsc_data->standard; new->rsc_provider = old->rsc_data->provider; new->rsc_agent = old->rsc_data->agent; } if (old->match_data != NULL) { new->rsc_params = old->match_data->params; new->rsc_meta = old->match_data->meta; if (old->match_data->re != NULL) { new->rsc_id = old->match_data->re->string; new->rsc_id_submatches = old->match_data->re->pmatch; new->rsc_id_nmatches = old->match_data->re->nregs; } } if (old->op_data != NULL) { new->op_name = old->op_data->op_name; new->op_interval_ms = old->op_data->interval; } } void pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name, const pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, gboolean overwrite, crm_time_t *next_change) { GList *pairs = pcmk__xe_dereference_children(xml_obj, set_name); if (pairs) { pcmk__nvpair_unpack_t data = { .values = hash, .first_id = always_first, .overwrite = overwrite, .next_change = next_change, }; map_rule_input(&(data.rule_input), rule_data); pairs = g_list_sort_with_data(pairs, pcmk__cmp_nvpair_blocks, &data); g_list_foreach(pairs, pcmk__unpack_nvpair_block, &data); g_list_free(pairs); } } void pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, GHashTable *hash, const char *always_first, gboolean overwrite, crm_time_t *now, crm_time_t *next_change) { pe_rule_eval_data_t rule_data = { .node_hash = node_hash, .now = now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; pe_eval_nvpairs(NULL, xml_obj, set_name, &rule_data, hash, always_first, overwrite, next_change); } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 72202a0258..1fe58ab9c2 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,5100 +1,5099 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include -#include #include #include CRM_TRACE_INIT_DATA(pe_status); // A (parsed) resource action history entry struct action_history { pcmk_resource_t *rsc; // Resource that history is for pcmk_node_t *node; // Node that history is for xmlNode *xml; // History entry XML // Parsed from entry XML const char *id; // XML ID of history entry const char *key; // Operation key of action const char *task; // Action name const char *exit_reason; // Exit reason given for result guint interval_ms; // Action interval int call_id; // Call ID of action int expected_exit_status; // Expected exit status of action int exit_status; // Actual exit status of action int execution_status; // Execution status of action }; /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than * use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the * flag is stringified more readably in log messages. */ #define set_config_flag(scheduler, option, flag) do { \ GHashTable *config_hash = (scheduler)->priv->options; \ const char *scf_value = pcmk__cluster_option(config_hash, (option)); \ \ if (scf_value != NULL) { \ if (crm_is_true(scf_value)) { \ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } else { \ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } \ } \ } while(0) static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum pcmk__on_fail *failed); static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node); static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler); static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler); static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler); /*! * \internal * \brief Check whether a node is a dangling guest node * * \param[in] node Node to check * * \return true if \p node had a Pacemaker Remote connection resource with a * launcher that was removed from the CIB, otherwise false. */ static bool is_dangling_guest_node(pcmk_node_t *node) { return pcmk__is_pacemaker_remote_node(node) && (node->priv->remote != NULL) && (node->priv->remote->priv->launcher == NULL) && pcmk_is_set(node->priv->remote->flags, pcmk__rsc_removed_launched); } /*! 
* \brief Schedule a fence action for a node * * \param[in,out] scheduler Scheduler data * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider * \c PCMK_OPT_PRIORITY_FENCING_DELAY */ void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); if (pcmk__is_guest_or_bundle_node(node)) { // Fence a guest or bundle node by marking its launcher as failed pcmk_resource_t *rsc = node->priv->remote->priv->launcher; if (!pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", pcmk__node_name(node), reason, rsc->id); } else { pcmk__sched_warn(scheduler, "Guest node %s will be fenced " "(by recovering its guest resource %s): %s", pcmk__node_name(node), rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. */ pcmk__set_node_flags(node, pcmk__node_remote_reset); pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", pcmk__node_name(node), reason); pcmk__set_rsc_flags(node->priv->remote, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } else if (pcmk__is_remote_node(node)) { pcmk_resource_t *rsc = node->priv->remote; if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", pcmk__node_name(node), reason); } else if (!pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)) { pcmk__set_node_flags(node, pcmk__node_remote_reset); pcmk__sched_warn(scheduler, "Remote node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean", reason); } else { pcmk__sched_warn(scheduler, "Cluster node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? 
"will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \ "[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \ "or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \ "and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \ "//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \ "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \ "/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler) { xmlXPathObjectPtr result = NULL; if (!pcmk_is_set(scheduler->flags, flag)) { result = xpath_search(scheduler->input, xpath); if (result && (numXpathResults(result) > 0)) { pcmk__set_scheduler_flags(scheduler, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler) { const char *value = NULL; GHashTable *config_hash = pcmk__strkey_table(free, free); const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, }; scheduler->priv->options = config_hash; pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_input, config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, scheduler); pcmk__validate_cluster_options(config_hash); set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES, pcmk__sched_probe_resources); if (!pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) { crm_info("Startup probes: disabled (dangerous)"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_info("Watchdog-based self-fencing will be performed via SBD if " "fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " is nonzero"); pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_fencing); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. 
*/ set_if_xpath(pcmk__sched_enable_unfencing, XPATH_ENABLE_UNFENCING, scheduler); value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT); pcmk_parse_interval_spec(value, &(scheduler->priv->fence_timeout_ms)); crm_debug("Default fencing action timeout: %s", pcmk__readable_interval(scheduler->priv->fence_timeout_ms)); set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED, pcmk__sched_fencing_enabled); if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { crm_debug("STONITH of failed nodes is enabled"); } else { crm_debug("STONITH of failed nodes is disabled"); } scheduler->priv->fence_action = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_ACTION); crm_trace("STONITH will %s nodes", scheduler->priv->fence_action); set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING, pcmk__sched_concurrent_fencing); if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) { crm_debug("Concurrent fencing is enabled"); } else { crm_debug("Concurrent fencing is disabled"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY); if (value) { pcmk_parse_interval_spec(value, &(scheduler->priv->priority_fencing_ms)); crm_trace("Priority fencing delay is %s", pcmk__readable_interval(scheduler->priv->priority_fencing_ms)); } set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES, pcmk__sched_stop_all); crm_debug("Stop all active resources: %s", pcmk__flag_text(scheduler->flags, pcmk__sched_stop_all)); set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER, pcmk__sched_symmetric_cluster); if (pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY); if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_ignore; } else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_freeze; } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_demote; } else if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY, NULL)) { if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { int do_panic = 0; crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC, &do_panic); if (do_panic || pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) { scheduler->no_quorum_policy = pcmk_no_quorum_fence; } else { crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop': cluster has never had quorum"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop' because fencing is disabled"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { scheduler->no_quorum_policy = pcmk_no_quorum_stop; } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case pcmk_no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case pcmk_no_quorum_demote: crm_debug("On loss of quorum: " "Demote promotable resources and stop other resources"); break; case pcmk_no_quorum_fence: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case pcmk_no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES, pcmk__sched_stop_removed_resources); if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) { 
crm_trace("Orphan resources are stopped"); } else { crm_trace("Orphan resources are ignored"); } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS, pcmk__sched_cancel_removed_actions); if (pcmk_is_set(scheduler->flags, pcmk__sched_cancel_removed_actions)) { crm_trace("Orphan resource actions are stopped"); } else { crm_trace("Orphan resource actions are ignored"); } set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE, pcmk__sched_in_maintenance); crm_trace("Maintenance mode: %s", pcmk__flag_text(scheduler->flags, pcmk__sched_in_maintenance)); set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL, pcmk__sched_start_failure_fatal); if (pcmk_is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) { crm_trace("Start failures are always fatal"); } else { crm_trace("Start failures are handled by failcount"); } if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING, pcmk__sched_startup_fencing); } if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pcmk__warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes"); } pe__unpack_node_health_scores(scheduler); scheduler->priv->placement_strategy = pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY); crm_trace("Placement strategy: %s", scheduler->priv->placement_strategy); set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK, pcmk__sched_shutdown_lock); if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) { value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); pcmk_parse_interval_spec(value, &(scheduler->priv->shutdown_lock_ms)); crm_trace("Resources will be locked to nodes that were cleanly " "shut down (locks expire after %s)", pcmk__readable_interval(scheduler->priv->shutdown_lock_ms)); } else { crm_trace("Resources will not be locked to nodes that were cleanly " "shut down"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); pcmk_parse_interval_spec(value, &(scheduler->priv->node_pending_ms)); if (scheduler->priv->node_pending_ms == 0U) { crm_trace("Do not fence pending nodes"); } else { crm_trace("Fence pending nodes after %s", pcmk__readable_interval(scheduler->priv->node_pending_ms)); } return TRUE; } /*! * \internal * \brief Create a new node object in scheduler data * * \param[in] id ID of new node * \param[in] uname Name of new node * \param[in] type Type of new node * \param[in] score Score of new node * \param[in,out] scheduler Scheduler data * * \return Newly created node object * \note The returned object is part of the scheduler data and should not be * freed separately. 
*/ pcmk_node_t * pe_create_node(const char *id, const char *uname, const char *type, int score, pcmk_scheduler_t *scheduler) { enum pcmk__node_variant variant = pcmk__node_variant_cluster; pcmk_node_t *new_node = NULL; if (pcmk_find_node(scheduler, uname) != NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } if (pcmk__str_eq(type, PCMK_VALUE_MEMBER, pcmk__str_null_matches|pcmk__str_casei)) { variant = pcmk__node_variant_cluster; } else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) { variant = pcmk__node_variant_remote; } else { pcmk__config_err("Ignoring node %s with unrecognized type '%s'", pcmk__s(uname, "without name"), type); return NULL; } new_node = calloc(1, sizeof(pcmk_node_t)); if (new_node == NULL) { pcmk__sched_err(scheduler, "Could not allocate memory for node %s", uname); return NULL; } new_node->assign = calloc(1, sizeof(struct pcmk__node_assignment)); new_node->details = calloc(1, sizeof(struct pcmk__node_details)); new_node->priv = calloc(1, sizeof(pcmk__node_private_t)); if ((new_node->assign == NULL) || (new_node->details == NULL) || (new_node->priv == NULL)) { free(new_node->assign); free(new_node->details); free(new_node->priv); free(new_node); pcmk__sched_err(scheduler, "Could not allocate memory for node %s", uname); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->assign->score = score; new_node->priv->id = id; new_node->priv->name = uname; new_node->priv->flags = pcmk__node_probes_allowed; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->running_rsc = NULL; new_node->priv->scheduler = scheduler; new_node->priv->variant = variant; new_node->priv->attrs = pcmk__strkey_table(free, free); new_node->priv->utilization = pcmk__strkey_table(free, free); new_node->priv->digest_cache = pcmk__strkey_table(free, pe__free_digests); if (pcmk__is_pacemaker_remote_node(new_node)) { pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "remote"); pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_remote_nodes); } else { pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "cluster"); } scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node, pe__cmp_node_name); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = pcmk__xe_id(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = pcmk__xe_first_child(xml_obj, PCMK_XE_META_ATTRIBUTES, NULL, NULL); attr_set != NULL; attr_set = pcmk__xe_next(attr_set, PCMK_XE_META_ATTRIBUTES)) { for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL); attr != NULL; attr = pcmk__xe_next(attr, NULL)) { const char *value = crm_element_value(attr, PCMK_XA_VALUE); const char *name = crm_element_value(attr, PCMK_XA_NAME); if (name == NULL) { // Sanity continue; } if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) { remote_name = value; } else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) { remote_server = value; } else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) { remote_port = value; } else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) { connect_timeout = value; } else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) { remote_allow_migrate = value; } else if (strcmp(name, PCMK_META_IS_MANAGED) == 
0) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (pe_find_resource(data->priv->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node) { if ((new_node->priv->variant == pcmk__node_variant_remote) && (new_node->priv->remote == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } } gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; pcmk_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; for (xml_obj = pcmk__xe_first_child(xml_nodes, PCMK_XE_NODE, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, PCMK_XE_NODE)) { int score = 0; int rc = pcmk__xe_get_score(xml_obj, PCMK_XA_SCORE, &score, 0); new_node = NULL; id = crm_element_value(xml_obj, PCMK_XA_ID); uname = crm_element_value(xml_obj, PCMK_XA_UNAME); type = crm_element_value(xml_obj, PCMK_XA_TYPE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" PCMK_XE_NODE "> entry in configuration without id"); continue; } if (rc != pcmk_rc_ok) { // Not possible with schema validation enabled pcmk__config_warn("Using 0 as score for node %s " "because '%s' is not a valid score: %s", pcmk__s(uname, "without name"), crm_element_value(xml_obj, PCMK_XA_SCORE), pcmk_rc_str(rc)); } new_node = pe_create_node(id, uname, type, score, scheduler); if (new_node == NULL) { return FALSE; } handle_startup_fencing(scheduler, new_node); add_node_attrs(xml_obj, new_node, FALSE, scheduler); crm_trace("Done with node %s", crm_element_value(xml_obj, PCMK_XA_UNAME)); } return TRUE; } static void unpack_launcher(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *launcher_id = NULL; if (rsc->priv->children != NULL) { g_list_foreach(rsc->priv->children, (GFunc) unpack_launcher, scheduler); return; } launcher_id = g_hash_table_lookup(rsc->priv->meta, PCMK__META_CONTAINER); if ((launcher_id != NULL) && !pcmk__str_eq(launcher_id, rsc->id, pcmk__str_none)) { pcmk_resource_t *launcher = pe_find_resource(scheduler->priv->resources, launcher_id); if (launcher != NULL) { rsc->priv->launcher = launcher; launcher->priv->launched = g_list_append(launcher->priv->launched, rsc); pcmk__rsc_trace(rsc, "Resource %s's launcher is %s", rsc->id, launcher_id); } else { pcmk__config_err("Resource %s: Unknown " PCMK__META_CONTAINER " %s", rsc->id, launcher_id); } } } gboolean unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. 
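* xml_contains_remote_node() below recognizes such primitives, and a matching
* node object is then created with pe_create_node().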
*/ if (xml_contains_remote_node(xml_obj)) { new_node_id = pcmk__xe_id(xml_obj); /* The pcmk_find_node() check ensures we don't iterate over an * expanded node that has already been added to the node list */ if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found remote node %s defined by resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen). */ if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, scheduler); if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL); xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2, NULL)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, scheduler); if (new_node_id && (pcmk_find_node(scheduler, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, pcmk__xe_id(xml_obj2), pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, 0, scheduler); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc) { pcmk_node_t *remote_node = NULL; if (!pcmk_is_set(new_rsc->flags, pcmk__rsc_is_remote_connection)) { return; } if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pcmk_find_node(scheduler, new_rsc->id); CRM_CHECK(remote_node != NULL, return); pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s", new_rsc->id, pcmk__node_name(remote_node)); remote_node->priv->remote = new_rsc; if (new_rsc->priv->launcher == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(scheduler, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ pcmk__insert_dup(remote_node->priv->attrs, CRM_ATTR_KIND, "container"); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] scheduler Scheduler data * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when pe__unpack_resource() calls resource_location() */ gboolean unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; GList *gIter = NULL; scheduler->priv->templates = pcmk__strkey_table(free, pcmk__free_idref); for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { pcmk_resource_t *new_rsc = NULL; const char *id = pcmk__xe_id(xml_obj); if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring <%s> resource without ID", xml_obj->name); continue; } if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) { if (g_hash_table_lookup_extended(scheduler->priv->templates, id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ pcmk__insert_dup(scheduler->priv->templates, id, NULL); } continue; } crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id); if (pe__unpack_resource(xml_obj, &new_rsc, NULL, scheduler) == pcmk_rc_ok) { scheduler->priv->resources = g_list_append(scheduler->priv->resources, new_rsc); pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { pcmk__config_err("Ignoring <%s> resource '%s' " "because configuration is invalid", xml_obj->name, id); } } for (gIter = scheduler->priv->resources; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; unpack_launcher(rsc, scheduler); link_rsc2remotenode(scheduler, rsc); } scheduler->priv->resources = g_list_sort(scheduler->priv->resources, pe__cmp_rsc_priority); if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) { /* Ignore */ } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled) && !pcmk_is_set(scheduler->flags, pcmk__sched_have_fencing)) { pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined"); pcmk__config_err("Either configure some or disable STONITH with the " PCMK_OPT_STONITH_ENABLED " option"); pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } /*! 
* \internal * \brief Validate the levels in a fencing topology * * \param[in] xml \c PCMK_XE_FENCING_TOPOLOGY element */ void pcmk__validate_fencing_topology(const xmlNode *xml) { if (xml == NULL) { return; } CRM_CHECK(pcmk__xe_is(xml, PCMK_XE_FENCING_TOPOLOGY), return); for (const xmlNode *level = pcmk__xe_first_child(xml, PCMK_XE_FENCING_LEVEL, NULL, NULL); level != NULL; level = pcmk__xe_next(level, PCMK_XE_FENCING_LEVEL)) { const char *id = pcmk__xe_id(level); int index = 0; if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring fencing level without ID"); continue; } if (crm_element_value_int(level, PCMK_XA_INDEX, &index) != 0) { pcmk__config_err("Ignoring fencing level %s with invalid index", id); continue; } if ((index < ST__LEVEL_MIN) || (index > ST__LEVEL_MAX)) { pcmk__config_err("Ignoring fencing level %s with out-of-range " "index %d", id, index); } } } gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler) { xmlNode *xml_tag = NULL; scheduler->priv->tags = pcmk__strkey_table(free, pcmk__free_idref); for (xml_tag = pcmk__xe_first_child(xml_tags, PCMK_XE_TAG, NULL, NULL); xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag, PCMK_XE_TAG)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = pcmk__xe_id(xml_tag); if (tag_id == NULL) { pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID, (const char *) xml_tag->name); continue; } for (xml_obj_ref = pcmk__xe_first_child(xml_tag, PCMK_XE_OBJ_REF, NULL, NULL); xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref, PCMK_XE_OBJ_REF)) { const char *obj_ref = pcmk__xe_id(xml_obj_ref); if (obj_ref == NULL) { pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID, xml_obj_ref->name, tag_id); continue; } pcmk__add_idref(scheduler->priv->tags, tag_id, obj_ref); } } return TRUE; } /*! 
* \internal * \brief Unpack a ticket state entry * * \param[in] xml_ticket XML ticket state to unpack * \param[in,out] userdata Scheduler data * * \return pcmk_rc_ok (to always continue unpacking further entries) */ static int unpack_ticket_state(xmlNode *xml_ticket, void *userdata) { pcmk_scheduler_t *scheduler = userdata; const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; pcmk__ticket_t *ticket = NULL; ticket_id = pcmk__xe_id(xml_ticket); if (pcmk__str_empty(ticket_id)) { return pcmk_rc_ok; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(scheduler->priv->ticket_constraints, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, scheduler); if (ticket == NULL) { return pcmk_rc_ok; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = pcmk__xml_attr_value(xIter); if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) { continue; } pcmk__insert_dup(ticket->state, prop_name, prop_value); } granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED); if (granted && crm_is_true(granted)) { pcmk__set_ticket_flags(ticket, pcmk__ticket_granted); crm_info("We have ticket '%s'", ticket->id); } else { pcmk__clear_ticket_flags(ticket, pcmk__ticket_granted); crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED); if (last_granted) { long long last_granted_ll = 0LL; int rc = pcmk__scan_ll(last_granted, &last_granted_ll, 0LL); if (rc != pcmk_rc_ok) { crm_warn("Using %lld instead of invalid " PCMK_XA_LAST_GRANTED " value '%s' in state for ticket %s: %s", last_granted_ll, last_granted, ticket->id, pcmk_rc_str(rc)); } ticket->last_granted = (time_t) last_granted_ll; } standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY); if (standby && crm_is_true(standby)) { pcmk__set_ticket_flags(ticket, pcmk__ticket_standby); if (pcmk_is_set(ticket->flags, pcmk__ticket_granted)) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { pcmk__clear_ticket_flags(ticket, pcmk__ticket_standby); } crm_trace("Done with ticket state for %s", ticket_id); return pcmk_rc_ok; } static void unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = NULL; pcmk_resource_t *rsc = NULL; int maint = 0; if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { return; } if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) { return; } crm_trace("Processing Pacemaker Remote node %s", pcmk__node_name(this_node)); pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE), &maint, 0); if (maint) { pcmk__set_node_flags(this_node, pcmk__node_remote_maint); } else { pcmk__clear_node_flags(this_node, pcmk__node_remote_maint); } rsc = this_node->priv->remote; if (!pcmk_is_set(this_node->priv->flags, pcmk__node_remote_reset)) { this_node->details->unclean = FALSE; pcmk__set_node_flags(this_node, pcmk__node_seen); } attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); add_node_attrs(attrs, this_node, TRUE, scheduler); if (pe__shutdown_requested(this_node)) { crm_info("%s is shutting down", pcmk__node_name(this_node)); this_node->details->shutdown = TRUE; } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is 
in standby mode", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current)) || ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed))) { crm_info("%s is in maintenance mode", pcmk__node_name(this_node)); this_node->details->maintenance = TRUE; } discovery = pcmk__node_attr(this_node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__warn_once(pcmk__wo_rdisc_enabled, "Support for the " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " node attribute is deprecated and will be removed" " (and behave as 'true') in a future release."); if (pcmk__is_remote_node(this_node) && !pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute on Pacemaker Remote node %s" " because fencing is disabled", pcmk__node_name(this_node)); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("%s has resource discovery disabled", pcmk__node_name(this_node)); pcmk__clear_node_flags(this_node, pcmk__node_probes_allowed); } } } /*! * \internal * \brief Unpack a cluster node's transient attributes * * \param[in] state CIB node state XML * \param[in,out] node Cluster node whose attributes are being unpacked * \param[in,out] scheduler Scheduler data */ static void unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); add_node_attrs(attrs, node, TRUE, scheduler); if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is in standby mode", pcmk__node_name(node)); pcmk__set_node_flags(node, pcmk__node_standby); } if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current))) { crm_info("%s is in maintenance mode", pcmk__node_name(node)); node->details->maintenance = TRUE; } discovery = pcmk__node_attr(node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute for %s because disabling resource" " discovery is not allowed for cluster nodes", pcmk__node_name(node)); } } /*! * \internal * \brief Unpack a node state entry (first pass) * * Unpack one node state entry from status. This unpacks information from the * \C PCMK__XE_NODE_STATE element itself and node attributes inside it, but not * the resource history inside it. Multiple passes through the status are needed * to fully unpack everything. 
* * \param[in] state CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *id = NULL; const char *uname = NULL; pcmk_node_t *this_node = NULL; id = crm_element_value(state, PCMK_XA_ID); if (id == NULL) { pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without " PCMK_XA_ID); crm_log_xml_info(state, "missing-id"); return; } uname = crm_element_value(state, PCMK_XA_UNAME); if (uname == NULL) { /* If a joining peer makes the cluster acquire the quorum from Corosync * but has not joined the controller CPG membership yet, it's possible * that the created PCMK__XE_NODE_STATE entry doesn't have a * PCMK_XA_UNAME yet. Recognize the node as pending and wait for it to * join CPG. */ crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" " "without " PCMK_XA_UNAME, id); } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { crm_notice("Ignoring recorded state for removed node with name %s and " PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id); return; } if (pcmk__is_pacemaker_remote_node(this_node)) { int remote_fenced = 0; /* We can't determine the online status of Pacemaker Remote nodes until * after all resource history has been unpacked. In this first pass, we * do need to mark whether the node has been fenced, as this plays a * role during unpacking cluster node resource state. */ pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED), &remote_fenced, 0); if (remote_fenced) { pcmk__set_node_flags(this_node, pcmk__node_remote_fenced); } else { pcmk__clear_node_flags(this_node, pcmk__node_remote_fenced); } return; } unpack_transient_attributes(state, this_node, scheduler); /* Provisionally mark this cluster node as clean. We have at least seen it * in the current cluster's lifetime. */ this_node->details->unclean = FALSE; pcmk__set_node_flags(this_node, pcmk__node_seen); crm_trace("Determining online status of cluster node %s (id %s)", pcmk__node_name(this_node), id); determine_online_status(state, this_node, scheduler); if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate) && this_node->details->online && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(scheduler, this_node, "cluster does not have quorum", FALSE); } } /*! * \internal * \brief Unpack nodes' resource history as much as possible * * Unpack as many nodes' resource history as possible in one pass through the * status. We need to process Pacemaker Remote nodes' connections/containers * before unpacking their history; the connection/container history will be * in another node's history, so it might take multiple passes to unpack * everything. 
* * \param[in] status CIB XML status section * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done, * or EAGAIN if more unpacking remains to be done) */ static int unpack_node_history(const xmlNode *status, bool fence, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; // Loop through all PCMK__XE_NODE_STATE entries in CIB status for (const xmlNode *state = pcmk__xe_first_child(status, PCMK__XE_NODE_STATE, NULL, NULL); state != NULL; state = pcmk__xe_next(state, PCMK__XE_NODE_STATE)) { const char *id = pcmk__xe_id(state); const char *uname = crm_element_value(state, PCMK_XA_UNAME); pcmk_node_t *this_node = NULL; if ((id == NULL) || (uname == NULL)) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history from malformed " PCMK__XE_NODE_STATE " without id and/or uname"); continue; } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history for node %s because " "no longer in configuration", id); continue; } if (pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) { crm_trace("Not unpacking resource history for node %s because " "already unpacked", id); continue; } if (fence) { // We're processing all remaining nodes } else if (pcmk__is_guest_or_bundle_node(this_node)) { /* We can unpack a guest node's history only after we've unpacked * other resource history to the point that we know that the node's * connection and containing resource are both up. */ const pcmk_resource_t *remote = this_node->priv->remote; const pcmk_resource_t *launcher = remote->priv->launcher; if ((remote->priv->orig_role != pcmk_role_started) || (launcher->priv->orig_role != pcmk_role_started)) { crm_trace("Not unpacking resource history for guest node %s " "because launcher and connection are not known to " "be up", id); continue; } } else if (pcmk__is_remote_node(this_node)) { /* We can unpack a remote node's history only after we've unpacked * other resource history to the point that we know that the node's * connection is up, with the exception of when shutdown locks are * in use. */ pcmk_resource_t *rsc = this_node->priv->remote; if ((rsc == NULL) || (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock) && (rsc->priv->orig_role != pcmk_role_started))) { crm_trace("Not unpacking resource history for remote node %s " "because connection is not known to be up", id); continue; } /* If fencing and shutdown locks are disabled and we're not processing * unseen nodes, then we don't want to unpack offline nodes until online * nodes have been unpacked. This allows us to number active clone * instances first. */ } else if (!pcmk_any_flags_set(scheduler->flags, pcmk__sched_fencing_enabled |pcmk__sched_shutdown_lock) && !this_node->details->online) { crm_trace("Not unpacking resource history for offline " "cluster node %s", id); continue; } if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); unpack_handle_remote_attrs(this_node, state, scheduler); } crm_trace("Unpacking resource history for %snode %s", (fence? 
"unseen " : ""), id); pcmk__set_node_flags(this_node, pcmk__node_unpacked); unpack_node_lrm(this_node, state, scheduler); rc = EAGAIN; // Other node histories might depend on this one } return rc; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? */ gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler) { xmlNode *state = NULL; crm_trace("Beginning unpack"); if (scheduler->priv->ticket_constraints == NULL) { scheduler->priv->ticket_constraints = pcmk__strkey_table(free, destroy_ticket); } for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL; state = pcmk__xe_next(state, NULL)) { if (pcmk__xe_is(state, PCMK_XE_TICKETS)) { pcmk__xe_foreach_child(state, PCMK__XE_TICKET_STATE, unpack_ticket_state, scheduler); } else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { unpack_node_state(state, scheduler); } } while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) { crm_trace("Another pass through node resource histories is needed"); } // Now catch any nodes we didn't see unpack_node_history(status, pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled), scheduler); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (scheduler->priv->stop_needed != NULL) { for (GList *item = scheduler->priv->stop_needed; item != NULL; item = item->next) { pcmk_resource_t *container = item->data; pcmk_node_t *node = pcmk__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(scheduler->priv->stop_needed); scheduler->priv->stop_needed = NULL; } /* Now that we know status of all Pacemaker Remote connections and nodes, * we can stop connections for node shutdowns, and check the online status * of remote/guest nodes that didn't have any node history to unpack. */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *this_node = gIter->data; if (!pcmk__is_pacemaker_remote_node(this_node)) { continue; } if (this_node->details->shutdown && (this_node->priv->remote != NULL)) { pe__set_next_role(this_node->priv->remote, pcmk_role_stopped, "remote shutdown"); } if (!pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) { determine_remote_online_status(scheduler, this_node); } } return TRUE; } /*! * \internal * \brief Unpack node's time when it became a member at the cluster layer * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * \param[in,out] scheduler Scheduler data * * \return Epoch time when node became a cluster member * (or scheduler effective time for legacy entries) if a member, * 0 if not a member, or -1 if no valid information available */ static long long unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler) { const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM); int member = 0; if (member_time == NULL) { return -1LL; } else if (crm_str_to_boolean(member_time, &member) == 1) { /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was * recorded as a boolean for a DC < 2.1.7, or the node is pending * shutdown and has left the CPG, in which case it was set to 1 to avoid * fencing for PCMK_OPT_NODE_PENDING_TIMEOUT. * * We return the effective time for in_ccm=1 because what's important to * avoid fencing is that effective time minus this value is less than * the pending node timeout. */ return member? 
(long long) get_effective_time(scheduler) : 0LL; } else { long long when_member = 0LL; if ((pcmk__scan_ll(member_time, &when_member, 0LL) != pcmk_rc_ok) || (when_member < 0LL)) { crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM " in " PCMK__XE_NODE_STATE " entry", member_time); return -1LL; } return when_member; } } /*! * \internal * \brief Unpack node's time when it became online in process group * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * * \return Epoch time when node became online in process group (or 0 if not * online, or 1 for legacy online entries) */ static long long unpack_node_online(const xmlNode *node_state) { const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD); // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline" if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE, pcmk__str_casei|pcmk__str_null_matches)) { return 0LL; } else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) { return 1LL; } else { long long when_online = 0LL; if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok) || (when_online < 0)) { crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in " PCMK__XE_NODE_STATE " entry, assuming offline", peer_time); return 0LL; } return when_online; } } /*! * \internal * \brief Unpack node attribute for user-requested fencing * * \param[in] node Node to check * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status * * \return \c true if fencing has been requested for \p node, otherwise \c false */ static bool unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state) { long long value = 0LL; int value_i = 0; int rc = pcmk_rc_ok; const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE, NULL, pcmk__rsc_node_current); // Value may be boolean or an epoch time if (crm_str_to_boolean(value_s, &value_i) == 1) { return (value_i != 0); } rc = pcmk__scan_ll(value_s, &value, 0LL); if (rc == pcmk_rc_ok) { return (value > 0); } crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE "node attribute for %s: %s", value_s, pcmk__node_name(node), pcmk_rc_str(rc)); return false; } static gboolean determine_online_status_no_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); if (when_member <= 0) { crm_trace("Node %s is %sdown", pcmk__node_name(this_node), ((when_member < 0)? "presumed " : "")); } else if (when_online > 0) { if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { online = TRUE; } else { crm_debug("Node %s is not ready to run resources: %s", pcmk__node_name(this_node), join); } } else if (!pcmk_is_set(this_node->priv->flags, pcmk__node_expected_up)) { crm_trace("Node %s controller is down: " "member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } else { /* mark it unclean */ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE); crm_info("Node %s member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } return online; } /*! 
* \internal * \brief Check whether a node has taken too long to join controller group * * \param[in,out] scheduler Scheduler data * \param[in] node Node to check * \param[in] when_member Epoch time when node became a cluster member * \param[in] when_online Epoch time when node joined controller group * * \return true if node has been pending (on the way up) longer than * \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false * \note This will also update the cluster's recheck time if appropriate. */ static inline bool pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, long long when_member, long long when_online) { if ((scheduler->priv->node_pending_ms > 0U) && (when_member > 0) && (when_online <= 0)) { // There is a timeout on pending nodes, and node is pending time_t timeout = when_member + pcmk__timeout_ms2s(scheduler->priv->node_pending_ms); if (get_effective_time(node->priv->scheduler) >= timeout) { return true; // Node has timed out } // Node is pending, but still has time pe__update_recheck_time(timeout, scheduler, "pending node timeout"); } return false; } static bool determine_online_status_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { bool termination_requested = unpack_node_terminate(this_node, node_state); const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); /* - PCMK__XA_JOIN ::= member|down|pending|banned - PCMK_XA_EXPECTED ::= member|down @COMPAT with entries recorded for DCs < 2.1.7 - PCMK__XA_IN_CCM ::= true|false - PCMK_XA_CRMD ::= online|offline Since crm_feature_set 3.18.0 (pacemaker-2.1.7): - PCMK__XA_IN_CCM ::= |0 Since when node has been a cluster member. A value 0 of means the node is not a cluster member. - PCMK_XA_CRMD ::= |0 Since when peer has been online in CPG. A value 0 means the peer is offline in CPG. */ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, ""), (termination_requested? 
" (termination requested)" : "")); if (this_node->details->shutdown) { crm_debug("%s is shutting down", pcmk__node_name(this_node)); /* Slightly different criteria since we can't shut down a dead peer */ return (when_online > 0); } if (when_member < 0) { pe_fence_node(scheduler, this_node, "peer has not been seen by the cluster", FALSE); return false; } if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) { pe_fence_node(scheduler, this_node, "peer failed Pacemaker membership criteria", FALSE); } else if (termination_requested) { if ((when_member <= 0) && (when_online <= 0) && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) { crm_info("%s was fenced as requested", pcmk__node_name(this_node)); return false; } pe_fence_node(scheduler, this_node, "fencing was requested", false); } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_null_matches)) { if (pending_too_long(scheduler, this_node, when_member, when_online)) { pe_fence_node(scheduler, this_node, "peer pending timed out on joining the process group", FALSE); } else if ((when_member > 0) || (when_online > 0)) { crm_info("- %s is not ready to run resources", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", pcmk__node_name(this_node)); } } else if (when_member <= 0) { // Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes pe_fence_node(scheduler, this_node, "peer is no longer part of the cluster", TRUE); } else if (when_online <= 0) { pe_fence_node(scheduler, this_node, "peer process is no longer available", FALSE); /* Everything is running at this point, now check join state */ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) { crm_info("%s is active", pcmk__node_name(this_node)); } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) { crm_info("%s is not ready to run resources", pcmk__node_name(this_node)); pcmk__set_node_flags(this_node, pcmk__node_standby); this_node->details->pending = TRUE; } else { pe_fence_node(scheduler, this_node, "peer was in an unknown state", FALSE); } return (when_member > 0); } static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node) { pcmk_resource_t *rsc = this_node->priv->remote; pcmk_resource_t *launcher = NULL; pcmk_node_t *host = NULL; const char *node_type = "Remote"; if (rsc == NULL) { /* This is a leftover node state entry for a former Pacemaker Remote * node whose connection resource was removed. Consider it offline. */ crm_trace("Pacemaker Remote node %s is considered OFFLINE because " "its connection resource has been removed from the CIB", this_node->priv->id); this_node->details->online = FALSE; return; } launcher = rsc->priv->launcher; if (launcher != NULL) { node_type = "Guest"; if (pcmk__list_of_1(rsc->priv->active_nodes)) { host = rsc->priv->active_nodes->data; } } /* If the resource is currently started, mark it online. */ if (rsc->priv->orig_role == pcmk_role_started) { this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if ((rsc->priv->orig_role == pcmk_role_started) && (rsc->priv->next_role == pcmk_role_stopped)) { crm_trace("%s node %s shutting down because connection resource is stopping", node_type, this_node->priv->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. 
*/ if ((launcher != NULL) && pcmk_is_set(launcher->flags, pcmk__rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->priv->id); this_node->details->online = FALSE; pcmk__set_node_flags(this_node, pcmk__node_remote_reset); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", node_type, this_node->priv->id); this_node->details->online = FALSE; } else if ((rsc->priv->orig_role == pcmk_role_stopped) || ((launcher != NULL) && (launcher->priv->orig_role == pcmk_role_stopped))) { crm_trace("%s node %s OFFLINE because its resource is stopped", node_type, this_node->priv->id); this_node->details->online = FALSE; pcmk__clear_node_flags(this_node, pcmk__node_remote_reset); } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->priv->id); this_node->details->online = FALSE; pcmk__set_node_flags(this_node, pcmk__node_remote_reset); } else { crm_trace("%s node %s is %s", node_type, this_node->priv->id, this_node->details->online? "ONLINE" : "OFFLINE"); } } static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); CRM_CHECK(this_node != NULL, return); this_node->details->shutdown = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { pcmk__set_node_flags(this_node, pcmk__node_expected_up); } if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { online = determine_online_status_no_fencing(scheduler, node_state, this_node); } else { online = determine_online_status_fencing(scheduler, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->assign->score = -PCMK_SCORE_INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->assign->score = -PCMK_SCORE_INFINITY; } if (this_node->details->unclean) { pcmk__sched_warn(scheduler, "%s is unclean", pcmk__node_name(this_node)); } else if (!this_node->details->online) { crm_trace("%s is offline", pcmk__node_name(this_node)); } else if (this_node->details->shutdown) { crm_info("%s is shutting down", pcmk__node_name(this_node)); } else if (this_node->details->pending) { crm_info("%s is pending", pcmk__node_name(this_node)); } else if (pcmk_is_set(this_node->priv->flags, pcmk__node_standby)) { crm_info("%s is in standby", pcmk__node_name(this_node)); } else if (this_node->details->maintenance) { crm_info("%s is in maintenance", pcmk__node_name(this_node)); } else { crm_info("%s is online", pcmk__node_name(this_node)); } } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!pcmk__str_empty(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! 
* \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; pcmk__assert(end != NULL); basename = strndup(last_rsc_id, end - last_rsc_id + 1); pcmk__assert(basename != NULL); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; pcmk__assert(end != NULL); zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char)); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static pcmk_resource_t * create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE); pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none); crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { pcmk_node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pcmk_find_node(scheduler, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, 0, scheduler); } link_rsc2remotenode(scheduler, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) { // This removed resource needs to be mapped to a launcher crm_trace("Launched resource %s was removed from the configuration", rsc_id); pcmk__set_rsc_flags(rsc, pcmk__rsc_removed_launched); } pcmk__set_rsc_flags(rsc, pcmk__rsc_removed); scheduler->priv->resources = g_list_append(scheduler->priv->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history * * \param[in,out] parent Clone resource that orphan will be added to * \param[in] rsc_id Orphan's resource ID * \param[in] node Where orphan is active (for logging only) * \param[in,out] scheduler Scheduler data * * \return Newly added orphaned instance of \p parent */ static pcmk_resource_t * create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *top = pe__create_clone_child(parent, scheduler); pcmk_resource_t *orphan = NULL; // find_rsc() because we might be a cloned group orphan = top->priv->fns->find_rsc(top, rsc_id, NULL, pcmk_rsc_match_clone_only); pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, pcmk__node_name(node)); return orphan; } /*! 
* \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX * instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX * instances are already active). * * \param[in,out] scheduler Scheduler data * \param[in] node Node on which to check for instance * \param[in,out] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (no instance) */ static pcmk_resource_t * find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, pcmk_resource_t *parent, const char *rsc_id) { GList *rIter = NULL; pcmk_resource_t *rsc = NULL; pcmk_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; pcmk__assert(pcmk__is_anonymous_clone(parent)); // Check for active (or partially active, for cloned groups) instance pcmk__rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, pcmk__node_name(node), parent->id); for (rIter = parent->priv->children; (rIter != NULL) && (rsc == NULL); rIter = rIter->next) { GList *locations = NULL; pcmk_resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if * PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and * (3) when we re-run calculations on the same scheduler data as part of * a simulation. */ child->priv->fns->location(child, &locations, pcmk__rsc_node_current |pcmk__rsc_node_pending); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (pcmk__same_node((pcmk_node_t *) locations->data, node)) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->priv->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if PCMK_META_GLOBALLY_UNIQUE is switched from true * to false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. 
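* (When that happens, skip_inactive is set below so those extra histories fall through to create_anonymous_orphan() instead of reusing an inactive instance number.)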
*/ if (rsc->priv->active_nodes != NULL) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, pcmk__node_name(node)); skip_inactive = TRUE; rsc = NULL; } else { pcmk__rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && !pcmk_is_set(child->flags, pcmk__rsc_blocked)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->priv->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); /* ... but don't use it if it was already associated with a * pending action on another node */ if (inactive_instance != NULL) { const pcmk_node_t *pending_node = NULL; pending_node = inactive_instance->priv->pending_node; if ((pending_node != NULL) && !pcmk__same_node(pending_node, node)) { inactive_instance = NULL; } } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pcmk__rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or * PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we * don't want to consume a valid instance number for unclean nodes. Such * instances may appear to be active according to the history, but should be * considered inactive, so we can start an instance elsewhere. Treat such * instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pcmk__is_guest_or_bundle_node(node) && !pe__is_universal_clone(parent, scheduler)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler); pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static pcmk_resource_t * unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, const char *rsc_id) { pcmk_resource_t *rsc = NULL; pcmk_resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0, * we create a single :0 orphan to match against here. 
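* For example, operation history recorded under a hypothetical ID "myrsc" would be looked up here as "myrsc:0".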
*/ char *clone0_id = clone_zero(rsc_id); pcmk_resource_t *clone0 = pe_find_resource(scheduler->priv->resources, clone0_id); if (clone0 && !pcmk_is_set(clone0->flags, pcmk__rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->priv->variant > pcmk__rsc_variant_primitive) { crm_trace("Resource history for %s is orphaned " "because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pcmk__is_anonymous_clone(parent)) { if (pcmk__is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->priv->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(scheduler, node, parent, base); free(base); pcmk__assert(rsc != NULL); } } if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none) && !pcmk__str_eq(rsc_id, rsc->priv->history_id, pcmk__str_none)) { pcmk__str_update(&(rsc->priv->history_id), rsc_id); pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, pcmk__node_name(node), rsc->id, pcmk_is_set(rsc->flags, pcmk__rsc_removed)? " (ORPHAN)" : ""); } return rsc; } static pcmk_resource_t * process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, pcmk__node_name(node)); rsc = create_fake_resource(rsc_id, rsc_entry, scheduler); if (rsc == NULL) { return NULL; } if (!pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -PCMK_SCORE_INFINITY, "__orphan_do_not_run__", scheduler); } return rsc; } static void process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node, enum pcmk__on_fail on_fail) { pcmk_node_t *tmpnode = NULL; char *reason = NULL; enum pcmk__on_fail save_on_fail = pcmk__on_fail_ignore; pcmk_scheduler_t *scheduler = NULL; bool known_active = false; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; known_active = (rsc->priv->orig_role > pcmk_role_stopped); pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(node), pcmk__on_fail_text(on_fail)); /* process current state */ if (rsc->priv->orig_role != pcmk_role_unknown) { pcmk_resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->priv->probed_nodes, node->priv->id) == NULL) { pcmk_node_t *n = pe__copy_node(node); pcmk__rsc_trace(rsc, "%s (%s in history) known on %s", rsc->id, pcmk__s(rsc->priv->history_id, "the same"), pcmk__node_name(n)); g_hash_table_insert(iter->priv->probed_nodes, (gpointer) n->priv->id, n); } if (pcmk_is_set(iter->flags, pcmk__rsc_unique)) { break; } iter = iter->priv->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if (known_active && !node->details->online && !node->details->maintenance && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. 
When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (pcmk__is_guest_or_bundle_node(node)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); should_fence = TRUE; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { if (pcmk__is_remote_node(node) && (node->priv->remote != NULL) && !pcmk_is_set(node->priv->remote->flags, pcmk__rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. */ pcmk__clear_node_flags(node, pcmk__node_seen); reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(scheduler, node, reason, FALSE); } free(reason); } /* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). */ save_on_fail = on_fail; if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = pcmk__on_fail_ignore; } switch (on_fail) { case pcmk__on_fail_ignore: /* nothing to do */ break; case pcmk__on_fail_demote: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed); demote_action(rsc, node, FALSE); break; case pcmk__on_fail_fence_node: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(scheduler, node, reason, FALSE); free(reason); break; case pcmk__on_fail_standby_node: pcmk__set_node_flags(node, pcmk__node_standby|pcmk__node_fail_standby); break; case pcmk__on_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(rsc, pcmk__rsc_blocked); break; case pcmk__on_fail_ban: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -PCMK_SCORE_INFINITY, "__action_migration_auto__", scheduler); break; case pcmk__on_fail_stop: pe__set_next_role(rsc, pcmk_role_stopped, PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP); break; case pcmk__on_fail_restart: if (known_active) { pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); stop_action(rsc, node, FALSE); } break; case pcmk__on_fail_restart_container: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); if ((rsc->priv->launcher != NULL) && pcmk__is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. 
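* (The launcher is remembered on scheduler->priv->stop_needed rather than being stopped immediately.)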
*/ scheduler->priv->stop_needed = g_list_prepend(scheduler->priv->stop_needed, rsc->priv->launcher); } else if (rsc->priv->launcher != NULL) { stop_action(rsc->priv->launcher, node, FALSE); } else if (known_active) { stop_action(rsc, node, FALSE); } break; case pcmk__on_fail_reset_remote: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { tmpnode = NULL; if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) { tmpnode = pcmk_find_node(scheduler, rsc->id); } if (pcmk__is_remote_node(tmpnode) && !pcmk_is_set(tmpnode->priv->flags, pcmk__node_remote_fenced)) { /* The remote connection resource failed in a way that * should result in fencing the remote node. */ pe_fence_node(scheduler, tmpnode, "remote connection is unrecoverable", FALSE); } } /* require the stop action regardless if fencing is occurring or not. */ if (known_active) { stop_action(rsc, node, FALSE); } /* if reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->priv->remote_reconnect_ms > 0U) { pe__set_next_role(rsc, pcmk_role_stopped, "remote reset"); } break; } /* Ensure a remote connection failure forces an unclean Pacemaker Remote * node to be fenced. By marking the node as seen, the failure will result * in a fencing operation regardless if we're going to attempt to reconnect * in this transition. */ if (pcmk_all_flags_set(rsc->flags, pcmk__rsc_failed|pcmk__rsc_is_remote_connection)) { tmpnode = pcmk_find_node(scheduler, rsc->id); if (tmpnode && tmpnode->details->unclean) { pcmk__set_node_flags(tmpnode, pcmk__node_seen); } } if (known_active) { if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { crm_notice("Removed resource %s is active on %s and will be " "stopped when possible", rsc->id, pcmk__node_name(node)); } else { crm_notice("Removed resource %s must be stopped manually on %s " "because " PCMK_OPT_STOP_ORPHAN_RESOURCES " is set to false", rsc->id, pcmk__node_name(node)); } } native_add_running(rsc, node, scheduler, (save_on_fail != pcmk__on_fail_ignore)); switch (on_fail) { case pcmk__on_fail_ignore: break; case pcmk__on_fail_demote: case pcmk__on_fail_block: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed); break; default: pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); break; } } else if ((rsc->priv->history_id != NULL) && (strchr(rsc->priv->history_id, ':') != NULL)) { /* Only do this for older status sections that included instance numbers * Otherwise stopped instances will appear as orphans */ pcmk__rsc_trace(rsc, "Clearing history ID %s for %s (stopped)", rsc->priv->history_id, rsc->id); free(rsc->priv->history_id); rsc->priv->history_id = NULL; } else { GList *possible_matches = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, FALSE); GList *gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { pcmk_action_t *stop = (pcmk_action_t *) gIter->data; pcmk__set_action_flags(stop, pcmk__action_optional); } g_list_free(possible_matches); } /* A successful stop after migrate_to on the migration source doesn't make * the partially migrated resource stopped on the migration target. 
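* In that case, restore the role to started so the resource is still treated as active on the migration target.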
*/ if ((rsc->priv->orig_role == pcmk_role_stopped) && (rsc->priv->active_nodes != NULL) && (rsc->priv->partial_migration_target != NULL) && pcmk__same_node(rsc->priv->partial_migration_source, node)) { rsc->priv->orig_role = pcmk_role_started; } } /* create active recurring operations as optional */ static void process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc, int start_index, int stop_index, GList *sorted_op_list, pcmk_scheduler_t *scheduler) { int counter = -1; const char *task = NULL; const char *status = NULL; GList *gIter = sorted_op_list; pcmk__assert(rsc != NULL); pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = pcmk__xe_id(rsc_op); counter++; if (node->details->online == FALSE) { pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline", rsc->id, pcmk__node_name(node)); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active", id, pcmk__node_name(node)); continue; } else if (counter < start_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d", id, pcmk__node_name(node), counter); continue; } crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring", id, pcmk__node_name(node)); continue; } status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(status, "-1", pcmk__str_casei)) { pcmk__rsc_trace(rsc, "Skipping %s on %s: status", id, pcmk__node_name(node)); continue; } task = crm_element_value(rsc_op, PCMK_XA_OPERATION); /* create the action */ key = pcmk__op_key(rsc->id, task, interval_ms); pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node)); custom_action(rsc, key, task, node, TRUE, scheduler); } } void calculate_active_ops(const GList *sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; *stop_index = -1; *start_index = -1; for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { const xmlNode *rsc_op = (const xmlNode *) iter->data; counter++; task = crm_element_value(rsc_op, PCMK_XA_OPERATION); status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei) && pcmk__str_eq(status, "0", pcmk__str_casei)) { *stop_index = counter; } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE); if (pcmk__strcase_any_of(rc, "0", "8", NULL)) { implied_monitor_start = counter; } } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, NULL)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { time_t lock_time = 0; // 
When lock started (i.e. node shutdown time) if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK, &lock_time) == pcmk_ok) && (lock_time != 0)) { if ((scheduler->priv->shutdown_lock_ms > 0U) && (get_effective_time(scheduler) > (lock_time + pcmk__timeout_ms2s(scheduler->priv->shutdown_lock_ms)))) { pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired", rsc->id, pcmk__node_name(node)); pe__clear_resource_history(rsc, node); } else { rsc->priv->lock_node = node; rsc->priv->lock_time = lock_time; } } } /*! * \internal * \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status * * \param[in,out] node Node whose status is being unpacked * \param[in] rsc_entry \c PCMK__XE_LRM_RESOURCE XML being unpacked * \param[in,out] scheduler Scheduler data * * \return Resource corresponding to the entry, or NULL if no operation history */ static pcmk_resource_t * unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource, pcmk_scheduler_t *scheduler) { GList *gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = pcmk_role_unknown; const char *rsc_id = pcmk__xe_id(lrm_resource); pcmk_resource_t *rsc = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum pcmk__on_fail on_fail = pcmk__on_fail_ignore; enum rsc_role_e saved_role = pcmk_role_unknown; if (rsc_id == NULL) { pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE " entry: No " PCMK_XA_ID); crm_log_xml_info(lrm_resource, "missing-id"); return NULL; } crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s", rsc_id, pcmk__node_name(node)); /* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort * them */ for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL, NULL); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) { op_list = g_list_prepend(op_list, rsc_op); } if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } } /* find the resource */ rsc = unpack_find_resource(scheduler, node, rsc_id); if (rsc == NULL) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } else { rsc = process_orphan_resource(lrm_resource, node, scheduler); } } pcmk__assert(rsc != NULL); // Check whether the resource is "shutdown-locked" to this node if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) { unpack_shutdown_lock(lrm_resource, rsc, node, scheduler); } /* process operations */ saved_role = rsc->priv->orig_role; rsc->priv->orig_role = pcmk_role_unknown; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, scheduler); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail); if (get_target_role(rsc, &req_role)) { if ((rsc->priv->next_role == pcmk_role_unknown) || (req_role < rsc->priv->next_role)) { pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE); } else if (req_role > rsc->priv->next_role) { pcmk__rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, 
pcmk_role_text(rsc->priv->next_role), pcmk_role_text(req_role)); } } if (saved_role > rsc->priv->orig_role) { rsc->priv->orig_role = saved_role; } return rsc; } static void handle_removed_launched_resources(const xmlNode *lrm_rsc_list, pcmk_scheduler_t *scheduler) { for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, PCMK__XE_LRM_RESOURCE, NULL, NULL); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) { pcmk_resource_t *rsc; pcmk_resource_t *launcher = NULL; const char *rsc_id; const char *launcher_id = NULL; launcher_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER); rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); if ((launcher_id == NULL) || (rsc_id == NULL)) { continue; } launcher = pe_find_resource(scheduler->priv->resources, launcher_id); if (launcher == NULL) { continue; } rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if ((rsc == NULL) || (rsc->priv->launcher != NULL) || !pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) { continue; } pcmk__rsc_trace(rsc, "Mapped launcher of removed resource %s to %s", rsc->id, launcher_id); rsc->priv->launcher = launcher; launcher->priv->launched = g_list_append(launcher->priv->launched, rsc); } } /*! * \internal * \brief Unpack one node's lrm status section * * \param[in,out] node Node whose status is being unpacked * \param[in] xml CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler) { bool found_removed_launched_resource = false; // Drill down to PCMK__XE_LRM_RESOURCES section xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL); if (xml == NULL) { return; } xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL); if (xml == NULL) { return; } // Unpack each PCMK__XE_LRM_RESOURCE entry for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCE, NULL, NULL); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) { pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler); if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) { found_removed_launched_resource = true; } } /* Now that all resource state has been unpacked for this node, map any * removed launched resources to their launchers. 
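* (This pass is skipped unless unpack_lrm_resource() flagged at least one resource with pcmk__rsc_removed_launched above.)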
*/ if (found_removed_launched_resource) { handle_removed_launched_resources(xml, scheduler); } } static void set_active(pcmk_resource_t *rsc) { const pcmk_resource_t *top = pe__const_top_resource(rsc, false); if (top && pcmk_is_set(top->flags, pcmk__rsc_promotable)) { rsc->priv->orig_role = pcmk_role_unpromoted; } else { rsc->priv->orig_role = pcmk_role_started; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pcmk_node_t *node = value; int *score = user_data; node->assign->score = *score; } #define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \ "/" PCMK__XE_NODE_STATE #define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \ "/" PCMK__XE_LRM_RESOURCES \ "/" PCMK__XE_LRM_RESOURCE #define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, int target_rc, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']" SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'", NULL); /* Need to check against transition_magic too? */ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_TARGET "='", source, "']", NULL); } else if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_SOURCE "='", source, "']", NULL); } else { g_string_append_c(xpath, ']'); } xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); if (xml && target_rc >= 0) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_EXEC_ERROR; crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc); crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status); if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) { return NULL; } } return xml; } static xmlNode * find_lrm_resource(const char *rsc_id, const char *node_name, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']", NULL); xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); return xml; } /*! * \internal * \brief Check whether a resource has no completed action history on a node * * \param[in,out] rsc Resource to check * \param[in] node_name Node to check * * \return true if \p rsc_id is unknown on \p node_name, otherwise false */ static bool unknown_on_node(pcmk_resource_t *rsc, const char *node_name) { bool result = false; xmlXPathObjectPtr search; char *xpath = NULL; xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" SUB_XPATH_LRM_RSC_OP "[@" PCMK__XA_RC_CODE "!='%d']", node_name, rsc->id, PCMK_OCF_UNKNOWN); search = xpath_search(rsc->priv->scheduler->input, xpath); result = (numXpathResults(search) == 0); freeXpathObject(search); free(xpath); return result; } /*! 
* \internal * \brief Check whether a probe/monitor indicating the resource was not running * on a node happened after some event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that monitor is being compared to * \param[in,out] scheduler Scheduler data * * \return true if such a monitor happened after event, false otherwise */ static bool monitor_not_running_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, pcmk_scheduler_t *scheduler) { /* Any probe/monitor operation on the node indicating it was not running * there */ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name, NULL, PCMK_OCF_NOT_RUNNING, scheduler); return (monitor != NULL) && (pe__is_newer_op(monitor, xml_op) > 0); } /*! * \internal * \brief Check whether any non-monitor operation on a node happened after some * event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that non-monitor is being compared to * \param[in,out] scheduler Scheduler data * * \return true if such a operation happened after event, false otherwise */ static bool non_monitor_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, pcmk_scheduler_t *scheduler) { xmlNode *lrm_resource = NULL; lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler); if (lrm_resource == NULL) { return false; } for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL, NULL); op != NULL; op = pcmk__xe_next(op, PCMK__XE_LRM_RSC_OP)) { const char * task = NULL; if (op == xml_op) { continue; } task = crm_element_value(op, PCMK_XA_OPERATION); if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL) && pe__is_newer_op(op, xml_op) > 0) { return true; } } return false; } /*! * \internal * \brief Check whether the resource has newer state on a node after a migration * attempt * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] migrate_to Any migrate_to event that is being compared to * \param[in] migrate_from Any migrate_from event that is being compared to * \param[in,out] scheduler Scheduler data * * \return true if such a operation happened after event, false otherwise */ static bool newer_state_after_migrate(const char *rsc_id, const char *node_name, const xmlNode *migrate_to, const xmlNode *migrate_from, pcmk_scheduler_t *scheduler) { const xmlNode *xml_op = (migrate_from != NULL)? migrate_from : migrate_to; const char *source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE); /* It's preferred to compare to the migrate event on the same node if * existing, since call ids are more reliable. */ if ((xml_op != migrate_to) && (migrate_to != NULL) && pcmk__str_eq(node_name, source, pcmk__str_casei)) { xml_op = migrate_to; } /* If there's any newer non-monitor operation on the node, or any newer * probe/monitor operation on the node indicating it was not running there, * the migration events potentially no longer matter for the node. */ return non_monitor_after(rsc_id, node_name, xml_op, scheduler) || monitor_not_running_after(rsc_id, node_name, xml_op, scheduler); } /*! 
* \internal * \brief Parse migration source and target node names from history entry * * \param[in] entry Resource history entry for a migration action * \param[in] source_node If not NULL, source must match this node * \param[in] target_node If not NULL, target must match this node * \param[out] source_name Where to store migration source node name * \param[out] target_name Where to store migration target node name * * \return Standard Pacemaker return code */ static int get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node, const pcmk_node_t *target_node, const char **source_name, const char **target_name) { *source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE); *target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET); if ((*source_name == NULL) || (*target_name == NULL)) { pcmk__config_err("Ignoring resource history entry %s without " PCMK__META_MIGRATE_SOURCE " and " PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry)); return pcmk_rc_unpack_error; } if ((source_node != NULL) && !pcmk__str_eq(*source_name, source_node->priv->name, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_SOURCE "='%s' does not match %s", pcmk__xe_id(entry), *source_name, pcmk__node_name(source_node)); return pcmk_rc_unpack_error; } if ((target_node != NULL) && !pcmk__str_eq(*target_name, target_node->priv->name, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_TARGET "='%s' does not match %s", pcmk__xe_id(entry), *target_name, pcmk__node_name(target_node)); return pcmk_rc_unpack_error; } return pcmk_rc_ok; } /* * \internal * \brief Add a migration source to a resource's list of dangling migrations * * If the migrate_to and migrate_from actions in a live migration both * succeeded, but there is no stop on the source, the migration is considered * "dangling." Add the source to the resource's dangling migration list, which * will be used to schedule a stop on the source without affecting the target. * * \param[in,out] rsc Resource involved in migration * \param[in] node Migration source */ static void add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node) { pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s", rsc->id, pcmk__node_name(node)); rsc->priv->orig_role = pcmk_role_stopped; rsc->priv->dangling_migration_sources = g_list_prepend(rsc->priv->dangling_migration_sources, (gpointer) node); } /*! * \internal * \brief Update resource role etc. after a successful migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_success(struct action_history *history) { /* A complete migration sequence is: * 1. migrate_to on source node (which succeeded if we get to this function) * 2. migrate_from on target node * 3. stop on source node * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from succeeded but no stop has happened, the * migration is considered to be "dangling". * * If a successful migrate_to and stop have happened on the source node, we * still need to check for a partial migration, due to scenarios (easier to * produce with batch-limit=1) like: * * - A resource is migrating from node1 to node2, and a migrate_to is * initiated for it on node1. * * - node2 goes into standby mode while the migrate_to is pending, which * aborts the transition. 
* * - Upon completion of the migrate_to, a new transition schedules a stop * on both nodes and a start on node1. * * - If the new transition is aborted for any reason while the resource is * stopping on node1, the transition after that stop completes will see * the migrate_to and stop on the source, but it's still a partial * migration, and the resource must be stopped on node2 because it is * potentially active there due to the migrate_to. * * We also need to take into account that either node's history may be * cleared at any point in the migration process. */ int from_rc = PCMK_OCF_OK; int from_status = PCMK_EXEC_PENDING; pcmk_node_t *target_node = NULL; xmlNode *migrate_from = NULL; const char *source = NULL; const char *target = NULL; bool source_newer_op = false; bool target_newer_state = false; bool active_on_target = false; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } // Check for newer state on the source source_newer_op = non_monitor_after(history->rsc->id, source, history->xml, scheduler); // Check for a migrate_from action from this source on the target migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, -1, scheduler); if (migrate_from != NULL) { if (source_newer_op) { /* There's a newer non-monitor operation on the source and a * migrate_from on the target, so this migrate_to is irrelevant to * the resource's state. */ return; } crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc); crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status); } /* If the resource has newer state on both the source and target after the * migration events, this migrate_to is irrelevant to the resource's state. */ target_newer_state = newer_state_after_migrate(history->rsc->id, target, history->xml, migrate_from, scheduler); if (source_newer_op && target_newer_state) { return; } /* Check for dangling migration (migrate_from succeeded but stop not done). * We know there's no stop because we already returned if the target has a * migrate_from and the source has any newer non-monitor operation. */ if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) { add_dangling_migration(history->rsc, history->node); return; } /* Without newer state, this migrate_to implies the resource is active. * (Clones are not allowed to migrate, so role can't be promoted.) */ history->rsc->priv->orig_role = pcmk_role_started; target_node = pcmk_find_node(scheduler, target); active_on_target = !target_newer_state && (target_node != NULL) && target_node->details->online; if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target if (active_on_target) { native_add_running(history->rsc, target_node, scheduler, TRUE); } else { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable); } return; } // The migrate_from is pending, complete but erased, or to be scheduled /* If there is no history at all for the resource on an online target, then * it was likely cleaned. Just return, and we'll schedule a probe. Once we * have the probe result, it will be reflected in target_newer_state. 
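* unknown_on_node() reports true only when the node has no completed operation history at all for the resource.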
*/ if ((target_node != NULL) && target_node->details->online && unknown_on_node(history->rsc, target)) { return; } if (active_on_target) { pcmk_node_t *source_node = pcmk_find_node(scheduler, source); native_add_running(history->rsc, target_node, scheduler, FALSE); if ((source_node != NULL) && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ history->rsc->priv->partial_migration_target = target_node; history->rsc->priv->partial_migration_source = source_node; } } else if (!source_newer_op) { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable); } } /*! * \internal * \brief Update resource role etc. after a failed migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_failure(struct action_history *history) { xmlNode *target_migrate_from = NULL; const char *source = NULL; const char *target = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->priv->orig_role = pcmk_role_started; // Check for migrate_from on the target target_migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, PCMK_OCF_OK, scheduler); if (/* If the resource state is unknown on the target, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, target) /* If the resource has newer state on the target after the migration * events, this migrate_to no longer matters for the target. */ && !newer_state_after_migrate(history->rsc->id, target, history->xml, target_migrate_from, scheduler)) { /* The resource has no newer state on the target, so assume it's still * active there. * (if it is up). */ pcmk_node_t *target_node = pcmk_find_node(scheduler, target); if (target_node && target_node->details->online) { native_add_running(history->rsc, target_node, scheduler, FALSE); } } else if (!non_monitor_after(history->rsc->id, source, history->xml, scheduler)) { /* We know the resource has newer state on the target, but this * migrate_to still matters for the source as long as there's no newer * non-monitor operation there. */ // Mark node as having dangling migration so we can force a stop later history->rsc->priv->dangling_migration_sources = g_list_prepend(history->rsc->priv->dangling_migration_sources, (gpointer) history->node); } } /*! * \internal * \brief Update resource role etc. 
after a failed migrate_from action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_from_failure(struct action_history *history) { xmlNode *source_migrate_to = NULL; const char *source = NULL; const char *target = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; // Get source and target node names from XML if (get_migration_node_names(history->xml, NULL, history->node, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->priv->orig_role = pcmk_role_started; // Check for a migrate_to on the source source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO, source, target, PCMK_OCF_OK, scheduler); if (/* If the resource state is unknown on the source, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, source) /* If the resource has newer state on the source after the migration * events, this migrate_from no longer matters for the source. */ && !newer_state_after_migrate(history->rsc->id, source, source_migrate_to, history->xml, scheduler)) { /* The resource has no newer state on the source, so assume it's still * active there (if it is up). */ pcmk_node_t *source_node = pcmk_find_node(scheduler, source); if (source_node && source_node->details->online) { native_add_running(history->rsc, source_node, scheduler, TRUE); } } } /*! * \internal * \brief Add an action to cluster's list of failed actions * * \param[in,out] history Parsed action result history */ static void record_failed_op(struct action_history *history) { const pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; if (!(history->node->details->online)) { return; } for (const xmlNode *xIter = scheduler->priv->failed->children; xIter != NULL; xIter = xIter->next) { const char *key = pcmk__xe_history_key(xIter); const char *uname = crm_element_value(xIter, PCMK_XA_UNAME); if (pcmk__str_eq(history->key, key, pcmk__str_none) && pcmk__str_eq(uname, history->node->priv->name, pcmk__str_casei)) { crm_trace("Skipping duplicate entry %s on %s", history->key, pcmk__node_name(history->node)); return; } } crm_trace("Adding entry for %s on %s to failed action list", history->key, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name); crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id); pcmk__xml_copy(scheduler->priv->failed, history->xml); } static char * last_change_str(const xmlNode *xml_op) { time_t when; char *result = NULL; if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE, &when) == pcmk_ok) { char *when_s = pcmk__epoch2str(&when, 0); const char *p = strchr(when_s, ' '); // Skip day of week to make message shorter if ((p != NULL) && (*(++p) != '\0')) { result = pcmk__str_copy(p); } free(when_s); } if (result == NULL) { result = pcmk__str_copy("unknown_time"); } return result; } /*! 
* \internal * \brief Ban a resource (or its clone if an anonymous instance) from all nodes * * \param[in,out] rsc Resource to ban */ static void ban_from_all_nodes(pcmk_resource_t *rsc) { int score = -PCMK_SCORE_INFINITY; const pcmk_scheduler_t *scheduler = rsc->priv->scheduler; if (rsc->priv->parent != NULL) { pcmk_resource_t *parent = uber_parent(rsc); if (pcmk__is_anonymous_clone(parent)) { /* For anonymous clones, if an operation with * PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the * entire clone must stop. */ rsc = parent; } } // Ban the resource from all nodes crm_notice("%s will not be started under current conditions", rsc->id); if (rsc->priv->allowed_nodes != NULL) { g_hash_table_destroy(rsc->priv->allowed_nodes); } rsc->priv->allowed_nodes = pe__node_list2table(scheduler->nodes); g_hash_table_foreach(rsc->priv->allowed_nodes, set_node_score, &score); } /*! * \internal * \brief Get configured failure handling and role after failure for an action * * \param[in,out] history Unpacked action history entry * \param[out] on_fail Where to set configured failure handling * \param[out] fail_role Where to set to role after failure */ static void unpack_failure_handling(struct action_history *history, enum pcmk__on_fail *on_fail, enum rsc_role_e *fail_role) { xmlNode *config = pcmk__find_action_config(history->rsc, history->task, history->interval_ms, true); GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node, history->task, history->interval_ms, config); const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL); *on_fail = pcmk__parse_on_fail(history->rsc, history->task, history->interval_ms, on_fail_str); *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail, meta); g_hash_table_destroy(meta); } /*! * \internal * \brief Update resource role, failure handling, etc., after a failed action * * \param[in,out] history Parsed action result history * \param[in] config_on_fail Action failure handling from configuration * \param[in] fail_role Resource's role after failure of this action * \param[out] last_failure This will be set to the history XML * \param[in,out] on_fail Actual handling of action result */ static void unpack_rsc_op_failure(struct action_history *history, enum pcmk__on_fail config_on_fail, enum rsc_role_e fail_role, xmlNode **last_failure, enum pcmk__on_fail *on_fail) { bool is_probe = false; char *last_change_s = NULL; pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; *last_failure = history->xml; is_probe = pcmk_xe_is_probe(history->xml); last_change_s = last_change_str(history->xml); if (!pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster) && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " QB_XS " exit-status=%d id=%s", crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); } else { pcmk__sched_warn(scheduler, "Unexpected result (%s%s%s) was recorded for %s of " "%s on %s at %s " QB_XS " exit-status=%d id=%s", crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? 
"probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); if (is_probe && (history->exit_status != PCMK_OCF_OK) && (history->exit_status != PCMK_OCF_NOT_RUNNING) && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. */ crm_notice("If it is not possible for %s to run on %s, see " "the " PCMK_XA_RESOURCE_DISCOVERY " option for location " "constraints", history->rsc->id, pcmk__node_name(history->node)); } record_failed_op(history); } free(last_change_s); if (*on_fail < config_on_fail) { pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s", pcmk__on_fail_text(*on_fail), pcmk__on_fail_text(config_on_fail), history->key); *on_fail = config_on_fail; } if (strcmp(history->task, PCMK_ACTION_STOP) == 0) { resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY, "__stop_fail__", scheduler); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) { unpack_migrate_to_failure(history); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) { unpack_migrate_from_failure(history); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->priv->orig_role = pcmk_role_promoted; } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) { if (config_on_fail == pcmk__on_fail_block) { history->rsc->priv->orig_role = pcmk_role_promoted; pe__set_next_role(history->rsc, pcmk_role_stopped, "demote with " PCMK_META_ON_FAIL "=block"); } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) { history->rsc->priv->orig_role = pcmk_role_stopped; } else { /* Staying in the promoted role would put the scheduler and * controller into a loop. Setting the role to unpromoted is not * dangerous because the resource will be stopped as part of * recovery, and any promotion will be ordered after that stop. */ history->rsc->priv->orig_role = pcmk_role_unpromoted; } } if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { /* leave stopped */ pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id); history->rsc->priv->orig_role = pcmk_role_stopped; } else if (history->rsc->priv->orig_role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id); set_active(history->rsc); } pcmk__rsc_trace(history->rsc, "Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s", history->rsc->id, pcmk_role_text(history->rsc->priv->orig_role), pcmk__btoa(history->node->details->unclean), pcmk__on_fail_text(config_on_fail), pcmk_role_text(fail_role)); if ((fail_role != pcmk_role_started) && (history->rsc->priv->next_role < fail_role)) { pe__set_next_role(history->rsc, fail_role, "failure"); } if (fail_role == pcmk_role_stopped) { ban_from_all_nodes(history->rsc); } } /*! * \internal * \brief Block a resource with a failed action if it cannot be recovered * * If resource action is a failed stop and fencing is not possible, mark the * resource as unmanaged and blocked, since recovery cannot be done. 
* * \param[in,out] history Parsed action history entry */ static void block_if_unrecoverable(struct action_history *history) { char *last_change_s = NULL; if (strcmp(history->task, PCMK_ACTION_STOP) != 0) { return; // All actions besides stop are always recoverable } if (pe_can_fence(history->node->priv->scheduler, history->node)) { return; // Failed stops are recoverable via fencing } last_change_s = last_change_str(history->xml); pcmk__sched_err(history->node->priv->scheduler, "No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " QB_XS " rc=%d id=%s", history->rsc->id, history->task, pcmk__node_name(history->node), crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), last_change_s, history->exit_status, history->id); free(last_change_s); pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(history->rsc, pcmk__rsc_blocked); } /*! * \internal * \brief Update action history's execution status and why * * \param[in,out] history Parsed action history entry * \param[out] why Where to store reason for update * \param[in] value New value * \param[in] reason Description of why value was changed */ static inline void remap_because(struct action_history *history, const char **why, int value, const char *reason) { if (history->execution_status != value) { history->execution_status = value; *why = reason; } } /*! * \internal * \brief Remap informational monitor results and operation status * * For the monitor results, certain OCF codes are for providing extended information * to the user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by Pacemaker. * * For operation status, the action result can be used to determine an appropriate * status for the purposes of responding to the action. The status provided by the * executor is not directly usable since the executor does not know what was expected. * * \param[in,out] history Parsed action history entry * \param[in,out] on_fail What should be done about the result * \param[in] expired Whether result is expired * * \note If the result is remapped and the node is not shutting down or failed, * the operation will be recorded in the scheduler data's list of failed * operations to highlight it for the user. * * \note This may update the resource's current and next role. */ static void remap_operation(struct action_history *history, enum pcmk__on_fail *on_fail, bool expired) { bool is_probe = false; int orig_exit_status = history->exit_status; int orig_exec_status = history->execution_status; const char *why = NULL; const char *task = history->task; // Remap degraded results to their successful counterparts history->exit_status = pcmk__effective_rc(history->exit_status); if (history->exit_status != orig_exit_status) { why = "degraded result"; if (!expired && (!history->node->details->shutdown || history->node->details->online)) { record_failed_op(history); } } if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && ((history->execution_status != PCMK_EXEC_DONE) || (history->exit_status != PCMK_OCF_NOT_RUNNING))) { history->execution_status = PCMK_EXEC_DONE; history->exit_status = PCMK_OCF_NOT_RUNNING; why = "equivalent probe result"; } /* If the executor reported an execution status of anything but done or * error, consider that final. 
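* (A couple of statuses, PCMK_EXEC_NO_FENCE_DEVICE and PCMK_EXEC_NO_SECRETS, are escalated to node-fatal errors in the switch below.)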
But for done or error, we know better whether * it should be treated as a failure or not, because we know the expected * result. */ switch (history->execution_status) { case PCMK_EXEC_DONE: case PCMK_EXEC_ERROR: break; // These should be treated as node-fatal case PCMK_EXEC_NO_FENCE_DEVICE: case PCMK_EXEC_NO_SECRETS: remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "node-fatal error"); goto remap_done; default: goto remap_done; } is_probe = pcmk_xe_is_probe(history->xml); if (is_probe) { task = "probe"; } if (history->expected_exit_status < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * expected exit status in the transition key, which (along with the * similar case of a corrupted transition key in the CIB) will be * reported to this function as -1. Pacemaker 2.0+ does not support * rolling upgrades from those versions or processing of saved CIB files * from those versions, so we do not need to care much about this case. */ remap_because(history, &why, PCMK_EXEC_ERROR, "obsolete history format"); pcmk__config_warn("Expected result not found for %s on %s " "(corrupt or obsolete CIB?)", history->key, pcmk__node_name(history->node)); } else if (history->exit_status == history->expected_exit_status) { remap_because(history, &why, PCMK_EXEC_DONE, "expected result"); } else { remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result"); pcmk__rsc_debug(history->rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", history->key, pcmk__node_name(history->node), history->expected_exit_status, crm_exit_str(history->expected_exit_status), history->exit_status, crm_exit_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, "")); } switch (history->exit_status) { case PCMK_OCF_OK: if (is_probe && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || (history->expected_exit_status == history->exit_status) || !pcmk_is_set(history->rsc->flags, pcmk__rsc_managed)) { /* For probes, recurring monitors for the Stopped role, and * unmanaged resources, "not running" is not considered a * failure. 
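/* In the switch above, PCMK_OCF_NOT_RUNNING is treated as a failure only when
 * an active, managed resource was expected to be running. A self-contained
 * sketch of that decision with plain booleans standing in for the history and
 * resource objects (the helper name below is illustrative, not Pacemaker API):
 */

#include <stdbool.h>
#include <stdio.h>

static bool
not_running_is_failure(bool is_probe, bool result_was_expected, bool managed)
{
    if (is_probe || result_was_expected || !managed) {
        return false;   /* "not running" is an acceptable answer here */
    }
    return true;        /* a managed, active resource unexpectedly stopped */
}

int
main(void)
{
    printf("probe:              %d\n", not_running_is_failure(true,  false, true));
    printf("stopped-role mon:   %d\n", not_running_is_failure(false, true,  true));
    printf("unmanaged resource: %d\n", not_running_is_failure(false, false, false));
    printf("managed monitor:    %d\n", not_running_is_failure(false, false, true));
    return 0;
}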
*/ remap_because(history, &why, PCMK_EXEC_DONE, "exit status"); history->rsc->priv->orig_role = pcmk_role_stopped; *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "not running"); } break; case PCMK_OCF_RUNNING_PROMOTED: if (is_probe && (history->exit_status != history->expected_exit_status)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active and promoted on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } if (!expired || (history->exit_status == history->expected_exit_status)) { history->rsc->priv->orig_role = pcmk_role_promoted; } break; case PCMK_OCF_FAILED_PROMOTED: if (!expired) { history->rsc->priv->orig_role = pcmk_role_promoted; } remap_because(history, &why, PCMK_EXEC_ERROR, "exit status"); break; case PCMK_OCF_NOT_CONFIGURED: remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status"); break; case PCMK_OCF_UNIMPLEMENT_FEATURE: { guint interval_ms = 0; crm_element_value_ms(history->xml, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); } else { remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED, "exit status"); } } break; case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); break; default: if (history->execution_status == PCMK_EXEC_DONE) { char *last_change_s = last_change_str(history->xml); crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", history->exit_status, task, history->rsc->id, pcmk__node_name(history->node), last_change_s); remap_because(history, &why, PCMK_EXEC_ERROR, "unknown exit status"); free(last_change_s); } break; } remap_done: if (why != NULL) { pcmk__rsc_trace(history->rsc, "Remapped %s result from [%s: %s] to [%s: %s] " "because of %s", history->key, pcmk_exec_status_str(orig_exec_status), crm_exit_str(orig_exit_status), pcmk_exec_status_str(history->execution_status), crm_exit_str(history->exit_status), why); } } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(const xmlNode *xml_op, const char *task, pcmk_resource_t *rsc, pcmk_node_t *node) { if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
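/* The exit-status cases above sort agent return codes into rough severities:
 * PCMK_OCF_NOT_CONFIGURED is fatal (recovery blocked everywhere), missing
 * agents, invalid parameters and insufficient privileges are hard errors
 * (recovery blocked on that node), and unimplemented actions are hard only
 * when not recurring. A standalone sketch of that classification; the numeric
 * codes below are the standard OCF values, assumed rather than taken from
 * Pacemaker's headers:
 */

#include <stdio.h>

enum demo_ocf {
    DEMO_OCF_OK                = 0,
    DEMO_OCF_INVALID_PARAM     = 2,
    DEMO_OCF_UNIMPLEMENTED     = 3,
    DEMO_OCF_INSUFFICIENT_PRIV = 4,
    DEMO_OCF_NOT_INSTALLED     = 5,
    DEMO_OCF_NOT_CONFIGURED    = 6,
};

enum demo_severity { DEMO_SOFT = 0, DEMO_HARD, DEMO_FATAL };

static enum demo_severity
classify_failure(enum demo_ocf exit_status, unsigned int interval_ms)
{
    switch (exit_status) {
        case DEMO_OCF_NOT_CONFIGURED:
            return DEMO_FATAL;      /* ban everywhere */
        case DEMO_OCF_NOT_INSTALLED:
        case DEMO_OCF_INVALID_PARAM:
        case DEMO_OCF_INSUFFICIENT_PRIV:
            return DEMO_HARD;       /* ban on the failed node */
        case DEMO_OCF_UNIMPLEMENTED:
            /* only non-recurring actions are treated as hard failures */
            return (interval_ms == 0)? DEMO_HARD : DEMO_SOFT;
        default:
            return DEMO_SOFT;       /* handled by the on-fail policy */
    }
}

int
main(void)
{
    printf("not configured        -> %d\n",
           classify_failure(DEMO_OCF_NOT_CONFIGURED, 0));
    printf("not installed         -> %d\n",
           classify_failure(DEMO_OCF_NOT_INSTALLED, 0));
    printf("unimplemented monitor -> %d\n",
           classify_failure(DEMO_OCF_UNIMPLEMENTED, 10000));
    return 0;
}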
*/ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure, rsc->priv->scheduler); } else { pcmk__op_digest_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->priv->scheduler); switch (digest_data->rc) { case pcmk__digest_unknown: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, pcmk__xe_history_key(xml_op), node->priv->id); break; case pcmk__digest_match: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn, pcmk_scheduler_t *scheduler) { pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id); if (remote_node) { pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, scheduler); order_actions(fence, action, pcmk__ar_first_implies_then); } } static bool should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task, guint interval_ms, bool is_last_failure) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if ((rsc->priv->remote_reconnect_ms > 0U) && pcmk_is_set(rsc->priv->scheduler->flags, pcmk__sched_fencing_enabled) && (interval_ms != 0) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { pcmk_node_t *remote_node = pcmk_find_node(rsc->priv->scheduler, rsc->id); if (remote_node && !pcmk_is_set(remote_node->priv->flags, pcmk__node_remote_fenced)) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
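/* check_operation_expiry(), documented above and defined next, treats a
 * history entry as expired once the resource's failure timeout has fully
 * elapsed since the result last changed. The core of that test is a plain
 * time comparison, sketched here without any scheduler state:
 */

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* An entry is expired once the failure timeout has fully elapsed since the
 * result last changed. (The real code additionally skips expiry for remote
 * reconnect intervals and checks the resource-wide fail count.) */
static bool
entry_expired(time_t now, time_t last_rc_change, unsigned int expiration_sec)
{
    if (expiration_sec == 0) {
        return false;                       /* no failure-timeout configured */
    }
    return now >= (last_rc_change + (time_t) expiration_sec);
}

int
main(void)
{
    time_t now = time(NULL);

    /* Failed 10 minutes ago with a 5-minute failure-timeout: expired */
    printf("old failure:    %d\n", entry_expired(now, now - 600, 300));

    /* Failed 1 minute ago with the same timeout: not expired yet */
    printf("recent failure: %d\n", entry_expired(now, now - 60, 300));
    return 0;
}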
* * \param[in,out] history Parsed action result history * * \return true if operation history entry is expired, otherwise false */ static bool check_operation_expiry(struct action_history *history) { bool expired = false; bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0"); time_t last_run = 0; int unexpired_fail_count = 0; const char *clear_reason = NULL; const guint expiration_sec = pcmk__timeout_ms2s(history->rsc->priv->failure_expiration_ms); pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler; if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) { pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not expired: " "Not Installed does not expire", history->id, pcmk__node_name(history->node)); return false; // "Not installed" must always be cleared manually } if ((expiration_sec > 0) && (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE, &last_run) == 0)) { /* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a * timestamp */ time_t now = get_effective_time(scheduler); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + expiration_sec)) && !should_ignore_failure_timeout(history->rsc, history->task, history->interval_ms, is_last_failure)) { expired = true; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(history->node, history->rsc, &last_failure, pcmk__fc_effective, history->xml); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d " "expiration=%s last-failure@%lld", history->id, (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, pcmk__readable_interval(expiration_sec * 1000), (long long) last_failure); last_failure += expiration_sec + 1; if (unexpired_fail_count && (now < last_failure)) { pe__update_recheck_time(last_failure, scheduler, "fail count expiration"); } } if (expired) { if (pe_get_failcount(history->node, history->rsc, NULL, pcmk__fc_default, history->xml)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Unexpired fail count", history->id, pcmk__node_name(history->node)); expired = false; } } else if (is_last_failure && (history->rsc->priv->remote_reconnect_ms > 0U)) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(history->xml, history->task, history->rsc, history->node)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { pcmk_action_t *clear_op = NULL; // Schedule clearing of the fail count clear_op = pe__clear_failcount(history->rsc, history->node, clear_reason, scheduler); if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled) && (history->rsc->priv->remote_reconnect_ms > 0)) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. 
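/* When a fail count has not expired yet, the code above schedules a cluster
 * recheck for the moment the newest failure will age out (last_failure +
 * expiration + 1), keeping only the earliest pending recheck time. A minimal
 * sketch of that bookkeeping, analogous to pe__update_recheck_time() but with
 * a local variable instead of the scheduler's private field:
 */

#include <stdio.h>
#include <time.h>

/* Keep the earliest future recheck time */
static void
update_recheck(time_t *recheck_by, time_t now, time_t candidate)
{
    if ((candidate > now)
        && ((*recheck_by == 0) || (candidate < *recheck_by))) {
        *recheck_by = candidate;
    }
}

int
main(void)
{
    time_t now = time(NULL);
    time_t recheck_by = 0;
    time_t last_failure = now - 100;     /* most recent failure, 100s ago */
    unsigned int expiration_sec = 300;   /* failure-timeout of 5 minutes */

    /* One second past the point where the fail count fully expires */
    update_recheck(&recheck_by, now, last_failure + expiration_sec + 1);
    printf("recheck in %ld seconds\n", (long) (recheck_by - now)); /* ~201 */
    return 0;
}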
* * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until * after unpack_node_history() is done). */ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", history->task, history->rsc->id); order_after_remote_fencing(clear_op, history->rsc, scheduler); } } if (expired && (history->interval_ms == 0) && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { switch (history->exit_status) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_PROMOTED: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_PROMOTED: // Don't expire probes that return these values pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Probe result", history->id, pcmk__node_name(history->node)); expired = false; break; } } return expired; } int pe__target_rc_from_xml(const xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } /*! * \internal * \brief Update a resource's state for an action result * * \param[in,out] history Parsed action history entry * \param[in] exit_status Exit status to base new state on * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void update_resource_state(struct action_history *history, int exit_status, const xmlNode *last_failure, enum pcmk__on_fail *on_fail) { bool clear_past_failure = false; if ((exit_status == PCMK_OCF_NOT_INSTALLED) || (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml))) { history->rsc->priv->orig_role = pcmk_role_stopped; } else if (exit_status == PCMK_OCF_NOT_RUNNING) { clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { if ((last_failure != NULL) && pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure), pcmk__str_none)) { clear_past_failure = true; } if (history->rsc->priv->orig_role < pcmk_role_started) { set_active(history->rsc); } } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_stopped; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_promoted; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE, pcmk__str_none)) { if (*on_fail == pcmk__on_fail_demote) { /* Demote clears an error only if * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE */ clear_past_failure = true; } history->rsc->priv->orig_role = pcmk_role_unpromoted; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_none)) { history->rsc->priv->orig_role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)) { unpack_migrate_to_success(history); } else if (history->rsc->priv->orig_role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "%s active on %s", history->rsc->id, pcmk__node_name(history->node)); set_active(history->rsc); } if (!clear_past_failure) { return; } switch (*on_fail) { case pcmk__on_fail_stop: case pcmk__on_fail_ban: case pcmk__on_fail_standby_node: case 
pcmk__on_fail_fence_node: pcmk__rsc_trace(history->rsc, "%s (%s) is not cleared by a completed %s", history->rsc->id, pcmk__on_fail_text(*on_fail), history->task); break; case pcmk__on_fail_block: case pcmk__on_fail_ignore: case pcmk__on_fail_demote: case pcmk__on_fail_restart: case pcmk__on_fail_restart_container: *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures"); break; case pcmk__on_fail_reset_remote: if (history->rsc->priv->remote_reconnect_ms == 0U) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = pcmk__on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures and reset remote"); } break; } } /*! * \internal * \brief Check whether a given history entry matters for resource state * * \param[in] history Parsed action history entry * * \return true if action can affect resource state, otherwise false */ static inline bool can_affect_state(struct action_history *history) { return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, "asyncmon", NULL); } /*! * \internal * \brief Unpack execution/exit status and exit reason from a history entry * * \param[in,out] history Action history entry to unpack * * \return Standard Pacemaker return code */ static int unpack_action_result(struct action_history *history) { if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS, &(history->execution_status)) < 0) || (history->execution_status < PCMK_EXEC_PENDING) || (history->execution_status > PCMK_EXEC_MAX) || (history->execution_status == PCMK_EXEC_CANCELLED)) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_OP_STATUS " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_OP_STATUS), "")); return pcmk_rc_unpack_error; } if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE, &(history->exit_status)) < 0) || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_RC_CODE " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_RC_CODE), "")); return pcmk_rc_unpack_error; } history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON); return pcmk_rc_ok; } /*! 
* \internal * \brief Process an action history entry whose result expired * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the * entry needs no further processing) */ static int process_expired_result(struct action_history *history, int orig_exit_status) { if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && (orig_exit_status != history->expected_exit_status)) { if (history->rsc->priv->orig_role <= pcmk_role_stopped) { history->rsc->priv->orig_role = pcmk_role_unknown; } crm_trace("Ignoring resource history entry %s for probe of %s on %s: " "Masked failure expired", history->id, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->exit_status == history->expected_exit_status) { return pcmk_rc_undetermined; // Only failures expire } if (history->interval_ms == 0) { crm_notice("Ignoring resource history entry %s for %s of %s on %s: " "Expired failure", history->id, history->task, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->node->details->online && !history->node->details->unclean) { /* Reschedule the recurring action. schedule_cancel() won't work at * this stage, so as a hacky workaround, forcibly change the restart * digest so pcmk__check_action_config() does what we want later. * * @TODO We should skip this if there is a newer successful monitor. * Also, this causes rescheduling only if the history entry * has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure * scheduler regression test doesn't, but that may not be a * realistic scenario in production). */ crm_notice("Rescheduling %s-interval %s of %s on %s " "after failure expired", pcmk__readable_interval(history->interval_ms), history->task, history->rsc->id, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST, "calculated-failure-timeout"); return pcmk_rc_ok; } return pcmk_rc_undetermined; } /*! * \internal * \brief Process a masked probe failure * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void mask_probe_failure(struct action_history *history, int orig_exit_status, const xmlNode *last_failure, enum pcmk__on_fail *on_fail) { pcmk_resource_t *ban_rsc = history->rsc; if (!pcmk_is_set(history->rsc->flags, pcmk__rsc_unique)) { ban_rsc = uber_parent(history->rsc); } crm_notice("Treating probe result '%s' for %s on %s as 'not running'", crm_exit_str(orig_exit_status), history->rsc->id, pcmk__node_name(history->node)); update_resource_state(history, history->expected_exit_status, last_failure, on_fail); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name); record_failed_op(history); resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY, "masked-probe-failure", ban_rsc->priv->scheduler); } /*! 
* \internal Check whether a given failure is for a given pending action * * \param[in] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known * * \return true if \p last_failure is failure of pending action in \p history, * otherwise false * \note Both \p history and \p last_failure must come from the same * \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be * the same. */ static bool failure_is_newer(const struct action_history *history, const xmlNode *last_failure) { guint failure_interval_ms = 0U; long long failure_change = 0LL; long long this_change = 0LL; if (last_failure == NULL) { return false; // Resource has no last_failure entry } if (!pcmk__str_eq(history->task, crm_element_value(last_failure, PCMK_XA_OPERATION), pcmk__str_none)) { return false; // last_failure is for different action } if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL, &failure_interval_ms) != pcmk_ok) || (history->interval_ms != failure_interval_ms)) { return false; // last_failure is for action with different interval } if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE), &this_change, 0LL) != pcmk_rc_ok) || (pcmk__scan_ll(crm_element_value(last_failure, PCMK_XA_LAST_RC_CHANGE), &failure_change, 0LL) != pcmk_rc_ok) || (failure_change < this_change)) { return false; // Failure is not known to be newer } return true; } /*! * \internal * \brief Update a resource's role etc. for a pending action * * \param[in,out] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known */ static void process_pending_action(struct action_history *history, const xmlNode *last_failure) { /* For recurring monitors, a failure is recorded only in RSC_last_failure_0, * and there might be a RSC_monitor_INTERVAL entry with the last successful * or pending result. * * If last_failure contains the failure of the pending recurring monitor * we're processing here, and is newer, the action is no longer pending. * (Pending results have call ID -1, which sorts last, so the last failure * if any should be known.) */ if (failure_is_newer(history, last_failure)) { return; } if (strcmp(history->task, PCMK_ACTION_START) == 0) { pcmk__set_rsc_flags(history->rsc, pcmk__rsc_start_pending); set_active(history->rsc); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->priv->orig_role = pcmk_role_promoted; } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) && history->node->details->unclean) { /* A migrate_to action is pending on a unclean source, so force a stop * on the target. */ const char *migrate_target = NULL; pcmk_node_t *target = NULL; migrate_target = crm_element_value(history->xml, PCMK__META_MIGRATE_TARGET); target = pcmk_find_node(history->rsc->priv->scheduler, migrate_target); if (target != NULL) { stop_action(history->rsc, target, FALSE); } } if (history->rsc->priv->pending_action != NULL) { /* There should never be multiple pending actions, but as a failsafe, * just remember the first one processed for display purposes. */ return; } if (pcmk_is_probe(history->task, history->interval_ms)) { /* Pending probes are currently never displayed, even if pending * operations are requested. If we ever want to change that, * enable the below and the corresponding part of * native.c:native_pending_action(). 
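/* failure_is_newer(), described above, decides whether a pending recurring
 * monitor has in fact already failed by matching the last_failure entry on
 * action and interval and then comparing last-rc-change timestamps. With the
 * XML parsing stripped away, the comparison reduces to this sketch (the
 * function name here is illustrative):
 */

#include <stdbool.h>
#include <stdio.h>

/* A recorded failure supersedes a pending action only when it is for the
 * same operation and interval and is not older than the pending entry. */
static bool
failure_supersedes_pending(long long pending_last_change,
                           long long failure_last_change,
                           bool same_action, bool same_interval)
{
    if (!same_action || !same_interval) {
        return false;
    }
    return failure_last_change >= pending_last_change;
}

int
main(void)
{
    /* Pending monitor recorded at t=1000, failure recorded at t=1200 */
    printf("newer failure: %d\n",
           failure_supersedes_pending(1000, 1200, true, true));

    /* Failure predates the pending entry, so the action is still pending */
    printf("older failure: %d\n",
           failure_supersedes_pending(1200, 1000, true, true));
    return 0;
}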
*/ #if 0 history->rsc->private->pending_action = strdup("probe"); history->rsc->private->pending_node = history->node; #endif } else { history->rsc->priv->pending_action = strdup(history->task); history->rsc->priv->pending_node = history->node; } } static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum pcmk__on_fail *on_fail) { int old_rc = 0; bool expired = false; pcmk_resource_t *parent = rsc; enum rsc_role_e fail_role = pcmk_role_unknown; enum pcmk__on_fail failure_strategy = pcmk__on_fail_restart; struct action_history history = { .rsc = rsc, .node = node, .xml = xml_op, .execution_status = PCMK_EXEC_UNKNOWN, }; CRM_CHECK(rsc && node && xml_op, return); history.id = pcmk__xe_id(xml_op); if (history.id == NULL) { pcmk__config_err("Ignoring resource history entry for %s on %s " "without ID", rsc->id, pcmk__node_name(node)); return; } // Task and interval history.task = crm_element_value(xml_op, PCMK_XA_OPERATION); if (history.task == NULL) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "without " PCMK_XA_OPERATION, history.id, rsc->id, pcmk__node_name(node)); return; } crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms)); if (!can_affect_state(&history)) { pcmk__rsc_trace(rsc, "Ignoring resource history entry %s for %s on %s " "with irrelevant action '%s'", history.id, rsc->id, pcmk__node_name(node), history.task); return; } if (unpack_action_result(&history) != pcmk_rc_ok) { return; // Error already logged } history.expected_exit_status = pe__target_rc_from_xml(xml_op); history.key = pcmk__xe_history_key(xml_op); crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id)); pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)", history.id, history.task, history.call_id, pcmk__node_name(node), pcmk_exec_status_str(history.execution_status), crm_exit_str(history.exit_status)); if (node->details->unclean) { pcmk__rsc_trace(rsc, "%s is running on %s, which is unclean (further action " "depends on value of stop's on-fail attribute)", rsc->id, pcmk__node_name(node)); } expired = check_operation_expiry(&history); old_rc = history.exit_status; remap_operation(&history, on_fail, expired); if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) { goto done; } if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) { mask_probe_failure(&history, old_rc, *last_failure, on_fail); goto done; } if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { parent = uber_parent(rsc); } switch (history.execution_status) { case PCMK_EXEC_PENDING: process_pending_action(&history, *last_failure); goto done; case PCMK_EXEC_DONE: update_resource_state(&history, history.exit_status, *last_failure, on_fail); goto done; case PCMK_EXEC_NOT_INSTALLED: unpack_failure_handling(&history, &failure_strategy, &fail_role); if (failure_strategy == pcmk__on_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " QB_XS " status=%d rc=%d id=%s", history.task, rsc->id, pcmk__node_name(node), history.execution_status, history.exit_status, history.id); /* Also for printing it as "FAILED" by marking it as * pcmk__rsc_failed later */ *on_fail = pcmk__on_fail_ban; } resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->priv->scheduler); unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); goto done; case PCMK_EXEC_NOT_CONNECTED: if (pcmk__is_pacemaker_remote_node(node) && pcmk_is_set(node->priv->remote->flags, 
pcmk__rsc_managed)) { /* We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ pcmk__set_rsc_flags(node->priv->remote, pcmk__rsc_failed|pcmk__rsc_stop_if_failed); } break; // Not done, do error handling case PCMK_EXEC_ERROR: case PCMK_EXEC_ERROR_HARD: case PCMK_EXEC_ERROR_FATAL: case PCMK_EXEC_TIMEOUT: case PCMK_EXEC_NOT_SUPPORTED: case PCMK_EXEC_INVALID: break; // Not done, do error handling default: // No other value should be possible at this point break; } unpack_failure_handling(&history, &failure_strategy, &fail_role); if ((failure_strategy == pcmk__on_fail_ignore) || ((failure_strategy == pcmk__on_fail_restart_container) && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) { char *last_change_s = last_change_str(xml_op); crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded " QB_XS " %s", history.task, crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), rsc->id, pcmk__node_name(node), last_change_s, history.id); free(last_change_s); update_resource_state(&history, history.expected_exit_status, *last_failure, on_fail); crm_xml_add(xml_op, PCMK_XA_UNAME, node->priv->name); pcmk__set_rsc_flags(rsc, pcmk__rsc_ignore_failure); record_failed_op(&history); if ((failure_strategy == pcmk__on_fail_restart_container) && (*on_fail <= pcmk__on_fail_restart)) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); if (history.execution_status == PCMK_EXEC_ERROR_HARD) { uint8_t log_level = LOG_ERR; if (history.exit_status == PCMK_OCF_NOT_INSTALLED) { log_level = LOG_NOTICE; } do_crm_log(log_level, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s) " QB_XS " %s", parent->id, pcmk__node_name(node), crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->priv->scheduler); } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) { pcmk__sched_err(rsc->priv->scheduler, "Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " QB_XS " %s", parent->id, crm_exit_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, NULL, -PCMK_SCORE_INFINITY, "fatal-error", rsc->priv->scheduler); } } done: pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)", rsc->id, pcmk__node_name(node), history.id, pcmk_role_text(rsc->priv->orig_role), pcmk_role_text(rsc->priv->next_role)); } /*! 
* \internal * \brief Insert a node attribute with value into a \c GHashTable * * \param[in,out] key Key to insert (either freed or owned by * \p user_data upon return) * \param[in] value Value to insert (owned by \p user_data upon return) * \param[in] user_data \c GHashTable to insert into */ static gboolean insert_attr(gpointer key, gpointer value, gpointer user_data) { GHashTable *table = user_data; g_hash_table_insert(table, key, value); return TRUE; } static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler) { const char *cluster_name = NULL; const char *dc_id = crm_element_value(scheduler->input, PCMK_XA_DC_UUID); const pcmk_rule_input_t rule_input = { .now = scheduler->priv->now, }; pcmk__insert_dup(node->priv->attrs, CRM_ATTR_UNAME, node->priv->name); pcmk__insert_dup(node->priv->attrs, CRM_ATTR_ID, node->priv->id); if ((scheduler->dc_node == NULL) && pcmk__str_eq(node->priv->id, dc_id, pcmk__str_casei)) { scheduler->dc_node = node; pcmk__insert_dup(node->priv->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_TRUE); } else if (!pcmk__same_node(node, scheduler->dc_node)) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_FALSE); } cluster_name = g_hash_table_lookup(scheduler->priv->options, PCMK_OPT_CLUSTER_NAME); if (cluster_name) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_CLUSTER_NAME, cluster_name); } if (overwrite) { /* @TODO Try to reorder some unpacking so that we don't need the * overwrite argument or to unpack into a temporary table */ GHashTable *unpacked = pcmk__strkey_table(free, free); pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_input, unpacked, NULL, scheduler); g_hash_table_foreach_steal(unpacked, insert_attr, node->priv->attrs); g_hash_table_destroy(unpacked); } else { pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_input, node->priv->attrs, NULL, scheduler); } pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_input, node->priv->utilization, NULL, scheduler); if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL, pcmk__rsc_node_current) == NULL) { const char *site_name = pcmk__node_attr(node, "site-name", NULL, pcmk__rsc_node_current); if (site_name) { pcmk__insert_dup(node->priv->attrs, CRM_ATTR_SITE_NAME, site_name); } else if (cluster_name) { /* Default to cluster-name if unset */ pcmk__insert_dup(node->priv->attrs, CRM_ATTR_SITE_NAME, cluster_name); } } } static GList * extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GList *gIter = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP, NULL, NULL); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) { crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc); crm_xml_add(rsc_op, PCMK_XA_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) 
{ crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GList * find_operations(const char *rsc, const char *node, gboolean active_filter, pcmk_scheduler_t *scheduler) { GList *output = NULL; GList *intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS, NULL, NULL); pcmk_node_t *this_node = NULL; xmlNode *node_state = NULL; CRM_CHECK(status != NULL, return NULL); for (node_state = pcmk__xe_first_child(status, PCMK__XE_NODE_STATE, NULL, NULL); node_state != NULL; node_state = pcmk__xe_next(node_state, PCMK__XE_NODE_STATE)) { const char *uname = crm_element_value(node_state, PCMK_XA_UNAME); if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) { continue; } this_node = pcmk_find_node(scheduler, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); } else { determine_online_status(node_state, this_node, scheduler); } if (this_node->details->online || pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL, NULL); tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL, NULL); for (lrm_rsc = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCE, NULL, NULL); lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc, PCMK__XE_LRM_RESOURCE)) { const char *rsc_id = crm_element_value(lrm_rsc, PCMK_XA_ID); if ((rsc != NULL) && !pcmk__str_eq(rsc_id, rsc, pcmk__str_none)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } return output; } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index 426e3cd9ff..5e3c10ea58 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,925 +1,924 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include -#include #include #include "pe_status_private.h" extern bool pcmk__is_daemon; gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] scheduler Scheduler data * \param[in] node Name of node to check * * \return true if node can be fenced, false otherwise */ bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node) { if (pcmk__is_guest_or_bundle_node(node)) { /* A guest or bundle node is fenced by stopping its launcher, which is * possible if the launcher's host is either online or fenceable. 
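/* extract_operations() above keeps only the history entries that still
 * describe the resource's state: if the newest stop comes after the newest
 * start, nothing is active; otherwise everything from the newest start onward
 * is kept. A rough sketch of that filtering, assuming start_index and
 * stop_index mean "index of newest start" and "index of newest stop" as
 * calculate_active_ops() appears to provide:
 */

#include <stdio.h>

static void
print_active_ops(const char **ops, int n_ops, int start_index, int stop_index)
{
    if (start_index < stop_index) {
        /* The resource was stopped after its last start */
        printf("no active operations\n");
        return;
    }
    for (int i = start_index; i < n_ops; i++) {
        printf("active: %s\n", ops[i]);
    }
}

int
main(void)
{
    const char *history[] = {
        "start_0", "monitor_10000", "stop_0", "start_0", "monitor_10000",
    };

    /* Newest start at index 3, newest stop at index 2: keep entries 3 and 4 */
    print_active_ops(history, 5, 3, 2);

    /* Newest stop at index 2 after a start at index 0: nothing is active */
    print_active_ops(history, 3, 0, 2);
    return 0;
}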
*/ pcmk_resource_t *rsc = node->priv->remote->priv->launcher; for (GList *n = rsc->priv->active_nodes; n != NULL; n = n->next) { pcmk_node_t *launcher_node = n->data; if (!launcher_node->details->online && !pe_can_fence(scheduler, launcher_node)) { return false; } } return true; } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { return false; /* Turned off */ } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_have_fencing)) { return false; /* No devices */ } else if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) { return true; } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) { return true; } else if(node == NULL) { return false; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", pcmk__node_name(node)); return true; } crm_trace("Cannot fence %s", pcmk__node_name(node)); return false; } /*! * \internal * \brief Copy a node object * * \param[in] this_node Node object to copy * * \return Newly allocated shallow copy of this_node * \note This function asserts on errors and is guaranteed to return non-NULL. */ pcmk_node_t * pe__copy_node(const pcmk_node_t *this_node) { pcmk_node_t *new_node = NULL; pcmk__assert(this_node != NULL); new_node = pcmk__assert_alloc(1, sizeof(pcmk_node_t)); new_node->assign = pcmk__assert_alloc(1, sizeof(struct pcmk__node_assignment)); new_node->assign->probe_mode = this_node->assign->probe_mode; new_node->assign->score = this_node->assign->score; new_node->assign->count = this_node->assign->count; new_node->details = this_node->details; new_node->priv = this_node->priv; return new_node; } /*! * \internal * \brief Create a node hash table from a node list * * \param[in] list Node list * * \return Hash table equivalent of node list */ GHashTable * pe__node_list2table(const GList *list) { GHashTable *result = NULL; result = pcmk__strkey_table(NULL, free); for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { pcmk_node_t *new_node = NULL; new_node = pe__copy_node((const pcmk_node_t *) gIter->data); g_hash_table_insert(result, (gpointer) new_node->priv->id, new_node); } return result; } /*! * \internal * \brief Compare two nodes by name, with numeric portions sorted numerically * * Sort two node names case-insensitively like strcasecmp(), but with any * numeric portions of the name sorted numerically. For example, "node10" will * sort higher than "node9" but lower than "remotenode9". * * \param[in] a First node to compare (can be \c NULL) * \param[in] b Second node to compare (can be \c NULL) * * \retval -1 \c a comes before \c b (or \c a is \c NULL and \c b is not) * \retval 0 \c a and \c b are equal (or both are \c NULL) * \retval 1 \c a comes after \c b (or \c b is \c NULL and \c a is not) */ gint pe__cmp_node_name(gconstpointer a, gconstpointer b) { const pcmk_node_t *node1 = (const pcmk_node_t *) a; const pcmk_node_t *node2 = (const pcmk_node_t *) b; if ((node1 == NULL) && (node2 == NULL)) { return 0; } if (node1 == NULL) { return -1; } if (node2 == NULL) { return 1; } return pcmk__numeric_strcasecmp(node1->priv->name, node2->priv->name); } /*! 
* \internal * \brief Output node weights to stdout * * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes * \param[in,out] scheduler Scheduler data */ static void pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv->out; // Sort the nodes so the output is consistent for regression tests GList *list = g_list_sort(g_hash_table_get_values(nodes), pe__cmp_node_name); for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { const pcmk_node_t *node = (const pcmk_node_t *) gIter->data; out->message(out, "node-weight", rsc, comment, node->priv->name, pcmk_readable_score(node->assign->score)); } g_list_free(list); } /*! * \internal * \brief Log node weights at trace level * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] rsc If not NULL, include this resource's ID in logs * \param[in] comment Text description to prefix lines with * \param[in] nodes Nodes whose scores should be logged */ static void pe__log_node_weights(const char *file, const char *function, int line, const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes) { GHashTableIter iter; pcmk_node_t *node = NULL; // Don't waste time if we're not tracing at this point pcmk__if_tracing({}, return); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (rsc) { qb_log_from_external_source(function, file, "%s: %s allocation score on %s: %s", LOG_TRACE, line, 0, comment, rsc->id, pcmk__node_name(node), pcmk_readable_score(node->assign->score)); } else { qb_log_from_external_source(function, file, "%s: %s = %s", LOG_TRACE, line, 0, comment, pcmk__node_name(node), pcmk_readable_score(node->assign->score)); } } } /*! * \internal * \brief Log or output node weights * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] to_log Log if true, otherwise output * \param[in] rsc If not NULL, use this resource's ID in logs, * and show scores recursively for any children * \param[in] comment Text description to prefix lines with * \param[in] nodes Nodes whose scores should be shown * \param[in,out] scheduler Scheduler data */ void pe__show_node_scores_as(const char *file, const char *function, int line, bool to_log, const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes, pcmk_scheduler_t *scheduler) { if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { // Don't show allocation scores for orphans return; } if (nodes == NULL) { // Nothing to show return; } if (to_log) { pe__log_node_weights(file, function, line, rsc, comment, nodes); } else { pe__output_node_weights(rsc, comment, nodes, scheduler); } if (rsc == NULL) { return; } // If this resource has children, repeat recursively for each for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child = (pcmk_resource_t *) gIter->data; pe__show_node_scores_as(file, function, line, to_log, child, comment, child->priv->allowed_nodes, scheduler); } } /*! 
* \internal * \brief Compare two resources by priority * * \param[in] a First resource to compare (can be \c NULL) * \param[in] b Second resource to compare (can be \c NULL) * * \retval -1 a's priority > b's priority (or \c b is \c NULL and \c a is not) * \retval 0 a's priority == b's priority (or both \c a and \c b are \c NULL) * \retval 1 a's priority < b's priority (or \c a is \c NULL and \c b is not) */ gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b) { const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a; const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priv->priority > resource2->priv->priority) { return -1; } if (resource1->priv->priority < resource2->priv->priority) { return 1; } return 0; } static void resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag) { pcmk_node_t *match = NULL; if ((pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes) || (node->assign->probe_mode == pcmk__probe_never)) && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) { /* This string comparision may be fragile, but exclusive resources and * exclusive nodes should not have the symmetric_default constraint * applied to them. */ return; } else { for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } match = g_hash_table_lookup(rsc->priv->allowed_nodes, node->priv->id); if (match == NULL) { match = pe__copy_node(node); g_hash_table_insert(rsc->priv->allowed_nodes, (gpointer) match->priv->id, match); } match->assign->score = pcmk__add_scores(match->assign->score, score); pcmk__rsc_trace(rsc, "Enabling %s preference (%s) for %s on %s (now %s)", tag, pcmk_readable_score(score), rsc->id, pcmk__node_name(node), pcmk_readable_score(match->assign->score)); } void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag, pcmk_scheduler_t *scheduler) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (scheduler != NULL) { GList *gIter = scheduler->nodes; for (; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; pcmk_node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if ((node == NULL) && (score == -PCMK_SCORE_INFINITY) && (rsc->priv->assigned_node != NULL)) { // @TODO Should this be more like pcmk__unassign_resource()? 
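/* resource_node_score() above merges a new preference into a node's existing
 * score with pcmk__add_scores(), which saturates at plus or minus infinity
 * instead of overflowing. A standalone sketch of that saturating addition,
 * assuming the conventional score bound of 1000000 (PCMK_SCORE_INFINITY) and
 * the usual rule that a minus-infinity ban beats any preference:
 */

#include <stdio.h>

#define DEMO_INFINITY 1000000   /* assumed to mirror PCMK_SCORE_INFINITY */

static int
add_scores(int a, int b)
{
    if ((a == -DEMO_INFINITY) || (b == -DEMO_INFINITY)) {
        return -DEMO_INFINITY;          /* a ban always wins */
    }
    if ((a == DEMO_INFINITY) || (b == DEMO_INFINITY)) {
        return DEMO_INFINITY;
    }

    long long sum = (long long) a + b;

    if (sum >= DEMO_INFINITY) {
        return DEMO_INFINITY;
    }
    if (sum <= -DEMO_INFINITY) {
        return -DEMO_INFINITY;
    }
    return (int) sum;
}

int
main(void)
{
    printf("%d\n", add_scores(100, 200));                      /* 300 */
    printf("%d\n", add_scores(DEMO_INFINITY, -500));           /* +INFINITY */
    printf("%d\n", add_scores(DEMO_INFINITY, -DEMO_INFINITY)); /* -INFINITY */
    return 0;
}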
crm_info("Unassigning %s from %s", rsc->id, pcmk__node_name(rsc->priv->assigned_node)); free(rsc->priv->assigned_node); rsc->priv->assigned_node = NULL; } } time_t get_effective_time(pcmk_scheduler_t *scheduler) { if(scheduler) { if (scheduler->priv->now == NULL) { crm_trace("Recording a new 'now'"); scheduler->priv->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(scheduler->priv->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role) { enum rsc_role_e local_role = pcmk_role_unknown; const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (pcmk__str_eq(value, PCMK_ROLE_STARTED, pcmk__str_null_matches|pcmk__str_casei)) { return FALSE; } if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_TARGET_ROLE " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); return FALSE; } local_role = pcmk_parse_role(value); if (local_role == pcmk_role_unknown) { pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s " "because '%s' is not valid", rsc->id, value); return FALSE; } else if (local_role > pcmk_role_started) { if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk__rsc_promotable)) { if (local_role > pcmk_role_unpromoted) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s " "because '%s' only makes sense for promotable " "clones", rsc->id, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(pcmk_action_t *first, pcmk_action_t *then, uint32_t flags) { GList *gIter = NULL; pcmk__related_action_t *wrapper = NULL; GList *list = NULL; if (flags == pcmk__ar_none) { return FALSE; } if ((first == NULL) || (then == NULL)) { return FALSE; } crm_trace("Creating action wrappers for ordering: %s then %s", first->uuid, then->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ pcmk__assert(first != then); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = first->actions_after; for (; gIter != NULL; gIter = gIter->next) { pcmk__related_action_t *after = gIter->data; if ((after->action == then) && pcmk_any_flags_set(after->flags, flags)) { return FALSE; } } wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t)); wrapper->action = then; wrapper->flags = flags; list = first->actions_after; list = g_list_prepend(list, wrapper); first->actions_after = list; wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t)); wrapper->action = first; wrapper->flags = flags; list = then->actions_before; list = g_list_prepend(list, wrapper); then->actions_before = list; return TRUE; } void destroy_ticket(gpointer data) { pcmk__ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } pcmk__ticket_t * ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler) { pcmk__ticket_t *ticket = NULL; if (pcmk__str_empty(ticket_id)) { return NULL; } if (scheduler->priv->ticket_constraints == NULL) { scheduler->priv->ticket_constraints = pcmk__strkey_table(free, destroy_ticket); } ticket = g_hash_table_lookup(scheduler->priv->ticket_constraints, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(pcmk__ticket_t)); if (ticket == NULL) { pcmk__sched_err(scheduler, "Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creating ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->last_granted = -1; ticket->state = pcmk__strkey_table(free, free); g_hash_table_insert(scheduler->priv->ticket_constraints, pcmk__str_copy(ticket->id), ticket); } return ticket; } const char * rsc_printable_id(const pcmk_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { return rsc->id; } return pcmk__xe_id(rsc->priv->xml); } void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags) { pcmk__clear_rsc_flags(rsc, flags); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data, flags); } } void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag) { for (GList *lpc = scheduler->priv->resources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *r = (pcmk_resource_t *) lpc->data; pe__clear_resource_flags_recursive(r, flag); } } void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags) { pcmk__set_rsc_flags(rsc, flags); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data, flags); } } void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason, pcmk_action_t *dependency, pcmk_scheduler_t *scheduler) { if (!pcmk_is_set(scheduler->flags, pcmk__sched_enable_unfencing)) { /* No resources require it */ return; } else if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE, reason, FALSE, scheduler); if(dependency) { order_actions(unfence, dependency, pcmk__ar_ordered); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && 
node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, scheduler); } } } } /*! * \internal * \brief Check whether shutdown has been requested for a node * * \param[in] node Node to check * * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise * \note This differs from simply using node->details->shutdown in that it can * be used before that has been determined (and in fact to determine it), * and it can also be used to distinguish requested shutdown from implicit * shutdown of remote nodes by virtue of their connection stopping. */ bool pe__shutdown_requested(const pcmk_node_t *node) { const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL, pcmk__rsc_node_current); return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches); } /*! * \internal * \brief Update "recheck by" time in scheduler data * * \param[in] recheck Epoch time when recheck should happen * \param[in,out] scheduler Scheduler data * \param[in] reason What time is being updated for (for logs) */ void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler, const char *reason) { if ((recheck > get_effective_time(scheduler)) && ((scheduler->priv->recheck_by == 0) || (scheduler->priv->recheck_by > recheck))) { scheduler->priv->recheck_by = recheck; crm_debug("Updated next scheduler recheck to %s for %s", pcmk__trim(ctime(&recheck)), reason); } } /*! * \internal * \brief Extract nvpair blocks contained by a CIB XML element into a hash table * * \param[in] xml_obj XML element containing blocks of nvpair elements * \param[in] set_name If not NULL, only use blocks of this element * \param[in] rule_input Values used to evaluate rule criteria * (node_attrs member must be NULL if \p set_name * is PCMK_XE_META_ATTRIBUTES) * \param[out] hash Where to store extracted name/value pairs * \param[in] always_first If not NULL, process block with this ID first * \param[in,out] scheduler Scheduler data containing \p xml_obj */ void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name, const pcmk_rule_input_t *rule_input, GHashTable *hash, const char *always_first, pcmk_scheduler_t *scheduler) { crm_time_t *next_change = NULL; CRM_CHECK((set_name != NULL) && (rule_input != NULL) && (hash != NULL) && (scheduler != NULL), return); // Node attribute expressions are not allowed for meta-attributes CRM_CHECK((rule_input->node_attrs == NULL) || (strcmp(set_name, PCMK_XE_META_ATTRIBUTES) != 0), return); if (xml_obj == NULL) { return; } next_change = crm_time_new_undefined(); pcmk_unpack_nvpair_blocks(xml_obj, set_name, always_first, rule_input, hash, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, scheduler, "rule evaluation"); } crm_time_free(next_change); } bool pe__resource_is_disabled(const pcmk_resource_t *rsc) { const char *target_role = NULL; CRM_CHECK(rsc != NULL, return false); target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); if (target_role) { // If invalid, we've already logged an error when unpacking enum rsc_role_e target_role_e = pcmk_parse_role(target_role); if ((target_role_e == pcmk_role_stopped) || ((target_role_e == pcmk_role_unpromoted) && pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk__rsc_promotable))) { return true; } } return false; } /*! 
* \internal * \brief Check whether a resource is running only on given node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p rsc is running only on \p node, otherwise false */ bool pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node) { return (rsc != NULL) && pcmk__list_of_1(rsc->priv->active_nodes) && pcmk__same_node((const pcmk_node_t *) rsc->priv->active_nodes->data, node); } bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list) { if (rsc != NULL) { for (GList *ele = rsc->priv->active_nodes; ele; ele = ele->next) { pcmk_node_t *node = (pcmk_node_t *) ele->data; if (pcmk__str_in_list(node->priv->name, node_list, pcmk__str_star_matches|pcmk__str_casei)) { return true; } } } return false; } bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node) { return rsc->priv->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node); } GList * pe__filter_rsc_list(GList *rscs, GList *filter) { GList *retval = NULL; for (GList *gIter = rscs; gIter; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; /* I think the second condition is safe here for all callers of this * function. If not, it needs to move into pe__node_text. */ if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) || ((rsc->priv->parent != NULL) && pcmk__str_in_list(rsc_printable_id(rsc->priv->parent), filter, pcmk__str_star_matches))) { retval = g_list_prepend(retval, rsc); } } return retval; } GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s) { GList *nodes = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { /* Nothing was given so return a list of all node names. Or, '*' was * given. This would normally fall into the pe__unames_with_tag branch * where it will return an empty list. Catch it here instead. */ nodes = g_list_prepend(nodes, strdup("*")); } else { pcmk_node_t *node = pcmk_find_node(scheduler, s); if (node) { /* The given string was a valid uname for a node. Return a * singleton list containing just that uname. */ nodes = g_list_prepend(nodes, strdup(s)); } else { /* The given string was not a valid uname. It's either a tag or * it's a typo or something. In the first case, we'll return a * list of all the unames of the nodes with the given tag. In the * second case, we'll return a NULL pointer and nothing will * get displayed. */ nodes = pe__unames_with_tag(scheduler, s); } } return nodes; } GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s) { GList *resources = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { resources = g_list_prepend(resources, strdup("*")); } else { const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename; pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->priv->resources, s, flags); if (rsc) { /* A colon in the name we were given means we're being asked to filter * on a specific instance of a cloned resource. Put that exact string * into the filter list. Otherwise, use the printable ID of whatever * resource was found that matches what was asked for. */ if (strstr(s, ":") != NULL) { resources = g_list_prepend(resources, strdup(rsc->id)); } else { resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc))); } } else { /* The given string was not a valid resource name. It's a tag or a * typo or something. See pe__build_node_name_list() for more * detail. 
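/* pe__build_node_name_list() and pe__build_rsc_list() share one pattern: "*"
 * (or nothing) means everything, an exact name match yields a one-element
 * list, and anything else is treated as a tag to expand. A schematic version
 * of that resolution with plain strings in place of scheduler lookups (all
 * helper names below are illustrative):
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for pcmk_find_node() / pe__unames_with_tag(); the real code
 * consults the scheduler's node table and tag table. */
static bool demo_is_known_name(const char *s) { return strcmp(s, "node1") == 0; }

static const char *
demo_expand_tag(const char *s)
{
    return (strcmp(s, "webservers") == 0)? "node2 node3" : NULL;
}

/* Resolve a user-supplied string into a display filter, mirroring the
 * wildcard / exact-name / tag fallback order used above. */
static void
resolve_filter(const char *s)
{
    if ((s == NULL) || (strcmp(s, "*") == 0)) {
        printf("'%s' -> all entries\n", (s == NULL)? "(null)" : s);
    } else if (demo_is_known_name(s)) {
        printf("'%s' -> exactly that entry\n", s);
    } else {
        const char *expanded = demo_expand_tag(s);

        printf("'%s' -> %s\n", s, (expanded != NULL)? expanded : "nothing");
    }
}

int
main(void)
{
    resolve_filter("*");
    resolve_filter("node1");
    resolve_filter("webservers");
    resolve_filter("no-such-thing");
    return 0;
}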
*/ resources = pe__rscs_with_tag(scheduler, s); } } return resources; } xmlNode * pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name) { const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); const char *rsc_id = rsc->id; const pcmk_scheduler_t *scheduler = rsc->priv->scheduler; if (pcmk__is_clone(parent)) { rsc_id = pe__clone_child_id(parent); } for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { const char *value = NULL; char *op_id = NULL; /* This resource operation is not a failed probe. */ if (!pcmk_xe_mask_probe_failure(xml_op)) { continue; } /* This resource operation was not run on the given node. Note that if name is * NULL, this will always succeed. */ value = crm_element_value(xml_op, PCMK__META_ON_NODE); if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) { continue; } if (!parse_op_key(pcmk__xe_history_key(xml_op), &op_id, NULL, NULL)) { continue; // This history entry is missing an operation key } /* This resource operation's ID does not match the rsc_id we are looking for. */ if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) { free(op_id); continue; } free(op_id); return xml_op; } return NULL; } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index 56d11a55b8..96c99ba76a 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -1,142 +1,141 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #define ATTR_SET_ELEMENT "attr_set_element" typedef struct node_info_s { const char *node_name; bool promoted; } node_info_t; typedef struct { char *attr_set_type; char *attr_set_id; char *attr_name; char *attr_value; char *given_rsc_id; char *found_attr_id; pcmk_resource_t *rsc; } attr_update_data_t; enum resource_check_flags { rsc_remain_stopped = (1 << 0), rsc_unpromotable = (1 << 1), rsc_unmanaged = (1 << 2), rsc_locked = (1 << 3), rsc_node_health = (1 << 4), }; typedef struct resource_checks_s { pcmk_resource_t *rsc; // Resource being checked uint32_t flags; // Group of enum resource_check_flags const char *lock_node; // Node that resource is shutdown-locked to, if any } resource_checks_t; resource_checks_t *cli_check_resource(pcmk_resource_t *rsc, char *role_s, char *managed); /* ban */ int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role); int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role); int cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t *cib_conn, bool clear_ban_constraints, gboolean force); int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, gboolean promoted_role_only); /* print */ void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out); void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler); int cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, bool expanded); int 
cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pcmk_scheduler_t *scheduler); /* runtime */ int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node); int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pcmk_scheduler_t *scheduler); GList *cli_resource_search(pcmk_resource_t *rsc, const char *requested_name, pcmk_scheduler_t *scheduler); int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pcmk_scheduler_t *scheduler, gboolean force); int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler); int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, const pcmk_node_t *node, const char *move_lifetime, guint timeout_ms, cib_t *cib, gboolean promoted_role_only, gboolean force); int cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id, const char *host_name, const char *move_lifetime, cib_t *cib, pcmk_scheduler_t *scheduler, gboolean promoted_role_only, gboolean force); crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, const char *rsc_class, const char *rsc_prov, const char *rsc_type, const char *rsc_action, GHashTable *params, GHashTable *override_hash, guint timeout_ms, int resource_verbose, gboolean force, int check_level); crm_exit_t cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, guint timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler, int resource_verbose, gboolean force, int check_level); int cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force); int cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, xmlNode *cib_xml_orig, gboolean force); int update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler, cib_t *cib, xmlNode **cib_xml_orig); int wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib); bool resource_is_running_on(pcmk_resource_t *rsc, const char *host); void crm_resource_register_messages(pcmk__output_t *out); diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c index b6644a1779..4a2e904344 100644 --- a/tools/crm_ticket.c +++ b/tools/crm_ticket.c @@ -1,667 +1,666 @@ /* * Copyright 2012-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include GError *error = NULL; #define SUMMARY "Perform tasks related to cluster tickets\n\n" \ "Allows ticket attributes to be queried, modified and deleted." 
struct { gchar *attr_default; gchar *attr_id; char *attr_name; char *attr_value; gboolean force; char *get_attr_name; gboolean quiet; gchar *set_name; char ticket_cmd; gchar *ticket_id; gchar *xml_file; } options = { .ticket_cmd = 'S' }; GList *attr_delete; GHashTable *attr_set; bool modified = false; int cib_options = cib_sync_call; static pcmk__output_t *out = NULL; #define INDENT " " static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static gboolean attr_value_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { pcmk__str_update(&options.attr_value, optarg); if (!options.attr_name || !options.attr_value) { return TRUE; } pcmk__insert_dup(attr_set, options.attr_name, options.attr_value); pcmk__str_update(&options.attr_name, NULL); pcmk__str_update(&options.attr_value, NULL); modified = true; return TRUE; } static gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (pcmk__str_any_of(option_name, "--info", "-l", NULL)) { options.ticket_cmd = 'l'; } else if (pcmk__str_any_of(option_name, "--details", "-L", NULL)) { options.ticket_cmd = 'L'; } else if (pcmk__str_any_of(option_name, "--raw", "-w", NULL)) { options.ticket_cmd = 'w'; } else if (pcmk__str_any_of(option_name, "--query-xml", "-q", NULL)) { options.ticket_cmd = 'q'; } else if (pcmk__str_any_of(option_name, "--constraints", "-c", NULL)) { options.ticket_cmd = 'c'; } else if (pcmk__str_any_of(option_name, "--cleanup", "-C", NULL)) { options.ticket_cmd = 'C'; } return TRUE; } static gboolean delete_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { attr_delete = g_list_append(attr_delete, strdup(optarg)); modified = true; return TRUE; } static gboolean get_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { pcmk__str_update(&options.get_attr_name, optarg); options.ticket_cmd = 'G'; return TRUE; } static gboolean grant_standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (pcmk__str_any_of(option_name, "--grant", "-g", NULL)) { pcmk__insert_dup(attr_set, PCMK__XA_GRANTED, PCMK_VALUE_TRUE); modified = true; } else if (pcmk__str_any_of(option_name, "--revoke", "-r", NULL)) { pcmk__insert_dup(attr_set, PCMK__XA_GRANTED, PCMK_VALUE_FALSE); modified = true; } else if (pcmk__str_any_of(option_name, "--standby", "-s", NULL)) { pcmk__insert_dup(attr_set, PCMK_XA_STANDBY, PCMK_VALUE_TRUE); modified = true; } else if (pcmk__str_any_of(option_name, "--activate", "-a", NULL)) { pcmk__insert_dup(attr_set, PCMK_XA_STANDBY, PCMK_VALUE_FALSE); modified = true; } return TRUE; } static gboolean set_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { pcmk__str_update(&options.attr_name, optarg); if (!options.attr_name || !options.attr_value) { return TRUE; } pcmk__insert_dup(attr_set, options.attr_name, options.attr_value); pcmk__str_update(&options.attr_name, NULL); pcmk__str_update(&options.attr_value, NULL); modified = true; return TRUE; } static GOptionEntry query_entries[] = { { "info", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the information of ticket(s)", NULL }, { "details", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the details of ticket(s)", NULL }, { "raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the IDs of ticket(s)", NULL }, { 
"query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Query the XML of ticket(s)", NULL }, { "constraints", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the " PCMK_XE_RSC_TICKET " constraints that apply to ticket(s)", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "grant", 'g', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb, "Grant a ticket to this cluster site", NULL }, { "revoke", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb, "Revoke a ticket from this cluster site", NULL }, { "standby", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb, "Tell this cluster site this ticket is standby", NULL }, { "activate", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb, "Tell this cluster site this ticket is active", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "get-attr", 'G', 0, G_OPTION_ARG_CALLBACK, get_attr_cb, "Display the named attribute for a ticket", "ATTRIBUTE" }, { "set-attr", 'S', 0, G_OPTION_ARG_CALLBACK, set_attr_cb, "Set the named attribute for a ticket", "ATTRIBUTE" }, { "delete-attr", 'D', 0, G_OPTION_ARG_CALLBACK, delete_attr_cb, "Delete the named attribute for a ticket", "ATTRIBUTE" }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Delete all state of a ticket at this cluster site", NULL }, { NULL} }; static GOptionEntry addl_entries[] = { { "attr-value", 'v', 0, G_OPTION_ARG_CALLBACK, attr_value_cb, "Attribute value to use with -S", "VALUE" }, { "default", 'd', 0, G_OPTION_ARG_STRING, &options.attr_default, "(Advanced) Default attribute value to display if none is found\n" INDENT "(for use with -G)", "VALUE" }, { "force", 'f', 0, G_OPTION_ARG_NONE, &options.force, "(Advanced) Force the action to be performed", NULL }, { "ticket", 't', 0, G_OPTION_ARG_STRING, &options.ticket_id, "Ticket ID", "ID" }, { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.xml_file, NULL, NULL }, { NULL } }; static GOptionEntry deprecated_entries[] = { { "set-name", 'n', 0, G_OPTION_ARG_STRING, &options.set_name, "(Advanced) ID of the " PCMK_XE_INSTANCE_ATTRIBUTES " object to change", "ID" }, { "nvpair", 'i', 0, G_OPTION_ARG_STRING, &options.attr_id, "(Advanced) ID of the nvpair object to change/delete", "ID" }, { "quiet", 'Q', 0, G_OPTION_ARG_NONE, &options.quiet, "Print only the value on stdout", NULL }, { NULL } }; static void ticket_grant_warning(gchar *ticket_id) { out->err(out, "This command cannot help you verify whether '%s' has " "been already granted elsewhere.\n" "If you really want to grant '%s' to this site now, and " "you know what you are doing,\n" "please specify --force.", ticket_id, ticket_id); } static void ticket_revoke_warning(gchar *ticket_id) { out->err(out, "Revoking '%s' can trigger the specified '" PCMK_XA_LOSS_POLICY "'(s) relating to '%s'.\n\n" "You can check that with:\n" "crm_ticket --ticket %s --constraints\n\n" "Otherwise before revoking '%s', you may want to make '%s'" "standby with:\n" "crm_ticket --ticket %s --standby\n\n" "If you really want to revoke '%s' from this site now, and " "you know what you are doing,\n" "please specify --force.", ticket_id, ticket_id, ticket_id, ticket_id, ticket_id, ticket_id, ticket_id); } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; const char *description = "Examples:\n\n" "Display the info of tickets:\n\n" "\tcrm_ticket --info\n\n" "Display the detailed info 
of tickets:\n\n" "\tcrm_ticket --details\n\n" "Display the XML of 'ticketA':\n\n" "\tcrm_ticket --ticket ticketA --query-xml\n\n" "Display the " PCMK_XE_RSC_TICKET " constraints that apply to 'ticketA':\n\n" "\tcrm_ticket --ticket ticketA --constraints\n\n" "Grant 'ticketA' to this cluster site:\n\n" "\tcrm_ticket --ticket ticketA --grant\n\n" "Revoke 'ticketA' from this cluster site:\n\n" "\tcrm_ticket --ticket ticketA --revoke\n\n" "Make 'ticketA' standby (the cluster site will treat a granted\n" "'ticketA' as 'standby', and the dependent resources will be\n" "stopped or demoted gracefully without triggering loss-policies):\n\n" "\tcrm_ticket --ticket ticketA --standby\n\n" "Activate 'ticketA' from being standby:\n\n" "\tcrm_ticket --ticket ticketA --activate\n\n" "Get the value of the 'granted' attribute for 'ticketA':\n\n" "\tcrm_ticket --ticket ticketA --get-attr granted\n\n" "Set the value of the 'standby' attribute for 'ticketA':\n\n" "\tcrm_ticket --ticket ticketA --set-attr standby --attr-value true\n\n" "Delete the 'granted' attribute for 'ticketA':\n\n" "\tcrm_ticket --ticket ticketA --delete-attr granted\n\n" "Erase the operation history of 'ticketA' at this cluster site,\n" "causing the cluster site to 'forget' the existing ticket state:\n\n" "\tcrm_ticket --ticket ticketA --cleanup\n\n"; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); g_option_context_set_description(context, description); pcmk__add_arg_group(context, "queries", "Queries:", "Show queries", query_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command options", command_entries); pcmk__add_arg_group(context, "advanced", "Advanced Options:", "Show advanced options", advanced_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); pcmk__add_arg_group(context, "deprecated", "Deprecated Options:", "Show deprecated options", deprecated_entries); return context; } int main(int argc, char **argv) { pcmk_scheduler_t *scheduler = NULL; xmlNode *cib_xml_copy = NULL; cib_t *cib_conn = NULL; crm_exit_t exit_code = CRM_EX_OK; int rc = pcmk_rc_ok; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = NULL; GOptionContext *context = NULL; gchar **processed_args = NULL; attr_set = pcmk__strkey_table(free, free); attr_delete = NULL; args = pcmk__new_common_args(SUMMARY); context = build_arg_context(args, &output_group); processed_args = pcmk__cmdline_preproc(argv, "dintvxCDGS"); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } pcmk__cli_init_logging("crm_ticket", args->verbosity); rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } pe__register_messages(out); pcmk__register_lib_messages(out); if (args->version) { out->version(out, false); goto done; } scheduler = pe_new_working_set(); if (scheduler == NULL) { rc = errno; exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not allocate scheduler data: %s", pcmk_rc_str(rc)); goto done; } pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts); cib_conn = cib_new(); if (cib_conn == NULL) { exit_code = CRM_EX_DISCONNECT; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not connect to the CIB manager"); 
goto done; } rc = cib__signon_attempts(cib_conn, cib_command, 5); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not connect to the CIB: %s", pcmk_rc_str(rc)); goto done; } if (options.xml_file != NULL) { cib_xml_copy = pcmk__xml_read(options.xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not get local CIB: %s", pcmk_rc_str(rc)); goto done; } } rc = pcmk__update_configured_schema(&cib_xml_copy, false); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not update local CIB to latest schema version"); goto done; } scheduler->input = cib_xml_copy; scheduler->priv->now = crm_time_new(NULL); cluster_status(scheduler); /* For recording the tickets that are referenced in PCMK_XE_RSC_TICKET * constraints but have never been granted yet. */ pcmk__unpack_constraints(scheduler); if (options.ticket_cmd == 'l' || options.ticket_cmd == 'L' || options.ticket_cmd == 'w') { bool raw = false; bool details = false; if (options.ticket_cmd == 'L') { details = true; } else if (options.ticket_cmd == 'w') { raw = true; } rc = pcmk__ticket_info(out, scheduler, options.ticket_id, details, raw); exit_code = pcmk_rc2exitc(rc); if (rc == ENXIO) { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "No such ticket '%s'", options.ticket_id); } else if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not get ticket info: %s", pcmk_rc_str(rc)); } } else if (options.ticket_cmd == 'q') { rc = pcmk__ticket_state(out, cib_conn, options.ticket_id); if (rc != pcmk_rc_ok && rc != pcmk_rc_duplicate_id) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not query ticket XML: %s", pcmk_rc_str(rc)); } else { exit_code = CRM_EX_OK; } } else if (options.ticket_cmd == 'c') { rc = pcmk__ticket_constraints(out, cib_conn, options.ticket_id); exit_code = pcmk_rc2exitc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not show ticket constraints: %s", pcmk_rc_str(rc)); } } else if (options.ticket_cmd == 'G') { if (options.ticket_id == NULL) { exit_code = CRM_EX_NOSUCH; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply ticket ID with -t"); goto done; } rc = pcmk__ticket_get_attr(out, scheduler, options.ticket_id, options.get_attr_name, options.attr_default); exit_code = pcmk_rc2exitc(rc); } else if (options.ticket_cmd == 'C') { if (options.ticket_id == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply ticket ID with -t"); goto done; } rc = pcmk__ticket_delete(out, cib_conn, scheduler, options.ticket_id, options.force); exit_code = pcmk_rc2exitc(rc); switch (rc) { case ENXIO: g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "No such ticket '%s'", options.ticket_id); break; case EACCES: ticket_revoke_warning(options.ticket_id); break; case pcmk_rc_ok: case pcmk_rc_duplicate_id: break; default: g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not clean up ticket: %s", pcmk_rc_str(rc)); break; } } else if (modified) { if (options.ticket_id == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply ticket ID with -t"); goto done; } if (options.attr_value && (pcmk__str_empty(options.attr_name))) { exit_code = 
CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply attribute name with -S for -v %s", options.attr_value); goto done; } if (options.attr_name && (pcmk__str_empty(options.attr_value))) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply attribute value with -v for -S %s", options.attr_name); goto done; } if (attr_delete != NULL) { rc = pcmk__ticket_remove_attr(out, cib_conn, scheduler, options.ticket_id, attr_delete, options.force); if (rc == EACCES) { ticket_revoke_warning(options.ticket_id); exit_code = CRM_EX_UNSAFE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Ticket modification not allowed without --force"); goto done; } } else { rc = pcmk__ticket_set_attr(out, cib_conn, scheduler, options.ticket_id, attr_set, options.force); if (rc == EACCES) { const char *value = NULL; value = g_hash_table_lookup(attr_set, PCMK__XA_GRANTED); if (crm_is_true(value)) { ticket_grant_warning(options.ticket_id); } else { ticket_revoke_warning(options.ticket_id); } exit_code = CRM_EX_UNSAFE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Ticket modification not allowed without --force"); goto done; } } exit_code = pcmk_rc2exitc(rc); if (rc != pcmk_rc_ok && error == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not modify ticket: %s", pcmk_rc_str(rc)); } } else if (options.ticket_cmd == 'S') { /* Correct usage was handled in the "if (modified)" block above, so * this is just for reporting usage errors */ if (pcmk__str_empty(options.attr_name)) { // We only get here if ticket_cmd was left as default exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply a command"); goto done; } if (options.ticket_id == NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply ticket ID with -t"); goto done; } if (pcmk__str_empty(options.attr_value)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply value with -v for -S %s", options.attr_name); goto done; } } else { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Unknown command: %c", options.ticket_cmd); } done: if (attr_set) { g_hash_table_destroy(attr_set); } attr_set = NULL; if (attr_delete) { g_list_free_full(attr_delete, free); } attr_delete = NULL; pe_free_working_set(scheduler); scheduler = NULL; cib__clean_up_connection(&cib_conn); g_strfreev(processed_args); pcmk__free_arg_context(context); g_free(options.attr_default); g_free(options.attr_id); free(options.attr_name); free(options.attr_value); free(options.get_attr_name); g_free(options.set_name); g_free(options.ticket_id); g_free(options.xml_file); pcmk__output_and_clear_error(&error, out); if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(exit_code); }
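Note on the -S/-v handling above: set_attr_cb() and attr_value_cb() each stash the half of the name/value pair they receive and commit the pair to attr_set only once both halves are present, which is why -S and -v may be given in either order. The following is a minimal, self-contained sketch of that pattern, not part of the patch; the names pending_name, pending_value and stash_or_commit are illustrative only and are not Pacemaker APIs.

/* Sketch: commit a name/value pair to a table only once both halves arrive,
 * regardless of which half is seen first. */
#include <glib.h>
#include <stdio.h>

static char *pending_name = NULL;
static char *pending_value = NULL;

static void
stash_or_commit(GHashTable *attrs, const char *name, const char *value)
{
    if (name != NULL) {
        g_free(pending_name);
        pending_name = g_strdup(name);
    }
    if (value != NULL) {
        g_free(pending_value);
        pending_value = g_strdup(value);
    }
    if ((pending_name != NULL) && (pending_value != NULL)) {
        /* Both halves are known: hand ownership to the table and reset the stash */
        g_hash_table_insert(attrs, pending_name, pending_value);
        pending_name = NULL;
        pending_value = NULL;
    }
}

int
main(void)
{
    GHashTable *attrs = g_hash_table_new_full(g_str_hash, g_str_equal,
                                              g_free, g_free);

    /* Mirrors "-S standby -v true"; reversing the two calls mirrors "-v true -S standby" */
    stash_or_commit(attrs, "standby", NULL);
    stash_or_commit(attrs, NULL, "true");

    printf("standby=%s\n", (char *) g_hash_table_lookup(attrs, "standby"));
    g_hash_table_destroy(attrs);
    return 0;
}

Built against GLib (for example, gcc sketch.c $(pkg-config --cflags --libs glib-2.0)), this should print standby=true for either ordering of the two calls.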