diff --git a/include/crm/common/scheduler.h b/include/crm/common/scheduler.h
index d4dffe2769..ba58fdec06 100644
--- a/include/crm/common/scheduler.h
+++ b/include/crm/common/scheduler.h
@@ -1,125 +1,124 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#ifndef PCMK__CRM_COMMON_SCHEDULER__H
#define PCMK__CRM_COMMON_SCHEDULER__H

#include <sys/types.h>              // time_t
#include <libxml/tree.h>            // xmlNode
#include <glib.h>                   // guint, GList, GHashTable

#include <crm/common/iso8601.h>     // crm_time_t

#include <crm/common/actions.h>
#include <crm/common/nodes.h>
#include <crm/common/resources.h>
#include <crm/common/roles.h>
#include <crm/common/rules.h>
#include <crm/common/scheduler_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \file
 * \brief Scheduler API
 * \ingroup core
 */

// NOTE: sbd (as of at least 1.5.2) uses this enum
//! Possible responses to loss of quorum
enum pe_quorum_policy {
    pcmk_no_quorum_freeze,  //!< Do not recover resources from outside partition
    pcmk_no_quorum_stop,    //!< Stop all resources in partition
    pcmk_no_quorum_ignore,  //!< Act as if partition still holds quorum
    pcmk_no_quorum_fence,   //!< Fence all nodes in partition
    pcmk_no_quorum_demote,  //!< Demote promotable resources and stop all others

#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
    // NOTE: sbd (as of at least 1.5.2) uses this value
    //! \deprecated Use pcmk_no_quorum_freeze instead
    no_quorum_freeze    = pcmk_no_quorum_freeze,

    // NOTE: sbd (as of at least 1.5.2) uses this value
    //! \deprecated Use pcmk_no_quorum_stop instead
    no_quorum_stop      = pcmk_no_quorum_stop,

    // NOTE: sbd (as of at least 1.5.2) uses this value
    //! \deprecated Use pcmk_no_quorum_ignore instead
    no_quorum_ignore    = pcmk_no_quorum_ignore,

    //! \deprecated Use pcmk_no_quorum_fence instead
    no_quorum_suicide   = pcmk_no_quorum_fence,

    // NOTE: sbd (as of at least 1.5.2) uses this value
    //! \deprecated Use pcmk_no_quorum_demote instead
    no_quorum_demote    = pcmk_no_quorum_demote,
#endif
};

//! \internal Do not use
typedef struct pcmk__scheduler_private pcmk__scheduler_private_t;

/* Implementation of pcmk_scheduler_t
 *
 * @COMPAT Drop this struct once all members are moved to
 * pcmk__scheduler_private_t, and repoint pcmk_scheduler_t to that
 */
//!@{
//! \deprecated Do not use (public access will be removed in a future release)
struct pcmk__scheduler {
    // Be careful about when each piece of information is available and final

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Set scheduler input with pcmk_set_scheduler_cib() instead
    xmlNode *input;                 // CIB XML

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_get_dc() instead
    pcmk_node_t *dc_node;           // Node object for DC

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_has_quorum() to check quorum
    uint64_t flags;                 // Group of enum pcmk__scheduler_flags

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_get_no_quorum_policy() to get no-quorum policy
    enum pe_quorum_policy no_quorum_policy; // Response to loss of quorum

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_find_node() to find a node instead
    GList *nodes;                   // Nodes in cluster

-   time_t recheck_by;              // Hint to controller when to reschedule
    int ninstances;                 // Total number of resource instances
    guint shutdown_lock;            // How long to lock resources (seconds)
    int priority_fencing_delay;     // Priority fencing delay

    pcmk__scheduler_private_t *priv;    // For Pacemaker use only

    guint node_pending_timeout;     // Pending join times out after this (ms)
};
//!@}

pcmk_node_t *pcmk_get_dc(const pcmk_scheduler_t *scheduler);
enum pe_quorum_policy pcmk_get_no_quorum_policy(const pcmk_scheduler_t
                                                *scheduler);
int pcmk_set_scheduler_cib(pcmk_scheduler_t *scheduler, xmlNode *cib);

bool pcmk_has_quorum(const pcmk_scheduler_t *scheduler);
pcmk_node_t *pcmk_find_node(const pcmk_scheduler_t *scheduler,
                            const char *node_name);

#ifdef __cplusplus
}
#endif

#endif // PCMK__CRM_COMMON_SCHEDULER__H

diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
index 5a99b4419d..2828b5e0f4 100644
--- a/include/crm/common/scheduler_internal.h
+++ b/include/crm/common/scheduler_internal.h
@@ -1,284 +1,285 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
#define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef __cplusplus
extern "C" {
#endif

enum pcmk__check_parameters {
    /* Clear fail count if parameters changed for un-expired start or monitor
     * last_failure.
     */
    pcmk__check_last_failure,

    /* Clear fail count if parameters changed for start, monitor, promote, or
     * migrate_from actions for active resources.
     */
    pcmk__check_active,
};

// Scheduling options and conditions
enum pcmk__scheduler_flags {
    // No scheduler flags set (compare with equality rather than bit set)
    pcmk__sched_none                    = 0ULL,

    /* These flags are dynamically determined conditions */

    // Whether partition has quorum (via \c PCMK_XA_HAVE_QUORUM attribute)
    //! \deprecated Call pcmk_has_quorum() to check quorum instead
    pcmk__sched_quorate                 = (1ULL << 0),

    // Whether cluster is symmetric (via symmetric-cluster property)
    pcmk__sched_symmetric_cluster       = (1ULL << 1),

    // Whether scheduling encountered a non-configuration error
    pcmk__sched_processing_error        = (1ULL << 2),

    // Whether cluster is in maintenance mode (via maintenance-mode property)
    pcmk__sched_in_maintenance          = (1ULL << 3),

    // Whether fencing is enabled (via stonith-enabled property)
    pcmk__sched_fencing_enabled         = (1ULL << 4),

    // Whether cluster has a fencing resource (via CIB resources)
    /*!
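 */

/* Aside (illustration only, not part of this patch): these flag values live in
 * the public scheduler->flags word and are meant to be tested with the flag
 * helpers rather than raw bit arithmetic, as the rest of this diff does, e.g.:
 *
 *     if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
 *         // fencing is configured on (hypothetical caller)
 *     }
 */

/*!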
\deprecated To indicate the cluster has a fencing resource, add either a * fencing resource configuration or the have-watchdog cluster option to the * input CIB */ pcmk__sched_have_fencing = (1ULL << 5), // Whether any resource provides or requires unfencing (via CIB resources) pcmk__sched_enable_unfencing = (1ULL << 6), // Whether concurrent fencing is allowed (via concurrent-fencing property) pcmk__sched_concurrent_fencing = (1ULL << 7), /* * Whether resources removed from the configuration should be stopped (via * stop-orphan-resources property) */ pcmk__sched_stop_removed_resources = (1ULL << 8), /* * Whether recurring actions removed from the configuration should be * cancelled (via stop-orphan-actions property) */ pcmk__sched_cancel_removed_actions = (1ULL << 9), // Whether to stop all resources (via stop-all-resources property) pcmk__sched_stop_all = (1ULL << 10), // Whether scheduler processing encountered a warning pcmk__sched_processing_warning = (1ULL << 11), /* * Whether start failure should be treated as if * \c PCMK_META_MIGRATION_THRESHOLD is 1 (via * \c PCMK_OPT_START_FAILURE_IS_FATAL property) */ pcmk__sched_start_failure_fatal = (1ULL << 12), // Unused pcmk__sched_remove_after_stop = (1ULL << 13), // Whether unseen nodes should be fenced (via startup-fencing property) pcmk__sched_startup_fencing = (1ULL << 14), /* * Whether resources should be left stopped when their node shuts down * cleanly (via shutdown-lock property) */ pcmk__sched_shutdown_lock = (1ULL << 15), /* * Whether resources' current state should be probed (when unknown) before * scheduling any other actions (via the enable-startup-probes property) */ pcmk__sched_probe_resources = (1ULL << 16), // Whether the CIB status section has been parsed yet pcmk__sched_have_status = (1ULL << 17), // Whether the cluster includes any Pacemaker Remote nodes (via CIB) pcmk__sched_have_remote_nodes = (1ULL << 18), /* The remaining flags are scheduling options that must be set explicitly */ /* * Whether to skip unpacking the CIB status section and stop the scheduling * sequence after applying node-specific location criteria (skipping * assignment, ordering, actions, etc.). */ pcmk__sched_location_only = (1ULL << 20), // Whether sensitive resource attributes have been masked pcmk__sched_sanitized = (1ULL << 21), // Skip counting of total, disabled, and blocked resource instances pcmk__sched_no_counts = (1ULL << 23), // Whether node scores should be output instead of logged pcmk__sched_output_scores = (1ULL << 25), // Whether to show node and resource utilization (in log or output) pcmk__sched_show_utilization = (1ULL << 26), /* * Whether to stop the scheduling sequence after unpacking the CIB, * calculating cluster status, and applying node health (skipping * applying node-specific location criteria, assignment, etc.) 
*/ pcmk__sched_validate_only = (1ULL << 27), }; // Implementation of pcmk__scheduler_private_t struct pcmk__scheduler_private { // Be careful about when each piece of information is available and final char *local_node_name; // Name of node running scheduler (if known) crm_time_t *now; // Time to use when evaluating rules pcmk__output_t *out; // Output object for displaying messages GHashTable *options; // Cluster options const char *fence_action; // Default fencing action int fence_timeout_ms; // Value of stonith-timeout property in ms const char *placement_strategy; // Value of placement-strategy property xmlNode *rsc_defaults; // Configured resource defaults xmlNode *op_defaults; // Configured operation defaults GList *resources; // Resources in cluster GHashTable *templates; // Key = template ID, value = resource list GHashTable *tags; // Key = tag ID, value = element list GList *actions; // All scheduled actions GHashTable *singletons; // Scheduled non-resource actions int next_action_id; // Counter used as ID for actions xmlNode *failed; // History entries of failed actions GList *param_check; // History entries that need to be checked GList *stop_needed; // Containers that need stop actions GList *location_constraints; // Location constraints GList *colocation_constraints; // Colocation constraints GList *ordering_constraints; // Ordering constraints GHashTable *ticket_constraints; // Key = ticket ID, value = pcmk__ticket_t int next_ordering_id; // Counter used as ID for orderings int blocked_resources; // Number of blocked resources in cluster int disabled_resources; // Number of disabled resources in cluster + time_t recheck_by; // Hint to controller when to reschedule xmlNode *graph; // Transition graph int synapse_count; // Number of transition graph synapses }; // Group of enum pcmk__warnings flags for warnings we want to log once extern uint32_t pcmk__warnings; /*! * \internal * \brief Log a resource-tagged message at info severity * * \param[in] rsc Tag message with this resource's ID * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__rsc_info(rsc, fmt, args...) \ crm_log_tag(LOG_INFO, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args) /*! * \internal * \brief Log a resource-tagged message at debug severity * * \param[in] rsc Tag message with this resource's ID * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__rsc_debug(rsc, fmt, args...) \ crm_log_tag(LOG_DEBUG, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args) /*! * \internal * \brief Log a resource-tagged message at trace severity * * \param[in] rsc Tag message with this resource's ID * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__rsc_trace(rsc, fmt, args...) \ crm_log_tag(LOG_TRACE, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args) /*! * \internal * \brief Log an error and remember that current scheduler input has errors * * \param[in,out] scheduler Scheduler data * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__sched_err(scheduler, fmt...) do { \ pcmk__set_scheduler_flags((scheduler), \ pcmk__sched_processing_error); \ crm_err(fmt); \ } while (0) /*! * \internal * \brief Log a warning and remember that current scheduler input has warnings * * \param[in,out] scheduler Scheduler data * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__sched_warn(scheduler, fmt...) do { \ pcmk__set_scheduler_flags((scheduler), \ pcmk__sched_processing_warning); \ crm_warn(fmt); \ } while (0) /*! 
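 */

/* Aside (illustration only): pcmk__sched_err() and pcmk__sched_warn() both log
 * and latch a processing flag; pcmk__log_transition_summary() in
 * lib/pacemaker/pcmk_graph_producer.c below tests those flags to report the
 * transition as calculated "(with errors)" or "(with warnings)". A
 * hypothetical caller:
 *
 *     pcmk__sched_warn(scheduler, "Ignoring invalid value '%s'", value);
 *     // scheduler->flags now includes pcmk__sched_processing_warning
 */

/*!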
* \internal * \brief Set scheduler flags * * \param[in,out] scheduler Scheduler data * \param[in] flags_to_set Group of enum pcmk__scheduler_flags to set */ #define pcmk__set_scheduler_flags(scheduler, flags_to_set) do { \ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", crm_system_name, \ (scheduler)->flags, (flags_to_set), #flags_to_set); \ } while (0) /*! * \internal * \brief Clear scheduler flags * * \param[in,out] scheduler Scheduler data * \param[in] flags_to_clear Group of enum pcmk__scheduler_flags to clear */ #define pcmk__clear_scheduler_flags(scheduler, flags_to_clear) do { \ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", crm_system_name, \ (scheduler)->flags, (flags_to_clear), #flags_to_clear); \ } while (0) #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c index 8194e961ab..6dbbf09ad8 100644 --- a/lib/pacemaker/pcmk_graph_producer.c +++ b/lib/pacemaker/pcmk_graph_producer.c @@ -1,1111 +1,1111 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include "libpacemaker_private.h" // Convenience macros for logging action properties #define action_type_str(flags) \ (pcmk_is_set((flags), pcmk__action_pseudo)? "pseudo-action" : "action") #define action_optional_str(flags) \ (pcmk_is_set((flags), pcmk__action_optional)? "optional" : "required") #define action_runnable_str(flags) \ (pcmk_is_set((flags), pcmk__action_runnable)? "runnable" : "unrunnable") #define action_node_str(a) \ (((a)->node == NULL)? "no node" : (a)->node->priv->name) /*! * \internal * \brief Add an XML node tag for a specified ID * * \param[in] id Node UUID to add * \param[in,out] xml Parent XML tag to add to */ static xmlNode* add_node_to_xml_by_id(const char *id, xmlNode *xml) { xmlNode *node_xml; node_xml = pcmk__xe_create(xml, PCMK_XE_NODE); crm_xml_add(node_xml, PCMK_XA_ID, id); return node_xml; } /*! * \internal * \brief Add an XML node tag for a specified node * * \param[in] node Node to add * \param[in,out] xml XML to add node to */ static void add_node_to_xml(const pcmk_node_t *node, void *xml) { add_node_to_xml_by_id(node->priv->id, (xmlNode *) xml); } /*! * \internal * \brief Count (optionally add to XML) nodes needing maintenance state update * * \param[in,out] xml Parent XML tag to add to, if any * \param[in] scheduler Scheduler data * * \return Count of nodes added * \note Only Pacemaker Remote nodes are considered currently */ static int add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler) { xmlNode *maintenance = NULL; int count = 0; if (xml != NULL) { maintenance = pcmk__xe_create(xml, PCMK__XE_MAINTENANCE); } for (const GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) { const pcmk_node_t *node = iter->data; if (!pcmk__is_pacemaker_remote_node(node)) { continue; } if ((node->details->maintenance && !pcmk_is_set(node->priv->flags, pcmk__node_remote_maint)) || (!node->details->maintenance && pcmk_is_set(node->priv->flags, pcmk__node_remote_maint))) { if (maintenance != NULL) { crm_xml_add(add_node_to_xml_by_id(node->priv->id, maintenance), PCMK__XA_NODE_IN_MAINTENANCE, (node->details->maintenance? 
"1" : "0")); } count++; } } crm_trace("%s %d nodes in need of maintenance mode update in state", ((maintenance == NULL)? "Counted" : "Added"), count); return count; } /*! * \internal * \brief Add pseudo action with nodes needing maintenance state update * * \param[in,out] scheduler Scheduler data */ static void add_maintenance_update(pcmk_scheduler_t *scheduler) { pcmk_action_t *action = NULL; if (add_maintenance_nodes(NULL, scheduler) != 0) { action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler); pcmk__set_action_flags(action, pcmk__action_always_in_graph); } } /*! * \internal * \brief Add XML with nodes that an action is expected to bring down * * If a specified action is expected to bring any nodes down, add an XML block * with their UUIDs. When a node is lost, this allows the controller to * determine whether it was expected. * * \param[in,out] xml Parent XML tag to add to * \param[in] action Action to check for downed nodes */ static void add_downed_nodes(xmlNode *xml, const pcmk_action_t *action) { CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL), return); if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) { /* Shutdown makes the action's node down */ xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED); add_node_to_xml_by_id(action->node->priv->id, downed); } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) { /* Fencing makes the action's node and any hosted guest nodes down */ const char *fence = g_hash_table_lookup(action->meta, PCMK__META_STONITH_ACTION); if (pcmk__is_fencing_action(fence)) { xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED); add_node_to_xml_by_id(action->node->priv->id, downed); pe_foreach_guest_node(action->node->priv->scheduler, action->node, add_node_to_xml, downed); } } else if ((action->rsc != NULL) && pcmk_is_set(action->rsc->flags, pcmk__rsc_is_remote_connection) && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) { /* Stopping a remote connection resource makes connected node down, * unless it's part of a migration */ GList *iter; pcmk_action_t *input; bool migrating = false; for (iter = action->actions_before; iter != NULL; iter = iter->next) { input = ((pcmk__related_action_t *) iter->data)->action; if ((input->rsc != NULL) && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none) && pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_none)) { migrating = true; break; } } if (!migrating) { xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED); add_node_to_xml_by_id(action->rsc->id, downed); } } } /*! * \internal * \brief Create a transition graph operation key for a clone action * * \param[in] action Clone action * \param[in] interval_ms Action interval in milliseconds * * \return Newly allocated string with transition graph operation key */ static char * clone_op_key(const pcmk_action_t *action, guint interval_ms) { if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_operation"); return pcmk__notify_key(action->rsc->priv->history_id, n_type, n_task); } return pcmk__op_key(action->rsc->priv->history_id, pcmk__s(action->cancel_task, action->task), interval_ms); } /*! 
* \internal * \brief Add node details to transition graph action XML * * \param[in] action Scheduled action * \param[in,out] xml Transition graph action XML for \p action */ static void add_node_details(const pcmk_action_t *action, xmlNode *xml) { pcmk_node_t *router_node = pcmk__connection_host_for_action(action); crm_xml_add(xml, PCMK__META_ON_NODE, action->node->priv->name); crm_xml_add(xml, PCMK__META_ON_NODE_UUID, action->node->priv->id); if (router_node != NULL) { crm_xml_add(xml, PCMK__XA_ROUTER_NODE, router_node->priv->name); } } /*! * \internal * \brief Add resource details to transition graph action XML * * \param[in] action Scheduled action * \param[in,out] action_xml Transition graph action XML for \p action */ static void add_resource_details(const pcmk_action_t *action, xmlNode *action_xml) { xmlNode *rsc_xml = NULL; const char *attr_list[] = { PCMK_XA_CLASS, PCMK_XA_PROVIDER, PCMK_XA_TYPE, }; /* If a resource is locked to a node via PCMK_OPT_SHUTDOWN_LOCK, mark its * actions so the controller can preserve the lock when the action * completes. */ if (pcmk__action_locks_rsc_to_node(action)) { crm_xml_add_ll(action_xml, PCMK_OPT_SHUTDOWN_LOCK, (long long) action->rsc->priv->lock_time); } // List affected resource rsc_xml = pcmk__xe_create(action_xml, (const char *) action->rsc->priv->xml->name); if (pcmk_is_set(action->rsc->flags, pcmk__rsc_removed) && (action->rsc->priv->history_id != NULL)) { /* Use the numbered instance name here, because if there is more * than one instance on a node, we need to make sure the command * goes to the right one. * * This is important even for anonymous clones, because the clone's * unique meta-attribute might have just been toggled from on to * off. */ crm_debug("Using orphan clone name %s instead of history ID %s", action->rsc->id, action->rsc->priv->history_id); crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->priv->history_id); crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id); } else if (!pcmk_is_set(action->rsc->flags, pcmk__rsc_unique)) { const char *xml_id = pcmk__xe_id(action->rsc->priv->xml); crm_debug("Using anonymous clone name %s for %s (aka %s)", xml_id, action->rsc->id, action->rsc->priv->history_id); /* ID is what we'd like client to use * LONG_ID is what they might know it as instead * * LONG_ID is only strictly needed /here/ during the * transition period until all nodes in the cluster * are running the new software /and/ have rebooted * once (meaning that they've only ever spoken to a DC * supporting this feature). * * If anyone toggles the unique flag to 'on', the * 'instance free' name will correspond to an orphan * and fall into the clause above instead */ crm_xml_add(rsc_xml, PCMK_XA_ID, xml_id); if ((action->rsc->priv->history_id != NULL) && !pcmk__str_eq(xml_id, action->rsc->priv->history_id, pcmk__str_none)) { crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->priv->history_id); } else { crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id); } } else { CRM_ASSERT(action->rsc->priv->history_id == NULL); crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->id); } for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) { crm_xml_add(rsc_xml, attr_list[lpc], g_hash_table_lookup(action->rsc->priv->meta, attr_list[lpc])); } } /*! 
* \internal * \brief Add action attributes to transition graph action XML * * \param[in,out] action Scheduled action * \param[in,out] action_xml Transition graph action XML for \p action */ static void add_action_attributes(pcmk_action_t *action, xmlNode *action_xml) { xmlNode *args_xml = NULL; pcmk_resource_t *rsc = action->rsc; /* We create free-standing XML to start, so we can sort the attributes * before adding it to action_xml, which keeps the scheduler regression * test graphs comparable. */ args_xml = pcmk__xe_create(action_xml, PCMK__XE_ATTRIBUTES); crm_xml_add(args_xml, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); g_hash_table_foreach(action->extra, hash2field, args_xml); if ((rsc != NULL) && (action->node != NULL)) { // Get the resource instance attributes, evaluated properly for node GHashTable *params = pe_rsc_params(rsc, action->node, rsc->priv->scheduler); pcmk__substitute_remote_addr(rsc, params); g_hash_table_foreach(params, hash2smartfield, args_xml); } else if ((rsc != NULL) && (rsc->priv->variant <= pcmk__rsc_variant_primitive)) { GHashTable *params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler); g_hash_table_foreach(params, hash2smartfield, args_xml); } g_hash_table_foreach(action->meta, hash2metafield, args_xml); if (rsc != NULL) { pcmk_resource_t *parent = rsc; while (parent != NULL) { parent->priv->cmds->add_graph_meta(parent, args_xml); parent = parent->priv->parent; } pcmk__add_guest_meta_to_xml(args_xml, action); } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none) && (action->node != NULL)) { /* Pass the node's attributes as meta-attributes. * * @TODO: Determine whether it is still necessary to do this. It was * added in 33d99707, probably for the libfence-based implementation in * c9a90bd, which is no longer used. */ g_hash_table_foreach(action->node->priv->attrs, hash2metafield, args_xml); } pcmk__xe_sort_attrs(args_xml); } /*! 
* \internal * \brief Create the transition graph XML for a scheduled action * * \param[in,out] parent Parent XML element to add action to * \param[in,out] action Scheduled action * \param[in] skip_details If false, add action details as sub-elements * \param[in] scheduler Scheduler data */ static void create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details, const pcmk_scheduler_t *scheduler) { bool needs_node_info = true; bool needs_maintenance_info = false; xmlNode *action_xml = NULL; if ((action == NULL) || (scheduler == NULL)) { return; } // Create the top-level element based on task if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) { /* All fences need node info; guest node fences are pseudo-events */ if (pcmk_is_set(action->flags, pcmk__action_pseudo)) { action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT); } else { action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT); } } else if (pcmk__str_any_of(action->task, PCMK_ACTION_DO_SHUTDOWN, PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) { action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT); } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { // CIB-only clean-up for shutdown locks action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT); crm_xml_add(action_xml, PCMK__XA_MODE, PCMK__VALUE_CIB); } else if (pcmk_is_set(action->flags, pcmk__action_pseudo)) { if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES, pcmk__str_none)) { needs_maintenance_info = true; } action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT); needs_node_info = false; } else { action_xml = pcmk__xe_create(parent, PCMK__XE_RSC_OP); } crm_xml_add_int(action_xml, PCMK_XA_ID, action->id); crm_xml_add(action_xml, PCMK_XA_OPERATION, action->task); if ((action->rsc != NULL) && (action->rsc->priv->history_id != NULL)) { char *clone_key = NULL; guint interval_ms; if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0, &interval_ms) != pcmk_rc_ok) { interval_ms = 0; } clone_key = clone_op_key(action, interval_ms); crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, clone_key); crm_xml_add(action_xml, "internal_" PCMK__XA_OPERATION_KEY, action->uuid); free(clone_key); } else { crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, action->uuid); } if (needs_node_info && (action->node != NULL)) { add_node_details(action, action_xml); pcmk__insert_dup(action->meta, PCMK__META_ON_NODE, action->node->priv->name); pcmk__insert_dup(action->meta, PCMK__META_ON_NODE_UUID, action->node->priv->id); } if (skip_details) { return; } if ((action->rsc != NULL) && !pcmk_is_set(action->flags, pcmk__action_pseudo)) { // This is a real resource action, so add resource details add_resource_details(action, action_xml); } /* List any attributes in effect */ add_action_attributes(action, action_xml); /* List any nodes this action is expected to make down */ if (needs_node_info && (action->node != NULL)) { add_downed_nodes(action_xml, action); } if (needs_maintenance_info) { add_maintenance_nodes(action_xml, scheduler); } } /*! 
* \internal * \brief Check whether an action should be added to the transition graph * * \param[in,out] action Action to check * * \return true if action should be added to graph, otherwise false */ static bool should_add_action_to_graph(pcmk_action_t *action) { if (!pcmk_is_set(action->flags, pcmk__action_runnable)) { crm_trace("Ignoring action %s (%d): unrunnable", action->uuid, action->id); return false; } if (pcmk_is_set(action->flags, pcmk__action_optional) && !pcmk_is_set(action->flags, pcmk__action_always_in_graph)) { crm_trace("Ignoring action %s (%d): optional", action->uuid, action->id); return false; } /* Actions for unmanaged resources should be excluded from the graph, * with the exception of monitors and cancellation of recurring monitors. */ if ((action->rsc != NULL) && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed) && !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { const char *interval_ms_s; /* A cancellation of a recurring monitor will get here because the task * is cancel rather than monitor, but the interval can still be used to * recognize it. The interval has been normalized to milliseconds by * this point, so a string comparison is sufficient. */ interval_ms_s = g_hash_table_lookup(action->meta, PCMK_META_INTERVAL); if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) { crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)", action->uuid, action->id, action->rsc->id); return false; } } /* Always add pseudo-actions, fence actions, and shutdown actions (already * determined to be required and runnable by this point) */ if (pcmk_is_set(action->flags, pcmk__action_pseudo) || pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH, PCMK_ACTION_DO_SHUTDOWN, NULL)) { return true; } if (action->node == NULL) { pcmk__sched_err(action->scheduler, "Skipping action %s (%d) " "because it was not assigned to a node (bug?)", action->uuid, action->id); pcmk__log_action("Unassigned", action, false); return false; } if (pcmk_is_set(action->flags, pcmk__action_on_dc)) { crm_trace("Action %s (%d) should be dumped: " "can run on DC instead of %s", action->uuid, action->id, pcmk__node_name(action->node)); } else if (pcmk__is_guest_or_bundle_node(action->node) && !pcmk_is_set(action->node->priv->flags, pcmk__node_remote_reset)) { crm_trace("Action %s (%d) should be dumped: " "assuming will be runnable on guest %s", action->uuid, action->id, pcmk__node_name(action->node)); } else if (!action->node->details->online) { pcmk__sched_err(action->scheduler, "Skipping action %s (%d) " "because it was scheduled for offline node (bug?)", action->uuid, action->id); pcmk__log_action("Offline node", action, false); return false; } else if (action->node->details->unclean) { pcmk__sched_err(action->scheduler, "Skipping action %s (%d) " "because it was scheduled for unclean node (bug?)", action->uuid, action->id); pcmk__log_action("Unclean node", action, false); return false; } return true; } /*! * \internal * \brief Check whether an ordering's flags can change an action * * \param[in] ordering Ordering to check * * \return true if ordering has flags that can change an action, false otherwise */ static bool ordering_can_change_actions(const pcmk__related_action_t *ordering) { return pcmk_any_flags_set(ordering->flags, ~(pcmk__ar_then_implies_first_graphed |pcmk__ar_first_implies_then_graphed |pcmk__ar_ordered)); } /*! 
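 */

/* Aside (illustration only): ordering_can_change_actions() above treats an
 * ordering as influential exactly when it carries any flag beyond the three
 * purely graph-related ones. An equivalent open-coded check, assuming
 * pcmk_any_flags_set(flags, mask) is ((flags & mask) != 0):
 */
static inline bool
example_ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
    const uint32_t graph_only = pcmk__ar_then_implies_first_graphed
                                |pcmk__ar_first_implies_then_graphed
                                |pcmk__ar_ordered;

    return (ordering->flags & ~graph_only) != 0;
}

/*!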
 * \internal
 * \brief Check whether an action input should be in the transition graph
 *
 * \param[in]     action  Action to check
 * \param[in,out] input   Action input to check
 *
 * \return true if input should be in graph, false otherwise
 * \note This function may not only check an input, but disable it under
 *       certain circumstances (load or anti-colocation orderings that are not
 *       needed).
 */
static bool
should_add_input_to_graph(const pcmk_action_t *action,
                          pcmk__related_action_t *input)
{
    if (input->graphed) {
        return true;
    }

    if (input->flags == pcmk__ar_none) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "ordering disabled",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
               && !ordering_can_change_actions(input)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "optional and input unrunnable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
               && pcmk_is_set(input->flags, pcmk__ar_min_runnable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "minimum number of instances required but input unrunnable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (pcmk_is_set(input->flags, pcmk__ar_unmigratable_then_blocks)
               && !pcmk_is_set(input->action->flags, pcmk__action_runnable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "input blocked if 'then' unmigratable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (pcmk_is_set(input->flags, pcmk__ar_if_first_unmigratable)
               && pcmk_is_set(input->action->flags, pcmk__action_migratable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
                  "only if input is unmigratable, but it is migratable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if ((input->flags == pcmk__ar_ordered)
               && pcmk_is_set(input->action->flags, pcmk__action_migratable)
               && pcmk__ends_with(input->action->uuid, "_stop_0")) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "optional but stop in migration",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (input->flags == pcmk__ar_if_on_same_node_or_target) {
        pcmk_node_t *input_node = input->action->node;

        if ((action->rsc != NULL)
            && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
                            pcmk__str_none)) {

            pcmk_node_t *assigned = action->rsc->priv->assigned_node;

            /* For load_stopped -> migrate_to orderings, we care about where
             * the resource has been assigned, not where migrate_to will be
             * executed.
             */
            if (!pcmk__same_node(input_node, assigned)) {
                crm_trace("Ignoring %s (%d) input %s (%d): "
                          "migration target %s is not same as input node %s",
                          action->uuid, action->id,
                          input->action->uuid, input->action->id,
                          (assigned? assigned->priv->name : ""),
                          (input_node? input_node->priv->name : ""));
                input->flags = pcmk__ar_none;
                return false;
            }

        } else if (!pcmk__same_node(input_node, action->node)) {
            crm_trace("Ignoring %s (%d) input %s (%d): "
                      "not on same node (%s vs %s)",
                      action->uuid, action->id,
                      input->action->uuid, input->action->id,
                      (action->node? action->node->priv->name : ""),
                      (input_node?
input_node->priv->name : "")); input->flags = pcmk__ar_none; return false; } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) { crm_trace("Ignoring %s (%d) input %s (%d): " "ordering optional", action->uuid, action->id, input->action->uuid, input->action->id); input->flags = pcmk__ar_none; return false; } } else if (input->flags == pcmk__ar_if_required_on_same_node) { if (input->action->node && action->node && !pcmk__same_node(input->action->node, action->node)) { crm_trace("Ignoring %s (%d) input %s (%d): " "not on same node (%s vs %s)", action->uuid, action->id, input->action->uuid, input->action->id, pcmk__node_name(action->node), pcmk__node_name(input->action->node)); input->flags = pcmk__ar_none; return false; } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) { crm_trace("Ignoring %s (%d) input %s (%d): optional", action->uuid, action->id, input->action->uuid, input->action->id); input->flags = pcmk__ar_none; return false; } } else if (input->action->rsc && input->action->rsc != action->rsc && pcmk_is_set(input->action->rsc->flags, pcmk__rsc_failed) && !pcmk_is_set(input->action->rsc->flags, pcmk__rsc_managed) && pcmk__ends_with(input->action->uuid, "_stop_0") && pcmk__is_clone(action->rsc)) { crm_warn("Ignoring requirement that %s complete before %s:" " unmanaged failed resources cannot prevent clone shutdown", input->action->uuid, action->uuid); return false; } else if (pcmk_is_set(input->action->flags, pcmk__action_optional) && !pcmk_any_flags_set(input->action->flags, pcmk__action_always_in_graph |pcmk__action_added_to_graph) && !should_add_action_to_graph(input->action)) { crm_trace("Ignoring %s (%d) input %s (%d): " "input optional", action->uuid, action->id, input->action->uuid, input->action->id); return false; } crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x", action->uuid, action->id, action_type_str(input->action->flags), input->action->uuid, input->action->id, action_node_str(input->action), action_runnable_str(input->action->flags), action_optional_str(input->action->flags), input->flags); return true; } /*! * \internal * \brief Check whether an ordering creates an ordering loop * * \param[in] init_action "First" action in ordering * \param[in] action Callers should always set this the same as * \p init_action (this function may use a different * value for recursive calls) * \param[in,out] input Action wrapper for "then" action in ordering * * \return true if the ordering creates a loop, otherwise false */ bool pcmk__graph_has_loop(const pcmk_action_t *init_action, const pcmk_action_t *action, pcmk__related_action_t *input) { bool has_loop = false; if (pcmk_is_set(input->action->flags, pcmk__action_detect_loop)) { crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)", input->action->uuid, input->action->node? input->action->node->priv->name : "", action->uuid, action->node? action->node->priv->name : "", input->flags); return false; } // Don't need to check inputs that won't be used if (!should_add_input_to_graph(action, input)) { return false; } if (input->action == init_action) { crm_debug("Input loop found in %s@%s ->...-> %s@%s", action->uuid, action->node? action->node->priv->name : "", init_action->uuid, init_action->node? init_action->node->priv->name : ""); return true; } pcmk__set_action_flags(input->action, pcmk__action_detect_loop); crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)" "for graph loop with %s@%s ", action->uuid, action->node? 
action->node->priv->name : "", input->action->uuid, input->action->node? input->action->node->priv->name : "", input->flags, init_action->uuid, init_action->node? init_action->node->priv->name : ""); // Recursively check input itself for loops for (GList *iter = input->action->actions_before; iter != NULL; iter = iter->next) { if (pcmk__graph_has_loop(init_action, input->action, (pcmk__related_action_t *) iter->data)) { // Recursive call already logged a debug message has_loop = true; break; } } pcmk__clear_action_flags(input->action, pcmk__action_detect_loop); if (!has_loop) { crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)", input->action->uuid, input->action->node? input->action->node->priv->name : "", action->uuid, action->node? action->node->priv->name : "", input->flags); } return has_loop; } /*! * \internal * \brief Create a synapse XML element for a transition graph * * \param[in] action Action that synapse is for * \param[in,out] scheduler Scheduler data containing graph * * \return Newly added XML element for new graph synapse */ static xmlNode * create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler) { int synapse_priority = 0; xmlNode *syn = pcmk__xe_create(scheduler->priv->graph, "synapse"); crm_xml_add_int(syn, PCMK_XA_ID, scheduler->priv->synapse_count++); if (action->rsc != NULL) { synapse_priority = action->rsc->priv->priority; } if (action->priority > synapse_priority) { synapse_priority = action->priority; } if (synapse_priority > 0) { crm_xml_add_int(syn, PCMK__XA_PRIORITY, synapse_priority); } return syn; } /*! * \internal * \brief Add an action to the transition graph XML if appropriate * * \param[in,out] data Action to possibly add * \param[in,out] user_data Scheduler data * * \note This will de-duplicate the action inputs, meaning that the * pcmk__related_action_t:type flags can no longer be relied on to retain * their original settings. That means this MUST be called after * pcmk__apply_orderings() is complete, and nothing after this should rely * on those type flags. (For example, some code looks for type equal to * some flag rather than whether the flag is set, and some code looks for * particular combinations of flags -- such code must be done before * pcmk__create_graph().) */ static void add_action_to_graph(gpointer data, gpointer user_data) { pcmk_action_t *action = (pcmk_action_t *) data; pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data; xmlNode *syn = NULL; xmlNode *set = NULL; xmlNode *in = NULL; /* If we haven't already, de-duplicate inputs (even if we won't be adding * the action to the graph, so that crm_simulate's dot graphs don't have * duplicates). */ if (!pcmk_is_set(action->flags, pcmk__action_inputs_deduplicated)) { pcmk__deduplicate_action_inputs(action); pcmk__set_action_flags(action, pcmk__action_inputs_deduplicated); } if (pcmk_is_set(action->flags, pcmk__action_added_to_graph) || !should_add_action_to_graph(action)) { return; // Already added, or shouldn't be } pcmk__set_action_flags(action, pcmk__action_added_to_graph); crm_trace("Adding action %d (%s%s%s) to graph", action->id, action->uuid, ((action->node == NULL)? "" : " on "), ((action->node == NULL)? 
"" : action->node->priv->name)); syn = create_graph_synapse(action, scheduler); set = pcmk__xe_create(syn, "action_set"); in = pcmk__xe_create(syn, "inputs"); create_graph_action(set, action, false, scheduler); for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { pcmk__related_action_t *input = lpc->data; if (should_add_input_to_graph(action, input)) { xmlNode *input_xml = pcmk__xe_create(in, "trigger"); input->graphed = true; create_graph_action(input_xml, input->action, true, scheduler); } } } static int transition_id = -1; /*! * \internal * \brief Log a message after calculating a transition * * \param[in] scheduler Scheduler data * \param[in] filename Where transition input is stored */ void pcmk__log_transition_summary(const pcmk_scheduler_t *scheduler, const char *filename) { if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_error) || crm_config_error) { crm_err("Calculated transition %d (with errors)%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } else if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_warning) || crm_config_warning) { crm_warn("Calculated transition %d (with warnings)%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } else { crm_notice("Calculated transition %d%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } if (crm_config_error) { crm_notice("Configuration errors found during scheduler processing," " please run \"crm_verify -L\" to identify issues"); } } /*! * \internal * \brief Add a resource's actions to the transition graph * * \param[in,out] rsc Resource whose actions should be added */ void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc) { GList *iter = NULL; CRM_ASSERT(rsc != NULL); pcmk__rsc_trace(rsc, "Adding actions for %s to graph", rsc->id); // First add the resource's own actions g_list_foreach(rsc->priv->actions, add_action_to_graph, rsc->priv->scheduler); // Then recursively add its children's actions (appropriate to variant) for (iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data; child_rsc->priv->cmds->add_actions_to_graph(child_rsc); } } /*! 
* \internal * \brief Create a transition graph with all cluster actions needed * * \param[in,out] scheduler Scheduler data */ void pcmk__create_graph(pcmk_scheduler_t *scheduler) { GList *iter = NULL; const char *value = NULL; long long limit = 0LL; GHashTable *config_hash = scheduler->priv->options; transition_id++; crm_trace("Creating transition graph %d", transition_id); scheduler->priv->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH); value = pcmk__cluster_option(config_hash, PCMK_OPT_CLUSTER_DELAY); crm_xml_add(scheduler->priv->graph, PCMK_OPT_CLUSTER_DELAY, value); value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT); crm_xml_add(scheduler->priv->graph, PCMK_OPT_STONITH_TIMEOUT, value); crm_xml_add(scheduler->priv->graph, "failed-stop-offset", "INFINITY"); if (pcmk_is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) { crm_xml_add(scheduler->priv->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(scheduler->priv->graph, "failed-start-offset", "1"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_BATCH_LIMIT); crm_xml_add(scheduler->priv->graph, PCMK_OPT_BATCH_LIMIT, value); crm_xml_add_int(scheduler->priv->graph, "transition_id", transition_id); value = pcmk__cluster_option(config_hash, PCMK_OPT_MIGRATION_LIMIT); if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) { crm_xml_add(scheduler->priv->graph, PCMK_OPT_MIGRATION_LIMIT, value); } - if (scheduler->recheck_by > 0) { + if (scheduler->priv->recheck_by > 0) { char *recheck_epoch = NULL; - recheck_epoch = crm_strdup_printf("%llu", - (long long) scheduler->recheck_by); + recheck_epoch = crm_strdup_printf("%llu", (unsigned long long) + scheduler->priv->recheck_by); crm_xml_add(scheduler->priv->graph, "recheck-by", recheck_epoch); free(recheck_epoch); } /* The following code will de-duplicate action inputs, so nothing past this * should rely on the action input type flags retaining their original * values. */ // Add resource actions to graph for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data; pcmk__rsc_trace(rsc, "Processing actions for %s", rsc->id); rsc->priv->cmds->add_actions_to_graph(rsc); } // Add pseudo-action for list of nodes with maintenance state update add_maintenance_update(scheduler); // Add non-resource (node) actions for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = (pcmk_action_t *) iter->data; if ((action->rsc != NULL) && (action->node != NULL) && action->node->details->shutdown && !pcmk_is_set(action->rsc->flags, pcmk__rsc_maintenance) && !pcmk_any_flags_set(action->flags, pcmk__action_optional|pcmk__action_runnable) && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) { /* Eventually we should just ignore the 'fence' case, but for now * it's the best way to detect (in CTS) when CIB resource updates * are being lost. */ if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate) || (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) { const bool managed = pcmk_is_set(action->rsc->flags, pcmk__rsc_managed); const bool failed = pcmk_is_set(action->rsc->flags, pcmk__rsc_failed); crm_crit("Cannot %s %s because of %s:%s%s (%s)", action->node->details->unclean? "fence" : "shut down", pcmk__node_name(action->node), action->rsc->id, (managed? " blocked" : " unmanaged"), (failed? 
" failed" : ""), action->uuid); } } add_action_to_graph((gpointer) action, (gpointer) scheduler); } crm_log_xml_trace(scheduler->priv->graph, "graph"); } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index f808c6d204..a9a6f89d2c 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,913 +1,913 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include "pe_status_private.h" extern bool pcmk__is_daemon; gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] scheduler Scheduler data * \param[in] node Name of node to check * * \return true if node can be fenced, false otherwise */ bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node) { if (pcmk__is_guest_or_bundle_node(node)) { /* A guest or bundle node is fenced by stopping its launcher, which is * possible if the launcher's host is either online or fenceable. */ pcmk_resource_t *rsc = node->priv->remote->priv->launcher; for (GList *n = rsc->priv->active_nodes; n != NULL; n = n->next) { pcmk_node_t *launcher_node = n->data; if (!launcher_node->details->online && !pe_can_fence(scheduler, launcher_node)) { return false; } } return true; } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { return false; /* Turned off */ } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_have_fencing)) { return false; /* No devices */ } else if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) { return true; } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) { return true; } else if(node == NULL) { return false; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", pcmk__node_name(node)); return true; } crm_trace("Cannot fence %s", pcmk__node_name(node)); return false; } /*! * \internal * \brief Copy a node object * * \param[in] this_node Node object to copy * * \return Newly allocated shallow copy of this_node * \note This function asserts on errors and is guaranteed to return non-NULL. */ pcmk_node_t * pe__copy_node(const pcmk_node_t *this_node) { pcmk_node_t *new_node = NULL; CRM_ASSERT(this_node != NULL); new_node = pcmk__assert_alloc(1, sizeof(pcmk_node_t)); new_node->assign = pcmk__assert_alloc(1, sizeof(struct pcmk__node_assignment)); new_node->assign->probe_mode = this_node->assign->probe_mode; new_node->assign->score = this_node->assign->score; new_node->assign->count = this_node->assign->count; new_node->details = this_node->details; new_node->priv = this_node->priv; return new_node; } /*! * \internal * \brief Create a node hash table from a node list * * \param[in] list Node list * * \return Hash table equivalent of node list */ GHashTable * pe__node_list2table(const GList *list) { GHashTable *result = NULL; result = pcmk__strkey_table(NULL, free); for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { pcmk_node_t *new_node = NULL; new_node = pe__copy_node((const pcmk_node_t *) gIter->data); g_hash_table_insert(result, (gpointer) new_node->priv->id, new_node); } return result; } /*! 
* \internal * \brief Compare two nodes by name, with numeric portions sorted numerically * * Sort two node names case-insensitively like strcasecmp(), but with any * numeric portions of the name sorted numerically. For example, "node10" will * sort higher than "node9" but lower than "remotenode9". * * \param[in] a First node to compare (can be \c NULL) * \param[in] b Second node to compare (can be \c NULL) * * \retval -1 \c a comes before \c b (or \c a is \c NULL and \c b is not) * \retval 0 \c a and \c b are equal (or both are \c NULL) * \retval 1 \c a comes after \c b (or \c b is \c NULL and \c a is not) */ gint pe__cmp_node_name(gconstpointer a, gconstpointer b) { const pcmk_node_t *node1 = (const pcmk_node_t *) a; const pcmk_node_t *node2 = (const pcmk_node_t *) b; if ((node1 == NULL) && (node2 == NULL)) { return 0; } if (node1 == NULL) { return -1; } if (node2 == NULL) { return 1; } return pcmk__numeric_strcasecmp(node1->priv->name, node2->priv->name); } /*! * \internal * \brief Output node weights to stdout * * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes * \param[in,out] scheduler Scheduler data */ static void pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv->out; // Sort the nodes so the output is consistent for regression tests GList *list = g_list_sort(g_hash_table_get_values(nodes), pe__cmp_node_name); for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { const pcmk_node_t *node = (const pcmk_node_t *) gIter->data; out->message(out, "node-weight", rsc, comment, node->priv->name, pcmk_readable_score(node->assign->score)); } g_list_free(list); } /*! * \internal * \brief Log node weights at trace level * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] rsc If not NULL, include this resource's ID in logs * \param[in] comment Text description to prefix lines with * \param[in] nodes Nodes whose scores should be logged */ static void pe__log_node_weights(const char *file, const char *function, int line, const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes) { GHashTableIter iter; pcmk_node_t *node = NULL; // Don't waste time if we're not tracing at this point pcmk__if_tracing({}, return); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (rsc) { qb_log_from_external_source(function, file, "%s: %s allocation score on %s: %s", LOG_TRACE, line, 0, comment, rsc->id, pcmk__node_name(node), pcmk_readable_score(node->assign->score)); } else { qb_log_from_external_source(function, file, "%s: %s = %s", LOG_TRACE, line, 0, comment, pcmk__node_name(node), pcmk_readable_score(node->assign->score)); } } } /*! 
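 */

/* Aside (illustration only): pe__cmp_node_name() defers to
 * pcmk__numeric_strcasecmp(), so node names order as the docblock above
 * describes, for example:
 *
 *     "node9"  sorts before "node10"       (numeric runs compare numerically)
 *     "node10" sorts before "remotenode9"  (text compares case-insensitively)
 */

/*!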
 * \internal
 * \brief Log or output node weights
 *
 * \param[in]     file       Caller's filename
 * \param[in]     function   Caller's function name
 * \param[in]     line       Caller's line number
 * \param[in]     to_log     Log if true, otherwise output
 * \param[in]     rsc        If not NULL, use this resource's ID in logs,
 *                           and show scores recursively for any children
 * \param[in]     comment    Text description to prefix lines with
 * \param[in]     nodes      Nodes whose scores should be shown
 * \param[in,out] scheduler  Scheduler data
 */
void
pe__show_node_scores_as(const char *file, const char *function, int line,
                        bool to_log, const pcmk_resource_t *rsc,
                        const char *comment, GHashTable *nodes,
                        pcmk_scheduler_t *scheduler)
{
    if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
        // Don't show allocation scores for orphans
        return;
    }
    if (nodes == NULL) {
        // Nothing to show
        return;
    }

    if (to_log) {
        pe__log_node_weights(file, function, line, rsc, comment, nodes);
    } else {
        pe__output_node_weights(rsc, comment, nodes, scheduler);
    }

    if (rsc == NULL) {
        return;
    }

    // If this resource has children, repeat recursively for each
    for (GList *gIter = rsc->priv->children;
         gIter != NULL; gIter = gIter->next) {
        pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;

        pe__show_node_scores_as(file, function, line, to_log, child, comment,
                                child->priv->allowed_nodes, scheduler);
    }
}

/*!
 * \internal
 * \brief Compare two resources by priority
 *
 * \param[in] a  First resource to compare (can be \c NULL)
 * \param[in] b  Second resource to compare (can be \c NULL)
 *
 * \retval -1 a's priority > b's priority (or \c b is \c NULL and \c a is not)
 * \retval  0 a's priority == b's priority (or both \c a and \c b are \c NULL)
 * \retval  1 a's priority < b's priority (or \c a is \c NULL and \c b is not)
 */
gint
pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
{
    const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
    const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;

    if (a == NULL && b == NULL) {
        return 0;
    }
    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }

    if (resource1->priv->priority > resource2->priv->priority) {
        return -1;
    }
    if (resource1->priv->priority < resource2->priv->priority) {
        return 1;
    }
    return 0;
}

static void
resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
                    const char *tag)
{
    pcmk_node_t *match = NULL;

    if ((pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
         || (node->assign->probe_mode == pcmk__probe_never))
        && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
        /* This string comparison may be fragile, but exclusive resources and
         * exclusive nodes should not have the symmetric_default constraint
         * applied to them.
*/ return; } else { for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } match = g_hash_table_lookup(rsc->priv->allowed_nodes, node->priv->id); if (match == NULL) { match = pe__copy_node(node); g_hash_table_insert(rsc->priv->allowed_nodes, (gpointer) match->priv->id, match); } match->assign->score = pcmk__add_scores(match->assign->score, score); pcmk__rsc_trace(rsc, "Enabling %s preference (%s) for %s on %s (now %s)", tag, pcmk_readable_score(score), rsc->id, pcmk__node_name(node), pcmk_readable_score(match->assign->score)); } void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag, pcmk_scheduler_t *scheduler) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (scheduler != NULL) { GList *gIter = scheduler->nodes; for (; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; pcmk_node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if ((node == NULL) && (score == -PCMK_SCORE_INFINITY) && (rsc->priv->assigned_node != NULL)) { // @TODO Should this be more like pcmk__unassign_resource()? crm_info("Unassigning %s from %s", rsc->id, pcmk__node_name(rsc->priv->assigned_node)); free(rsc->priv->assigned_node); rsc->priv->assigned_node = NULL; } } time_t get_effective_time(pcmk_scheduler_t *scheduler) { if(scheduler) { if (scheduler->priv->now == NULL) { crm_trace("Recording a new 'now'"); scheduler->priv->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(scheduler->priv->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role) { enum rsc_role_e local_role = pcmk_role_unknown; const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (pcmk__str_eq(value, PCMK_ROLE_STARTED, pcmk__str_null_matches|pcmk__str_casei)) { return FALSE; } if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_TARGET_ROLE " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); return FALSE; } local_role = pcmk_parse_role(value); if (local_role == pcmk_role_unknown) { pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s " "because '%s' is not valid", rsc->id, value); return FALSE; } else if (local_role > pcmk_role_started) { if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk__rsc_promotable)) { if (local_role > pcmk_role_unpromoted) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s " "because '%s' only makes sense for promotable " "clones", rsc->id, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action, uint32_t flags) { GList *gIter = NULL; pcmk__related_action_t *wrapper = NULL; GList *list = NULL; if (flags == pcmk__ar_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { 
gboolean
order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
              uint32_t flags)
{
    GList *gIter = NULL;
    pcmk__related_action_t *wrapper = NULL;
    GList *list = NULL;

    if (flags == pcmk__ar_none) {
        return FALSE;
    }

    if (lh_action == NULL || rh_action == NULL) {
        return FALSE;
    }

    crm_trace("Creating action wrappers for ordering: %s then %s",
              lh_action->uuid, rh_action->uuid);

    /* Ensure we never create a dependency on ourselves... it's happened */
    CRM_ASSERT(lh_action != rh_action);

    /* Filter dups, otherwise update_action_states() has too much work to do */
    gIter = lh_action->actions_after;
    for (; gIter != NULL; gIter = gIter->next) {
        pcmk__related_action_t *after = gIter->data;

        if ((after->action == rh_action)
            && pcmk_any_flags_set(after->flags, flags)) {
            return FALSE;
        }
    }

    wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
    wrapper->action = rh_action;
    wrapper->flags = flags;
    list = lh_action->actions_after;
    list = g_list_prepend(list, wrapper);
    lh_action->actions_after = list;

    wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
    wrapper->action = lh_action;
    wrapper->flags = flags;
    list = rh_action->actions_before;
    list = g_list_prepend(list, wrapper);
    rh_action->actions_before = list;
    return TRUE;
}

void
destroy_ticket(gpointer data)
{
    pcmk__ticket_t *ticket = data;

    if (ticket->state) {
        g_hash_table_destroy(ticket->state);
    }
    free(ticket->id);
    free(ticket);
}

pcmk__ticket_t *
ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
{
    pcmk__ticket_t *ticket = NULL;

    if (pcmk__str_empty(ticket_id)) {
        return NULL;
    }

    if (scheduler->priv->ticket_constraints == NULL) {
        scheduler->priv->ticket_constraints =
            pcmk__strkey_table(free, destroy_ticket);
    }

    ticket = g_hash_table_lookup(scheduler->priv->ticket_constraints,
                                 ticket_id);
    if (ticket == NULL) {
        ticket = calloc(1, sizeof(pcmk__ticket_t));
        if (ticket == NULL) {
            pcmk__sched_err(scheduler, "Cannot allocate ticket '%s'",
                            ticket_id);
            return NULL;
        }

        crm_trace("Creating ticket entry for %s", ticket_id);

        ticket->id = strdup(ticket_id);
        ticket->last_granted = -1;
        ticket->state = pcmk__strkey_table(free, free);

        g_hash_table_insert(scheduler->priv->ticket_constraints,
                            pcmk__str_copy(ticket->id), ticket);
    }

    return ticket;
}

const char *
rsc_printable_id(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
        return rsc->id;
    }
    return pcmk__xe_id(rsc->priv->xml);
}

void
pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
    pcmk__clear_rsc_flags(rsc, flags);
    for (GList *gIter = rsc->priv->children;
         gIter != NULL; gIter = gIter->next) {
        pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
                                           flags);
    }
}

void
pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
{
    for (GList *lpc = scheduler->priv->resources; lpc != NULL;
         lpc = lpc->next) {
        pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;

        pe__clear_resource_flags_recursive(r, flag);
    }
}

void
pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
    pcmk__set_rsc_flags(rsc, flags);
    for (GList *gIter = rsc->priv->children;
         gIter != NULL; gIter = gIter->next) {
        pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
                                         flags);
    }
}
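/* A minimal sketch of wiring an ordering with order_actions(). This wrapper
 * is an illustrative assumption, not part of the original file: each
 * successful call links the two actions in both directions (the second lands
 * in the first's actions_after list and the first in the second's
 * actions_before list), while a NULL argument or an equivalent existing
 * ordering makes the call a no-op that returns FALSE.
 */
static void
example_order_pair(pcmk_action_t *first, pcmk_action_t *then)
{
    if (!order_actions(first, then, pcmk__ar_ordered)) {
        crm_trace("Ordering not added (NULL action or duplicate ordering)");
    }
}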
void
trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
                  pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_enable_unfencing)) {
        /* No resources require it */
        return;

    } else if ((rsc != NULL)
               && !pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) {
        /* Wasn't a stonith device */
        return;

    } else if ((node != NULL) && node->details->online
               && (node->details->unclean == FALSE)
               && (node->details->shutdown == FALSE)) {
        pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
                                             reason, FALSE, scheduler);

        if (dependency != NULL) {
            order_actions(unfence, dependency, pcmk__ar_ordered);
        }

    } else if (rsc != NULL) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
            if (node->details->online
                && (node->details->unclean == FALSE)
                && (node->details->shutdown == FALSE)) {
                trigger_unfencing(rsc, node, reason, dependency, scheduler);
            }
        }
    }
}

/*!
 * \internal
 * \brief Check whether shutdown has been requested for a node
 *
 * \param[in] node  Node to check
 *
 * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
 * \note This differs from simply using node->details->shutdown in that it can
 *       be used before that has been determined (and in fact to determine it),
 *       and it can also be used to distinguish requested shutdown from
 *       implicit shutdown of remote nodes by virtue of their connection
 *       stopping.
 */
bool
pe__shutdown_requested(const pcmk_node_t *node)
{
    const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN,
                                           NULL, pcmk__rsc_node_current);

    return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
}

/*!
 * \internal
 * \brief Update "recheck by" time in scheduler data
 *
 * \param[in]     recheck    Epoch time when recheck should happen
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     reason     What time is being updated for (for logs)
 */
void
pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
                        const char *reason)
{
    if ((recheck > get_effective_time(scheduler))
-        && ((scheduler->recheck_by == 0)
-            || (scheduler->recheck_by > recheck))) {
-        scheduler->recheck_by = recheck;
+        && ((scheduler->priv->recheck_by == 0)
+            || (scheduler->priv->recheck_by > recheck))) {
+        scheduler->priv->recheck_by = recheck;
        crm_debug("Updated next scheduler recheck to %s for %s",
                  pcmk__trim(ctime(&recheck)), reason);
    }
}

/*!
 * \internal
 * \brief Extract nvpair blocks contained by a CIB XML element into a hash table
 *
 * \param[in]     xml_obj       XML element containing blocks of nvpair elements
 * \param[in]     set_name      If not NULL, only use blocks of this element
 * \param[in]     rule_data     Matching parameters to use when unpacking
 * \param[out]    hash          Where to store extracted name/value pairs
 * \param[in]     always_first  If not NULL, process block with this ID first
 * \param[in]     overwrite     Whether to replace existing values with same name
 * \param[in,out] scheduler     Scheduler data containing \p xml_obj
 */
void
pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                           const pe_rule_eval_data_t *rule_data,
                           GHashTable *hash, const char *always_first,
                           gboolean overwrite, pcmk_scheduler_t *scheduler)
{
    crm_time_t *next_change = crm_time_new_undefined();

    pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
                    always_first, overwrite, next_change);
    if (crm_time_is_defined(next_change)) {
        time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);

        pe__update_recheck_time(recheck, scheduler, "rule evaluation");
    }
    crm_time_free(next_change);
}

bool
pe__resource_is_disabled(const pcmk_resource_t *rsc)
{
    const char *target_role = NULL;

    CRM_CHECK(rsc != NULL, return false);
    target_role = g_hash_table_lookup(rsc->priv->meta,
                                      PCMK_META_TARGET_ROLE);
    if (target_role) {
        // If invalid, we've already logged an error when unpacking
        enum rsc_role_e target_role_e = pcmk_parse_role(target_role);

        if ((target_role_e == pcmk_role_stopped)
            || ((target_role_e == pcmk_role_unpromoted)
                && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                               pcmk__rsc_promotable))) {
            return true;
        }
    }
    return false;
}
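/* A minimal caller sketch for pe__resource_is_disabled(). This helper is an
 * illustrative assumption, not part of the original file: it walks the
 * scheduler's resource list (as pe__clear_resource_flags_on_all() does above)
 * and logs resources that an administrator has disabled via target-role.
 */
static void
example_log_disabled_resources(pcmk_scheduler_t *scheduler)
{
    for (GList *iter = scheduler->priv->resources; iter != NULL;
         iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (pe__resource_is_disabled(rsc)) {
            crm_info("%s is disabled via " PCMK_META_TARGET_ROLE, rsc->id);
        }
    }
}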
/*!
 * \internal
 * \brief Check whether a resource is running only on given node
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p rsc is running only on \p node, otherwise false
 */
bool
pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    return (rsc != NULL) && pcmk__list_of_1(rsc->priv->active_nodes)
            && pcmk__same_node((const pcmk_node_t *)
                               rsc->priv->active_nodes->data, node);
}

bool
pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
{
    if (rsc != NULL) {
        for (GList *ele = rsc->priv->active_nodes; ele; ele = ele->next) {
            pcmk_node_t *node = (pcmk_node_t *) ele->data;

            if (pcmk__str_in_list(node->priv->name, node_list,
                                  pcmk__str_star_matches|pcmk__str_casei)) {
                return true;
            }
        }
    }
    return false;
}

bool
pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
{
    return rsc->priv->fns->active(rsc, FALSE)
           && !pe__rsc_running_on_any(rsc, only_node);
}

GList *
pe__filter_rsc_list(GList *rscs, GList *filter)
{
    GList *retval = NULL;

    for (GList *gIter = rscs; gIter; gIter = gIter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;

        /* I think the second condition is safe here for all callers of this
         * function.  If not, it needs to move into pe__node_text.
         */
        if (pcmk__str_in_list(rsc_printable_id(rsc), filter,
                              pcmk__str_star_matches)
            || ((rsc->priv->parent != NULL)
                && pcmk__str_in_list(rsc_printable_id(rsc->priv->parent),
                                     filter, pcmk__str_star_matches))) {
            retval = g_list_prepend(retval, rsc);
        }
    }

    return retval;
}

GList *
pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
{
    GList *nodes = NULL;

    if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
        /* Nothing was given so return a list of all node names.  Or, '*' was
         * given.  This would normally fall into the pe__unames_with_tag branch
         * where it will return an empty list.  Catch it here instead.
         */
        nodes = g_list_prepend(nodes, strdup("*"));

    } else {
        pcmk_node_t *node = pcmk_find_node(scheduler, s);

        if (node) {
            /* The given string was a valid uname for a node.  Return a
             * singleton list containing just that uname.
             */
            nodes = g_list_prepend(nodes, strdup(s));

        } else {
            /* The given string was not a valid uname.  It's either a tag or
             * it's a typo or something.  In the first case, we'll return a
             * list of all the unames of the nodes with the given tag.  In the
             * second case, we'll return a NULL pointer and nothing will
             * get displayed.
             */
            nodes = pe__unames_with_tag(scheduler, s);
        }
    }

    return nodes;
}
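/* A minimal caller sketch for pe__build_node_name_list(). This helper is an
 * illustrative assumption, not part of the original file: build a filter
 * list from a user-supplied string (a node uname, a tag, or "*"), consume
 * it, then free both the links and the strings (assuming, as the strdup()
 * calls above suggest, that the caller owns the returned strings).
 */
static void
example_node_filter(pcmk_scheduler_t *scheduler, const char *spec)
{
    GList *names = pe__build_node_name_list(scheduler, spec);

    for (GList *iter = names; iter != NULL; iter = iter->next) {
        crm_trace("Filter includes node name '%s'",
                  (const char *) iter->data);
    }
    g_list_free_full(names, free);
}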
GList *
pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
{
    GList *resources = NULL;

    if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
        resources = g_list_prepend(resources, strdup("*"));

    } else {
        const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
        pcmk_resource_t *rsc =
            pe_find_resource_with_flags(scheduler->priv->resources, s, flags);

        if (rsc) {
            /* A colon in the name we were given means we're being asked to
             * filter on a specific instance of a cloned resource.  Put that
             * exact string into the filter list.  Otherwise, use the printable
             * ID of whatever resource was found that matches what was asked
             * for.
             */
            if (strstr(s, ":") != NULL) {
                resources = g_list_prepend(resources, strdup(rsc->id));
            } else {
                resources = g_list_prepend(resources,
                                           strdup(rsc_printable_id(rsc)));
            }

        } else {
            /* The given string was not a valid resource name.  It's a tag or a
             * typo or something.  See pe__build_node_name_list() for more
             * detail.
             */
            resources = pe__rscs_with_tag(scheduler, s);
        }
    }

    return resources;
}

xmlNode *
pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
{
    const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
    const char *rsc_id = rsc->id;
    const pcmk_scheduler_t *scheduler = rsc->priv->scheduler;

    if (pcmk__is_clone(parent)) {
        rsc_id = pe__clone_child_id(parent);
    }

    for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL,
                                                NULL, NULL);
         xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {

        const char *value = NULL;
        char *op_id = NULL;

        /* This resource operation is not a failed probe. */
        if (!pcmk_xe_mask_probe_failure(xml_op)) {
            continue;
        }

        /* This resource operation was not run on the given node.  Note that
         * if name is NULL, this will always succeed.
         */
        value = crm_element_value(xml_op, PCMK__META_ON_NODE);
        if (value == NULL
            || !pcmk__str_eq(value, name,
                             pcmk__str_casei|pcmk__str_null_matches)) {
            continue;
        }

        if (!parse_op_key(pcmk__xe_history_key(xml_op), &op_id, NULL, NULL)) {
            continue; // This history entry is missing an operation key
        }

        /* This resource operation's ID does not match the rsc_id we are
         * looking for.
         */
        if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) {
            free(op_id);
            continue;
        }

        free(op_id);
        return xml_op;
    }

    return NULL;
}
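/* A minimal caller sketch for pe__failed_probe_for_rsc(). This helper is an
 * illustrative assumption, not part of the original file: it checks whether
 * the resource has a failed probe recorded in the failed-operations history,
 * optionally restricted to one node (passing NULL for node_name matches a
 * failed probe on any node).
 */
static void
example_log_failed_probe(const pcmk_resource_t *rsc, const char *node_name)
{
    xmlNode *xml_op = pe__failed_probe_for_rsc(rsc, node_name);

    if (xml_op != NULL) {
        crm_trace("%s has a failed probe on %s", rsc->id,
                  ((node_name != NULL)? node_name : "at least one node"));
    }
}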