diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h index 317c870e24..f806824cfe 100644 --- a/include/crm/common/resources.h +++ b/include/crm/common/resources.h @@ -1,147 +1,150 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_RESOURCES__H # define PCMK__CRM_COMMON_RESOURCES__H #ifdef __cplusplus extern "C" { #endif /*! * \file * \brief Scheduler API for resources * \ingroup core */ //! Resource variants supported by Pacemaker enum pe_obj_types { // Order matters: some code compares these values with < and > pcmk_rsc_variant_unknown = -1, //!< Unknown resource variant pcmk_rsc_variant_primitive = 0, //!< Primitive resource pcmk_rsc_variant_group = 1, //!< Group resource pcmk_rsc_variant_clone = 2, //!< Clone resource pcmk_rsc_variant_bundle = 3, //!< Bundle resource #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) //! \deprecated Use pcmk_rsc_variant_unknown instead pe_unknown = pcmk_rsc_variant_unknown, //! \deprecated Use pcmk_rsc_variant_primitive instead pe_native = pcmk_rsc_variant_primitive, //! \deprecated Use pcmk_rsc_variant_group instead pe_group = pcmk_rsc_variant_group, //! \deprecated Use pcmk_rsc_variant_clone instead pe_clone = pcmk_rsc_variant_clone, //! \deprecated Use pcmk_rsc_variant_bundle instead pe_container = pcmk_rsc_variant_bundle, #endif }; //! What a resource needs before it can be recovered from a failed node enum rsc_start_requirement { pcmk_requires_nothing = 0, //!< Resource can be recovered immediately pcmk_requires_quorum = 1, //!< Resource can be recovered if quorate pcmk_requires_fencing = 2, //!< Resource can be recovered after fencing #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) //! \deprecated Use pcmk_requires_nothing instead rsc_req_nothing = pcmk_requires_nothing, //! \deprecated Use pcmk_requires_quorum instead rsc_req_quorum = pcmk_requires_quorum, //! \deprecated Use pcmk_requires_fencing instead rsc_req_stonith = pcmk_requires_fencing, #endif }; //! How to recover a resource that is incorrectly active on multiple nodes enum rsc_recovery_type { pcmk_multiply_active_restart = 0, //!< Stop on all, start on desired pcmk_multiply_active_stop = 1, //!< Stop on all and leave stopped pcmk_multiply_active_block = 2, //!< Do nothing to resource pcmk_multiply_active_unexpected = 3, //!< Stop unexpected instances #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) //! \deprecated Use pcmk_multiply_active_restart instead recovery_stop_start = pcmk_multiply_active_restart, //! \deprecated Use pcmk_multiply_active_stop instead recovery_stop_only = pcmk_multiply_active_stop, //! \deprecated Use pcmk_multiply_active_block instead recovery_block = pcmk_multiply_active_block, //! \deprecated Use pcmk_multiply_active_unexpected instead recovery_stop_unexpected = pcmk_multiply_active_unexpected, #endif }; //! Search options for resources (exact resource ID always matches) enum pe_find { //! Also match clone instance ID from resource history pcmk_rsc_match_history = (1 << 0), //! Also match anonymous clone instances by base name pcmk_rsc_match_anon_basename = (1 << 1), //! Match only clones and their instances, by either clone or instance ID pcmk_rsc_match_clone_only = (1 << 2), + //! 
If matching by node, compare current node instead of assigned node + pcmk_rsc_match_current_node = (1 << 3), + #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) //! \deprecated Use pcmk_rsc_match_history instead pe_find_renamed = pcmk_rsc_match_history, //! \deprecated Use pcmk_rsc_match_anon_basename instead pe_find_anon = pcmk_rsc_match_anon_basename, //! \deprecated Use pcmk_rsc_match_clone_only instead pe_find_clone = pcmk_rsc_match_clone_only, #endif - pe_find_current = 0x008, //!< match resource active on specified node + pe_find_current = pcmk_rsc_match_current_node, pe_find_inactive = 0x010, //!< match resource not running anywhere pe_find_any = 0x020, //!< match base name of any clone instance }; //!@{ //! \deprecated Do not use enum pe_restart { pe_restart_restart, pe_restart_ignore, }; enum pe_print_options { pe_print_log = (1 << 0), pe_print_html = (1 << 1), pe_print_ncurses = (1 << 2), pe_print_printf = (1 << 3), pe_print_dev = (1 << 4), // Ignored pe_print_details = (1 << 5), // Ignored pe_print_max_details = (1 << 6), // Ignored pe_print_rsconly = (1 << 7), pe_print_ops = (1 << 8), pe_print_suppres_nl = (1 << 9), pe_print_xml = (1 << 10), pe_print_brief = (1 << 11), pe_print_pending = (1 << 12), pe_print_clone_details = (1 << 13), pe_print_clone_active = (1 << 14), // Print clone instances only if active pe_print_implicit = (1 << 15) // Print implicitly created resources }; //!@} #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_RESOURCES__H diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index da2106319f..76bf87ad01 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -1,1299 +1,1299 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <crm/msg_xml.h> #include <pacemaker-internal.h> #include "libpacemaker_private.h" /*! * \internal * \brief Add implicit promotion ordering for a promotable instance * * \param[in,out] clone Clone resource * \param[in,out] child Instance of \p clone being ordered * \param[in,out] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_promotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Promote clone" -> promote instance -> "clone promoted" pcmk__order_resource_actions(clone, PCMK_ACTION_PROMOTE, child, PCMK_ACTION_PROMOTE, pe_order_optional); pcmk__order_resource_actions(child, PCMK_ACTION_PROMOTE, clone, PCMK_ACTION_PROMOTED, pe_order_optional); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(last, PCMK_ACTION_PROMOTE, child, PCMK_ACTION_PROMOTE, pe_order_optional); } }
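The new flag picks which of a resource's two node references a lookup compares against: the node the resource is currently running on, or the node the scheduler has assigned it to. Below is a minimal self-contained sketch of that dispatch; the names (rsc_like, is_on_node) are illustrative, not Pacemaker API — the real check is rsc_is_on_node() in lib/pengine/native.c, updated later in this diff.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

// Bit value mirrors pcmk_rsc_match_current_node above
enum match_flags {
    match_current_node = (1 << 3),
};

struct rsc_like {
    const char *current_node;   // where the resource is running now
    const char *assigned_node;  // where the scheduler placed it
};

// Compare against the current node when the flag is set,
// otherwise against the assigned node
static bool
is_on_node(const struct rsc_like *rsc, const char *node, unsigned int flags)
{
    const char *ref = (flags & match_current_node)? rsc->current_node
                                                  : rsc->assigned_node;

    return (ref != NULL) && (strcmp(ref, node) == 0);
}

int
main(void)
{
    struct rsc_like rsc = { .current_node = "node1", .assigned_node = "node2" };

    printf("current=%d assigned=%d\n",
           is_on_node(&rsc, "node1", match_current_node),   // 1
           is_on_node(&rsc, "node1", 0));                   // 0
    return 0;
}

/*!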
* \internal * \brief Add implicit demotion ordering for a promotable instance * * \param[in,out] clone Clone resource * \param[in,out] child Instance of \p clone being ordered * \param[in] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_demotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Demote clone" -> demote instance -> "clone demoted" pcmk__order_resource_actions(clone, PCMK_ACTION_DEMOTE, child, PCMK_ACTION_DEMOTE, pe_order_implies_first_printed); pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE, clone, PCMK_ACTION_DEMOTED, pe_order_implies_then_printed); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE, last, PCMK_ACTION_DEMOTE, pe_order_optional); } } /*! * \internal * \brief Check whether an instance will be promoted or demoted * * \param[in] rsc Instance to check * \param[out] demoting If \p rsc will be demoted, this will be set to true * \param[out] promoting If \p rsc will be promoted, this will be set to true */ static void check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting) { const GList *iter = NULL; // If this is a cloned group, check group members recursively if (rsc->children != NULL) { for (iter = rsc->children; iter != NULL; iter = iter->next) { check_for_role_change((const pe_resource_t *) iter->data, demoting, promoting); } return; } for (iter = rsc->actions; iter != NULL; iter = iter->next) { const pe_action_t *action = (const pe_action_t *) iter->data; if (*promoting && *demoting) { return; } else if (pcmk_is_set(action->flags, pe_action_optional)) { continue; } else if (pcmk__str_eq(PCMK_ACTION_DEMOTE, action->task, pcmk__str_none)) { *demoting = true; } else if (pcmk__str_eq(PCMK_ACTION_PROMOTE, action->task, pcmk__str_none)) { *promoting = true; } } } /*! * \internal * \brief Add promoted-role location constraint scores to an instance's priority * * Adjust a promotable clone instance's promotion priority by the score of each * location constraint in a given list that is limited to the promoted role and * applies to the node where the instance will be placed. * * \param[in,out] child Promotable clone instance * \param[in] location_constraints List of location constraints to apply * \param[in] chosen Node where \p child will be placed */ static void apply_promoted_locations(pe_resource_t *child, const GList *location_constraints, const pe_node_t *chosen) { for (const GList *iter = location_constraints; iter; iter = iter->next) { const pe__location_t *location = iter->data; const pe_node_t *constraint_node = NULL; if (location->role_filter == pcmk_role_promoted) { constraint_node = pe_find_node_id(location->node_list_rh, chosen->details->id); } if (constraint_node != NULL) { int new_priority = pcmk__add_scores(child->priority, constraint_node->weight); pe_rsc_trace(child, "Applying location %s to %s promotion priority on %s: " "%s + %s = %s", location->id, child->id, pe__node_name(constraint_node), pcmk_readable_score(child->priority), pcmk_readable_score(constraint_node->weight), pcmk_readable_score(new_priority)); child->priority = new_priority; } } }
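apply_promoted_locations() folds each matching constraint score into the instance's promotion priority via pcmk__add_scores(), which is saturating rather than plain integer addition. Here is a simplified self-contained model of the documented score arithmetic (Pacemaker caps scores at +/-1000000, and -INFINITY dominates, so INFINITY plus -INFINITY yields -INFINITY); add_scores below is a sketch, not the real implementation.

#include <stdio.h>

#define SCORE_INFINITY 1000000  // Pacemaker caps scores at +/-1000000

/* Sketch of saturating score addition: -INFINITY dominates,
 * +INFINITY saturates, and finite sums are clamped to the range */
static int
add_scores(int a, int b)
{
    if ((a <= -SCORE_INFINITY) || (b <= -SCORE_INFINITY)) {
        return -SCORE_INFINITY;
    }
    if ((a >= SCORE_INFINITY) || (b >= SCORE_INFINITY)) {
        return SCORE_INFINITY;
    }
    if ((a + b) >= SCORE_INFINITY) {
        return SCORE_INFINITY;
    }
    if ((a + b) <= -SCORE_INFINITY) {
        return -SCORE_INFINITY;
    }
    return a + b;
}

int
main(void)
{
    printf("%d\n", add_scores(900000, 200000));                  // 1000000
    printf("%d\n", add_scores(SCORE_INFINITY, -SCORE_INFINITY)); // -1000000
    return 0;
}

/*!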
* \internal * \brief Get the node that an instance will be promoted on * * \param[in] rsc Promotable clone instance to check * * \return Node that \p rsc will be promoted on, or NULL if none */ static pe_node_t * node_to_be_promoted_on(const pe_resource_t *rsc) { pe_node_t *node = NULL; pe_node_t *local_node = NULL; const pe_resource_t *parent = NULL; // If this is a cloned group, bail if any group member can't be promoted for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; if (node_to_be_promoted_on(child) == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because member %s can't", rsc->id, child->id); return NULL; } } node = rsc->fns->location(rsc, NULL, FALSE); if (node == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because it won't be active", rsc->id); return NULL; } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { if (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted) { crm_notice("Unmanaged instance %s will be left promoted on %s", rsc->id, pe__node_name(node)); } else { pe_rsc_trace(rsc, "%s can't be promoted because it is unmanaged", rsc->id); return NULL; } } else if (rsc->priority < 0) { pe_rsc_trace(rsc, "%s can't be promoted because its promotion priority %d " "is negative", rsc->id, rsc->priority); return NULL; } else if (!pcmk__node_available(node, false, true)) { pe_rsc_trace(rsc, "%s can't be promoted because %s can't run resources", rsc->id, pe__node_name(node)); return NULL; } parent = pe__const_top_resource(rsc, false); local_node = g_hash_table_lookup(parent->allowed_nodes, node->details->id); if (local_node == NULL) { /* It should not be possible for the scheduler to have assigned the * instance to a node where its parent is not allowed, but it's good to * have a fail-safe. */ if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { crm_warn("%s can't be promoted because %s is not allowed on %s " "(scheduler bug?)", rsc->id, parent->id, pe__node_name(node)); } // else the instance is unmanaged and already promoted return NULL; } else if ((local_node->count >= pe__clone_promoted_node_max(parent)) && pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s can't be promoted because %s has " "maximum promoted instances already", rsc->id, pe__node_name(node)); return NULL; } return local_node; } /*! 
* \internal * \brief Compare two promotable clone instances by promotion priority * * \param[in] a First instance to compare * \param[in] b Second instance to compare * * \return A negative number if \p a has higher promotion priority, * a positive number if \p b has higher promotion priority, * or 0 if promotion priorities are equal */ static gint cmp_promotable_instance(gconstpointer a, gconstpointer b) { const pe_resource_t *rsc1 = (const pe_resource_t *) a; const pe_resource_t *rsc2 = (const pe_resource_t *) b; enum rsc_role_e role1 = pcmk_role_unknown; enum rsc_role_e role2 = pcmk_role_unknown; CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL)); // Check sort index set by pcmk__set_instance_roles() if (rsc1->sort_index > rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(sort index %d > %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return -1; } else if (rsc1->sort_index < rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(sort index %d < %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return 1; } // If those are the same, prefer instance whose current role is higher role1 = rsc1->fns->state(rsc1, TRUE); role2 = rsc2->fns->state(rsc2, TRUE); if (role1 > role2) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(higher current role)", rsc1->id, rsc2->id); return -1; } else if (role1 < role2) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(lower current role)", rsc1->id, rsc2->id); return 1; } // Finally, do normal clone instance sorting return pcmk__cmp_instance(a, b); } /*! * \internal * \brief Add a promotable clone instance's sort index to its node's score * * Add a promotable clone instance's sort index (which sums its promotion * preferences and scores of relevant location constraints for the promoted * role) to the node score of the instance's assigned node. * * \param[in] data Promotable clone instance * \param[in,out] user_data Clone parent of \p data */ static void add_sort_index_to_node_score(gpointer data, gpointer user_data) { const pe_resource_t *child = (const pe_resource_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_node_t *node = NULL; const pe_node_t *chosen = NULL; if (child->sort_index < 0) { pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id); return; } chosen = child->fns->location(child, NULL, FALSE); if (chosen == NULL) { pe_rsc_trace(clone, "Not adding sort index of %s: inactive", child->id); return; } node = g_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); node->weight = pcmk__add_scores(child->sort_index, node->weight); pe_rsc_trace(clone, "Added cumulative priority of %s (%s) to score on %s (now %s)", child->id, pcmk_readable_score(child->sort_index), pe__node_name(node), pcmk_readable_score(node->weight)); } /*! 
* \internal * \brief Apply a colocation to the dependent's node scores if it is for the promoted role * * \param[in,out] data Colocation constraint to apply * \param[in,out] user_data Promotable clone that is constraint's dependent */ static void apply_coloc_to_dependent(gpointer data, gpointer user_data) { pcmk__colocation_t *colocation = data; pe_resource_t *clone = user_data; pe_resource_t *primary = colocation->primary; uint32_t flags = pcmk__coloc_select_default; float factor = colocation->score / (float) INFINITY; if (colocation->dependent_role != pcmk_role_promoted) { return; } if (colocation->score < INFINITY) { flags = pcmk__coloc_select_active; } pe_rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s", colocation->id, colocation->dependent->id, colocation->primary->id, pcmk_readable_score(colocation->score)); primary->cmds->add_colocated_node_scores(primary, clone, clone->id, &clone->allowed_nodes, colocation, factor, flags); } /*! * \internal * \brief Apply a colocation to the primary's node scores if it is for the promoted role * * \param[in,out] data Colocation constraint to apply * \param[in,out] user_data Promotable clone that is constraint's primary */ static void apply_coloc_to_primary(gpointer data, gpointer user_data) { pcmk__colocation_t *colocation = data; pe_resource_t *clone = user_data; pe_resource_t *dependent = colocation->dependent; const float factor = colocation->score / (float) INFINITY; const uint32_t flags = pcmk__coloc_select_active |pcmk__coloc_select_nonnegative; if ((colocation->primary_role != pcmk_role_promoted) || !pcmk__colocation_has_influence(colocation, NULL)) { return; } pe_rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s", colocation->id, colocation->dependent->id, colocation->primary->id, pcmk_readable_score(colocation->score)); dependent->cmds->add_colocated_node_scores(dependent, clone, clone->id, &clone->allowed_nodes, colocation, factor, flags); } /*! * \internal * \brief Set a clone instance's sort index to its node's score * * \param[in,out] data Promotable clone instance * \param[in] user_data Parent clone of \p data */ static void set_sort_index_to_node_score(gpointer data, gpointer user_data) { pe_resource_t *child = (pe_resource_t *) data; const pe_resource_t *clone = (const pe_resource_t *) user_data; pe_node_t *chosen = child->fns->location(child, NULL, FALSE); if (!pcmk_is_set(child->flags, pe_rsc_managed) && (child->next_role == pcmk_role_promoted)) { child->sort_index = INFINITY; pe_rsc_trace(clone, "Final sort index for %s is INFINITY (unmanaged promoted)", child->id); } else if ((chosen == NULL) || (child->sort_index < 0)) { pe_rsc_trace(clone, "Final sort index for %s is %d (ignoring node score)", child->id, child->sort_index); } else { const pe_node_t *node = g_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); child->sort_index = node->weight; pe_rsc_trace(clone, "Adding scores for %s: final sort index for %s is %d", clone->id, child->id, child->sort_index); } }
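Both callbacks convert the colocation score into factor = score / INFINITY before calling add_colocated_node_scores(), so a mandatory colocation merges the other resource's node scores at full weight while weaker colocations contribute proportionally. A tiny self-contained illustration of that scaling (the values are made up):

#include <stdio.h>

#define SCORE_INFINITY 1000000

int
main(void)
{
    int colocation_score = 5000;      // an optional colocation
    int primary_node_score = 200;     // primary's score on some node

    // Scale the score into a multiplier in [-1.0, 1.0], as the
    // promotable-clone callbacks above do with colocation->score
    float factor = colocation_score / (float) SCORE_INFINITY;

    printf("factor=%.3f contribution=%d\n", factor,
           (int) (factor * primary_node_score));
    return 0;
}

/*!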
* \internal * \brief Sort a promotable clone's instances by descending promotion priority * * \param[in,out] clone Promotable clone to sort */ static void sort_promotable_instances(pe_resource_t *clone) { GList *colocations = NULL; if (pe__set_clone_flag(clone, pe__clone_promotion_constrained) == pcmk_rc_already) { return; } pe__set_resource_flags(clone, pe_rsc_merging); for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; pe_rsc_trace(clone, "Adding scores for %s: initial sort index for %s is %d", clone->id, child->id, child->sort_index); } pe__show_node_scores(true, clone, "Before", clone->allowed_nodes, clone->cluster); g_list_foreach(clone->children, add_sort_index_to_node_score, clone); colocations = pcmk__this_with_colocations(clone); g_list_foreach(colocations, apply_coloc_to_dependent, clone); g_list_free(colocations); colocations = pcmk__with_this_colocations(clone); g_list_foreach(colocations, apply_coloc_to_primary, clone); g_list_free(colocations); // Ban resource from all nodes if it needs a ticket but doesn't have it pcmk__require_promotion_tickets(clone); pe__show_node_scores(true, clone, "After", clone->allowed_nodes, clone->cluster); // Reset sort indexes to final node scores g_list_foreach(clone->children, set_sort_index_to_node_score, clone); // Finally, sort instances in descending order of promotion priority clone->children = g_list_sort(clone->children, cmp_promotable_instance); pe__clear_resource_flags(clone, pe_rsc_merging); } /*! * \internal * \brief Find the active instance (if any) of an anonymous clone on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return Active instance of \p clone matching \p id on \p node if any, * otherwise NULL */ static pe_resource_t * find_active_anon_instance(const pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; pe_resource_t *active = NULL; // Use ->find_rsc() in case this is a cloned group active = clone->fns->find_rsc(child, id, node, pcmk_rsc_match_clone_only - |pe_find_current); + |pcmk_rsc_match_current_node); if (active != NULL) { return active; } } return NULL; } /*! * \internal * \brief Check whether an anonymous clone instance is known on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return true if \p id instance of \p clone is known on \p node, * otherwise false */ static bool anonymous_known_on(const pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; /* Use ->find_rsc() because this might be a cloned group, and knowing * that other members of the group are known here implies nothing. */ child = clone->fns->find_rsc(child, id, NULL, pcmk_rsc_match_clone_only); CRM_LOG_ASSERT(child != NULL); if (child != NULL) { if (g_hash_table_lookup(child->known_on, node->details->id)) { return true; } } } return false; }
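Both helpers take the instance ID "without instance number": clone instance IDs carry a numeric suffix after a colon (for example "galera:2"), and anonymous-clone matching works on the base name. A hypothetical stand-in for clone_strip() (the real helper also validates that the suffix is numeric):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for clone_strip(): copy an instance ID such as
 * "galera:2" without its ":<instance>" suffix; caller frees the result */
static char *
strip_instance(const char *id)
{
    const char *colon = strrchr(id, ':');
    size_t len = (colon == NULL)? strlen(id) : (size_t) (colon - id);
    char *base = malloc(len + 1);

    if (base != NULL) {
        memcpy(base, id, len);
        base[len] = '\0';
    }
    return base;
}

int
main(void)
{
    char *base = strip_instance("galera:2");

    printf("%s\n", base);  // galera
    free(base);
    return 0;
}

/*!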
* \internal * \brief Check whether a node is allowed to run a resource * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p node is allowed to run \p rsc, otherwise false */ static bool is_allowed(const pe_resource_t *rsc, const pe_node_t *node) { pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); return (allowed != NULL) && (allowed->weight >= 0); } /*! * \internal * \brief Check whether a clone instance's promotion score should be considered * * \param[in] rsc Promotable clone instance to check * \param[in] node Node where score would be applied * * \return true if \p rsc's promotion score should be considered on \p node, * otherwise false */ static bool promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node) { char *id = clone_strip(rsc->id); const pe_resource_t *parent = pe__const_top_resource(rsc, false); pe_resource_t *active = NULL; const char *reason = "allowed"; // Some checks apply only to anonymous clone instances if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { // If instance is active on the node, its score definitely applies active = find_active_anon_instance(parent, id, node); if (active == rsc) { reason = "active"; goto check_allowed; } /* If *no* instance is active on this node, this instance's score will * count if it has been probed on this node. */ if ((active == NULL) && anonymous_known_on(parent, id, node)) { reason = "probed"; goto check_allowed; } } /* If this clone's status is unknown on *all* nodes (e.g. cluster startup), * take all instances' scores into account, to make sure we use any * permanent promotion scores. */ if ((rsc->running_on == NULL) && (g_hash_table_size(rsc->known_on) == 0)) { reason = "none probed"; goto check_allowed; } /* Otherwise, we've probed and/or started the resource *somewhere*, so * consider promotion scores on nodes where we know the status. */ if ((g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) || (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) { reason = "known"; } else { pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not probed", rsc->id, id, pe__node_name(node)); free(id); return false; } check_allowed: if (is_allowed(rsc, node)) { pe_rsc_trace(rsc, "Counting %s promotion score (for %s) on %s: %s", rsc->id, id, pe__node_name(node), reason); free(id); return true; } pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not allowed", rsc->id, id, pe__node_name(node)); free(id); return false; } /*! * \internal * \brief Get the value of a promotion score node attribute * * \param[in] rsc Promotable clone instance to get promotion score for * \param[in] node Node to get promotion score for * \param[in] name Resource name to use in promotion score attribute name * * \return Value of promotion score node attribute for \p rsc on \p node */ static const char * promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, const char *name) { char *attr_name = NULL; const char *attr_value = NULL; enum pe__rsc_node node_type = pe__rsc_node_assigned; if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) { // Not assigned yet node_type = pe__rsc_node_current; } attr_name = pcmk_promotion_score_name(name); attr_value = pe__node_attribute_calculated(node, attr_name, rsc, node_type, false); free(attr_name); return attr_value; }
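promotion_attr_value() asks pcmk_promotion_score_name() for the per-node attribute that stores the score — historically named with a "master-" prefix kept from the old master/slave terminology, which is also what crm_attribute --promotion reads and writes. A simplified, hypothetical stand-in assuming that prefix:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for pcmk_promotion_score_name(): build the
 * node-attribute name that holds a resource's promotion score,
 * assuming the historical "master-" prefix; caller frees the result */
static char *
promotion_score_attr(const char *rsc_name)
{
    const char *prefix = "master-";
    char *attr = malloc(strlen(prefix) + strlen(rsc_name) + 1);

    if (attr != NULL) {
        strcpy(attr, prefix);
        strcat(attr, rsc_name);
    }
    return attr;
}

int
main(void)
{
    char *attr = promotion_score_attr("galera");

    printf("%s\n", attr);  // master-galera
    free(attr);
    return 0;
}

/*!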
* \internal * \brief Get the promotion score for a clone instance on a node * * \param[in] rsc Promotable clone instance to get score for * \param[in] node Node to get score for * \param[out] is_default If non-NULL, will be set true if no score available * * \return Promotion score for \p rsc on \p node (or 0 if none) */ static int promotion_score(const pe_resource_t *rsc, const pe_node_t *node, bool *is_default) { char *name = NULL; const char *attr_value = NULL; if (is_default != NULL) { *is_default = true; } CRM_CHECK((rsc != NULL) && (node != NULL), return 0); /* If this is an instance of a cloned group, the promotion score is the sum * of all members' promotion scores. */ if (rsc->children != NULL) { int score = 0; for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { const pe_resource_t *child = (const pe_resource_t *) iter->data; bool child_default = false; int child_score = promotion_score(child, node, &child_default); if (!child_default && (is_default != NULL)) { *is_default = false; } score += child_score; } return score; } if (!promotion_score_applies(rsc, node)) { return 0; } /* For the promotion score attribute name, use the name the resource is * known as in resource history, since that's what crm_attribute --promotion * would have used. */ name = (rsc->clone_name == NULL)? rsc->id : rsc->clone_name; attr_value = promotion_attr_value(rsc, node, name); if (attr_value != NULL) { pe_rsc_trace(rsc, "Promotion score for %s on %s = %s", name, pe__node_name(node), pcmk__s(attr_value, "(unset)")); } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { /* If we don't have any resource history yet, we won't have clone_name. * In that case, for anonymous clones, try the resource name without * any instance number. */ name = clone_strip(rsc->id); if (strcmp(rsc->id, name) != 0) { attr_value = promotion_attr_value(rsc, node, name); pe_rsc_trace(rsc, "Promotion score for %s on %s (for %s) = %s", name, pe__node_name(node), rsc->id, pcmk__s(attr_value, "(unset)")); } free(name); } if (attr_value == NULL) { return 0; } if (is_default != NULL) { *is_default = false; } return char2score(attr_value); } /*! * \internal * \brief Include promotion scores in instances' node scores and priorities * * \param[in,out] rsc Promotable clone resource to update */ void pcmk__add_promotion_scores(pe_resource_t *rsc) { if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) { return; } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child_rsc = (pe_resource_t *) iter->data; GHashTableIter iter; pe_node_t *node = NULL; int score, new_score; g_hash_table_iter_init(&iter, child_rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (!pcmk__node_available(node, false, false)) { /* This node will never be promoted, so don't apply the * promotion score, as that may lead to clone shuffling. */ continue; } score = promotion_score(child_rsc, node, NULL); if (score > 0) { new_score = pcmk__add_scores(node->weight, score); if (new_score != node->weight) { // Could remain INFINITY node->weight = new_score; pe_rsc_trace(rsc, "Added %s promotion priority (%s) to score " "on %s (now %s)", child_rsc->id, pcmk_readable_score(score), pe__node_name(node), pcmk_readable_score(new_score)); } } if (score > child_rsc->priority) { pe_rsc_trace(rsc, "Updating %s priority to promotion score (%d->%d)", child_rsc->id, child_rsc->priority, score); child_rsc->priority = score; } } } } /*! 
* \internal * \brief If a resource's current role is started, change it to unpromoted * * \param[in,out] data Resource to update * \param[in] user_data Ignored */ static void set_current_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->role == pcmk_role_started) { // Promotable clones should use unpromoted role instead of started rsc->role = pcmk_role_unpromoted; } g_list_foreach(rsc->children, set_current_role_unpromoted, NULL); } /*! * \internal * \brief Set a resource's next role to unpromoted (or stopped if unassigned) * * \param[in,out] data Resource to update * \param[in] user_data Ignored */ static void set_next_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; GList *assigned = NULL; rsc->fns->location(rsc, &assigned, FALSE); if (assigned == NULL) { pe__set_next_role(rsc, pcmk_role_stopped, "stopped instance"); } else { pe__set_next_role(rsc, pcmk_role_unpromoted, "unpromoted instance"); g_list_free(assigned); } g_list_foreach(rsc->children, set_next_role_unpromoted, NULL); } /*! * \internal * \brief Set a resource's next role to promoted if not already set * * \param[in,out] data Resource to update * \param[in] user_data Ignored */ static void set_next_role_promoted(void *data, gpointer user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->next_role == pcmk_role_unknown) { pe__set_next_role(rsc, pcmk_role_promoted, "promoted instance"); } g_list_foreach(rsc->children, set_next_role_promoted, NULL); } /*! * \internal * \brief Show instance's promotion score on node where it will be active * * \param[in,out] instance Promotable clone instance to show */ static void show_promotion_score(pe_resource_t *instance) { pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE); if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores) && !pcmk__is_daemon && (instance->cluster->priv != NULL)) { pcmk__output_t *out = instance->cluster->priv; out->message(out, "promotion-score", instance, chosen, pcmk_readable_score(instance->sort_index)); } else { pe_rsc_debug(pe__const_top_resource(instance, false), "%s promotion score on %s: sort=%s priority=%s", instance->id, ((chosen == NULL)? "none" : pe__node_name(chosen)), pcmk_readable_score(instance->sort_index), pcmk_readable_score(instance->priority)); } } /*! 
* \internal * \brief Set a clone instance's promotion priority * * \param[in,out] data Promotable clone instance to update * \param[in] user_data Instance's parent clone */ static void set_instance_priority(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; const pe_resource_t *clone = (const pe_resource_t *) user_data; const pe_node_t *chosen = NULL; enum rsc_role_e next_role = pcmk_role_unknown; GList *list = NULL; pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id, role2text(instance->next_role)); if (instance->fns->state(instance, TRUE) == pcmk_role_started) { set_current_role_unpromoted(instance, NULL); } // Only an instance that will be active can be promoted chosen = instance->fns->location(instance, &list, FALSE); if (pcmk__list_of_multiple(list)) { pcmk__config_err("Cannot promote non-colocated child %s", instance->id); } g_list_free(list); if (chosen == NULL) { return; } next_role = instance->fns->state(instance, FALSE); switch (next_role) { case pcmk_role_started: case pcmk_role_unknown: // Set instance priority to its promotion score (or -1 if none) { bool is_default = false; instance->priority = promotion_score(instance, chosen, &is_default); if (is_default) { /* * Default to -1 if no value is set. This allows * instances eligible for promotion to be specified * based solely on rsc_location constraints, but * prevents any instance from being promoted if neither * a constraint nor a promotion score is present */ instance->priority = -1; } } break; case pcmk_role_unpromoted: case pcmk_role_stopped: // Instance can't be promoted instance->priority = -INFINITY; break; case pcmk_role_promoted: // Nothing needed (re-creating actions after scheduling fencing) break; default: CRM_CHECK(FALSE, crm_err("Unknown resource role %d for %s", next_role, instance->id)); } // Add relevant location constraint scores for promoted role apply_promoted_locations(instance, instance->rsc_location, chosen); apply_promoted_locations(instance, clone->rsc_location, chosen); // Consider instance's role-based colocations with other resources list = pcmk__this_with_colocations(instance); for (GList *iter = list; iter != NULL; iter = iter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) iter->data; instance->cmds->apply_coloc_score(instance, cons->primary, cons, true); } g_list_free(list); instance->sort_index = instance->priority; if (next_role == pcmk_role_promoted) { instance->sort_index = INFINITY; } pe_rsc_trace(clone, "Assigning %s priority = %d", instance->id, instance->priority); } /*! 
* \internal * \brief Set a promotable clone instance's role * * \param[in,out] data Promotable clone instance to update * \param[in,out] user_data Pointer to count of instances chosen for promotion */ static void set_instance_role(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; int *count = (int *) user_data; const pe_resource_t *clone = pe__const_top_resource(instance, false); pe_node_t *chosen = NULL; show_promotion_score(instance); if (instance->sort_index < 0) { pe_rsc_trace(clone, "Not supposed to promote instance %s", instance->id); } else if ((*count < pe__clone_promoted_max(instance)) || !pcmk_is_set(clone->flags, pe_rsc_managed)) { chosen = node_to_be_promoted_on(instance); } if (chosen == NULL) { set_next_role_unpromoted(instance, NULL); return; } if ((instance->role < pcmk_role_promoted) && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum) && (instance->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) { crm_notice("Clone instance %s cannot be promoted without quorum", instance->id); set_next_role_unpromoted(instance, NULL); return; } chosen->count++; pe_rsc_info(clone, "Choosing %s (%s) on %s for promotion", instance->id, role2text(instance->role), pe__node_name(chosen)); set_next_role_promoted(instance, NULL); (*count)++; } /*! * \internal * \brief Set roles for all instances of a promotable clone * * \param[in,out] rsc Promotable clone resource to update */ void pcmk__set_instance_roles(pe_resource_t *rsc) { int promoted = 0; GHashTableIter iter; pe_node_t *node = NULL; // Repurpose count to track the number of promoted instances assigned g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { node->count = 0; } // Set instances' promotion priorities and sort by highest priority first g_list_foreach(rsc->children, set_instance_priority, rsc); sort_promotable_instances(rsc); // Choose the first N eligible instances to be promoted g_list_foreach(rsc->children, set_instance_role, &promoted); pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d", rsc->id, promoted, pe__clone_promoted_max(rsc)); } /*! * \internal * \brief Create actions for promotable clone instances * * \param[in,out] clone Promotable clone to create actions for * \param[out] any_promoting Will be set true if any instance is promoting * \param[out] any_demoting Will be set true if any instance is demoting */ static void create_promotable_instance_actions(pe_resource_t *clone, bool *any_promoting, bool *any_demoting) { for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; instance->cmds->create_actions(instance); check_for_role_change(instance, any_demoting, any_promoting); } } /*! * \internal * \brief Reset each promotable instance's resource priority * * Reset the priority of each instance of a promotable clone to the clone's * priority (called after promotion actions are scheduled, since instance * priorities were temporarily repurposed as promotion scores). * * \param[in,out] clone Promotable clone to reset */ static void reset_instance_priorities(pe_resource_t *clone) { for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; instance->priority = clone->priority; } }
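pcmk__set_instance_roles() boils down to: score every instance, sort in descending order, then promote the first N eligible instances. A self-contained sketch of that selection, with a plain array standing in for the clone's GList of children (IDs and scores are made up):

#include <stdio.h>
#include <stdlib.h>

struct instance {
    const char *id;
    int priority;   // stands in for sort_index
};

// Sort descending by priority (the made-up values are small,
// so the subtraction cannot overflow here)
static int
cmp_desc(const void *a, const void *b)
{
    const struct instance *i1 = a;
    const struct instance *i2 = b;

    return i2->priority - i1->priority;
}

int
main(void)
{
    struct instance child[] = {
        { "db:0", 5 }, { "db:1", 100 }, { "db:2", -1 },
    };
    const int promoted_max = 1;  // like the promoted-max clone option
    int promoted = 0;

    qsort(child, 3, sizeof(child[0]), cmp_desc);
    for (int i = 0; (i < 3) && (promoted < promoted_max); i++) {
        if (child[i].priority >= 0) {  // negative priority = ineligible
            printf("promote %s\n", child[i].id);
            promoted++;
        }
    }
    return 0;
}

/*!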
* \internal * \brief Create actions specific to promotable clones * * \param[in,out] clone Promotable clone to create actions for */ void pcmk__create_promotable_actions(pe_resource_t *clone) { bool any_promoting = false; bool any_demoting = false; // Create actions for each clone instance individually create_promotable_instance_actions(clone, &any_promoting, &any_demoting); // Create pseudo-actions for clone as a whole pe__create_promotable_pseudo_ops(clone, any_promoting, any_demoting); // Undo our temporary repurposing of resource priority for instances reset_instance_priorities(clone); } /*! * \internal * \brief Create internal orderings for a promotable clone's instances * * \param[in,out] clone Promotable clone whose instances should be ordered */ void pcmk__order_promotable_instances(pe_resource_t *clone) { pe_resource_t *previous = NULL; // Needed for ordered clones pcmk__promotable_restart_ordering(clone); for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; // Demote before promote pcmk__order_resource_actions(instance, PCMK_ACTION_DEMOTE, instance, PCMK_ACTION_PROMOTE, pe_order_optional); order_instance_promotion(clone, instance, previous); order_instance_demotion(clone, instance, previous); previous = instance; } } /*! * \internal * \brief Update dependent's allowed nodes for colocation with promotable * * \param[in,out] dependent Dependent resource to update * \param[in] primary Primary resource * \param[in] primary_node Node where an instance of the primary will be * \param[in] colocation Colocation constraint to apply */ static void update_dependent_allowed_nodes(pe_resource_t *dependent, const pe_resource_t *primary, const pe_node_t *primary_node, const pcmk__colocation_t *colocation) { GHashTableIter iter; pe_node_t *node = NULL; const char *primary_value = NULL; const char *attr = colocation->node_attribute; if (colocation->score >= INFINITY) { return; // Colocation is mandatory, so allowed node scores don't matter } primary_value = pcmk__colocation_node_attr(primary_node, attr, primary); pe_rsc_trace(colocation->primary, "Applying %s (%s with %s on %s by %s @%d) to %s", colocation->id, colocation->dependent->id, colocation->primary->id, pe__node_name(primary_node), attr, colocation->score, dependent->id); g_hash_table_iter_init(&iter, dependent->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { const char *dependent_value = pcmk__colocation_node_attr(node, attr, dependent); if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) { node->weight = pcmk__add_scores(node->weight, colocation->score); pe_rsc_trace(colocation->primary, "Added %s score (%s) to %s (now %s)", colocation->id, pcmk_readable_score(colocation->score), pe__node_name(node), pcmk_readable_score(node->weight)); } } } /*! * \internal * \brief Update dependent for a colocation with a promotable clone * * \param[in] primary Primary resource in the colocation * \param[in,out] dependent Dependent resource in the colocation * \param[in] colocation Colocation constraint to apply */ void pcmk__update_dependent_with_promotable(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation) { GList *affected_nodes = NULL; /* Build a list of all nodes where an instance of the primary will be, and * (for optional colocations) update the dependent's allowed node scores for * each one. 
*/ for (GList *iter = primary->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; pe_node_t *node = instance->fns->location(instance, NULL, FALSE); if (node == NULL) { continue; } if (instance->fns->state(instance, FALSE) == colocation->primary_role) { update_dependent_allowed_nodes(dependent, primary, node, colocation); affected_nodes = g_list_prepend(affected_nodes, node); } } /* For mandatory colocations, add the primary's node score to the * dependent's node score for each affected node, and ban the dependent * from all other nodes. * * However, skip this for promoted-with-promoted colocations, otherwise * inactive dependent instances can't start (in the unpromoted role). */ if ((colocation->score >= INFINITY) && ((colocation->dependent_role != pcmk_role_promoted) || (colocation->primary_role != pcmk_role_promoted))) { pe_rsc_trace(colocation->primary, "Applying %s (mandatory %s with %s) to %s", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id); pcmk__colocation_intersect_nodes(dependent, primary, colocation, affected_nodes, true); } g_list_free(affected_nodes); } /*! * \internal * \brief Update dependent priority for colocation with promotable * * \param[in] primary Primary resource in the colocation * \param[in,out] dependent Dependent resource in the colocation * \param[in] colocation Colocation constraint to apply */ void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation) { pe_resource_t *primary_instance = NULL; // Look for a primary instance where dependent will be primary_instance = pcmk__find_compatible_instance(dependent, primary, colocation->primary_role, false); if (primary_instance != NULL) { // Add primary instance's priority to dependent's int new_priority = pcmk__add_scores(dependent->priority, colocation->score); pe_rsc_trace(colocation->primary, "Applying %s (%s with %s) to %s priority (%s + %s = %s)", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id, pcmk_readable_score(dependent->priority), pcmk_readable_score(colocation->score), pcmk_readable_score(new_priority)); dependent->priority = new_priority; } else if (colocation->score >= INFINITY) { // Mandatory colocation, but primary won't be here pe_rsc_trace(colocation->primary, "Applying %s (%s with %s) to %s: can't be promoted", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id); dependent->priority = -INFINITY; } } diff --git a/lib/pengine/native.c b/lib/pengine/native.c index c0f07eda62..9f39623cd6 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -1,1440 +1,1442 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <stdint.h> #include <crm/common/output.h> #include <crm/pengine/rules.h> #include <crm/pengine/status.h> #include <crm/pengine/complex.h> #include <crm/pengine/internal.h> #include <crm/msg_xml.h> #include <pe_status_private.h> #ifdef PCMK__COMPAT_2_0 #define PROVIDER_SEP "::" #else #define PROVIDER_SEP ":" #endif
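PROVIDER_SEP only affects how an agent specification is rendered in status output — class[:provider]:type, with a doubled separator under the 2.0 compatibility build. A tiny illustration of the formatting that pcmk__native_output_string() performs later in this file (values are made up):

#include <stdio.h>

int
main(void)
{
    const char *sep = ":";               // PROVIDER_SEP under the default build
    const char *class = "ocf";
    const char *provider = "heartbeat";  // NULL for standards without providers
    const char *kind = "IPaddr2";

    printf("%s%s%s:%s\n", class,
           (provider == NULL)? "" : sep,
           (provider == NULL)? "" : provider,
           kind);                        // ocf:heartbeat:IPaddr2
    return 0;
}

/*!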
* \internal * \brief Check whether a resource is active on multiple nodes */ static bool is_multiply_active(const pe_resource_t *rsc) { unsigned int count = 0; if (rsc->variant == pcmk_rsc_variant_primitive) { pe__find_active_requires(rsc, &count); } return count > 1; } static void native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed) { int priority = 0; if ((rsc->priority == 0) || (failed == TRUE)) { return; } if (rsc->role == pcmk_role_promoted) { // Promoted instance takes base priority + 1 priority = rsc->priority + 1; } else { priority = rsc->priority; } node->details->priority += priority; pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)", pe__node_name(node), node->details->priority, (rsc->role == pcmk_role_promoted)? "promoted " : "", rsc->id, rsc->priority, (rsc->role == pcmk_role_promoted)? " + 1" : ""); /* Priority of a resource running on a guest node is added to the cluster * node as well. */ if (node->details->remote_rsc && node->details->remote_rsc->container) { GList *gIter = node->details->remote_rsc->container->running_on; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = gIter->data; a_node->details->priority += priority; pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) " "from guest node %s", pe__node_name(a_node), a_node->details->priority, (rsc->role == pcmk_role_promoted)? "promoted " : "", rsc->id, rsc->priority, (rsc->role == pcmk_role_promoted)? " + 1" : "", pe__node_name(node)); } } } void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed) { GList *gIter = rsc->running_on; CRM_CHECK(node != NULL, return); for (; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = (pe_node_t *) gIter->data; CRM_CHECK(a_node != NULL, return); if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) { return; } } pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node), pcmk_is_set(rsc->flags, pe_rsc_managed)? 
"" : "(unmanaged)"); rsc->running_on = g_list_append(rsc->running_on, node); if (rsc->variant == pcmk_rsc_variant_primitive) { node->details->running_rsc = g_list_append(node->details->running_rsc, rsc); native_priority_to_node(rsc, node, failed); } if ((rsc->variant == pcmk_rsc_variant_primitive) && node->details->maintenance) { pe__clear_resource_flags(rsc, pe_rsc_managed); pe__set_resource_flags(rsc, pe_rsc_maintenance); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_resource_t *p = rsc->parent; pe_rsc_info(rsc, "resource %s isn't managed", rsc->id); resource_location(rsc, node, INFINITY, "not_managed_default", data_set); while(p && node->details->online) { /* add without the additional location constraint */ p->running_on = g_list_append(p->running_on, node); p = p->parent; } return; } if (is_multiply_active(rsc)) { switch (rsc->recovery_type) { case pcmk_multiply_active_stop: { GHashTableIter gIter; pe_node_t *local_node = NULL; /* make sure it doesn't come up again */ if (rsc->allowed_nodes != NULL) { g_hash_table_destroy(rsc->allowed_nodes); } rsc->allowed_nodes = pe__node_list2table(data_set->nodes); g_hash_table_iter_init(&gIter, rsc->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) { local_node->weight = -INFINITY; } } break; case pcmk_multiply_active_block: pe__clear_resource_flags(rsc, pe_rsc_managed); pe__set_resource_flags(rsc, pe_rsc_block); /* If the resource belongs to a group or bundle configured with * multiple-active=block, block the entire entity. */ if (rsc->parent && ((rsc->parent->variant == pcmk_rsc_variant_group) || (rsc->parent->variant == pcmk_rsc_variant_bundle)) && (rsc->parent->recovery_type == pcmk_multiply_active_block)) { GList *gIter = rsc->parent->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe__clear_resource_flags(child, pe_rsc_managed); pe__set_resource_flags(child, pe_rsc_block); } } break; // pcmk_multiply_active_restart, pcmk_multiply_active_unexpected default: /* The scheduler will do the right thing because the relevant * variables and flags are set when unpacking the history. */ break; } crm_debug("%s is active on multiple nodes including %s: %s", rsc->id, pe__node_name(node), recovery2text(rsc->recovery_type)); } else { pe_rsc_trace(rsc, "Resource %s is active on %s", rsc->id, pe__node_name(node)); } if (rsc->parent != NULL) { native_add_running(rsc->parent, node, data_set, FALSE); } } static void recursive_clear_unique(pe_resource_t *rsc, gpointer user_data) { pe__clear_resource_flags(rsc, pe_rsc_unique); add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE); g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL); } gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_resource_t *parent = uber_parent(rsc); const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); uint32_t ra_caps = pcmk_get_ra_caps(standard); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); // Only some agent standards support unique and promotable clones if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique) && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) { /* @COMPAT We should probably reject this situation as an error (as we * do for promotable below) rather than warn and convert, but that would * be a backward-incompatible change that we should probably do with a * transform at a schema major version bump. 
*/ pe__force_anon(standard, parent, rsc->id, data_set); /* Clear globally-unique on the parent and all its descendants unpacked * so far (clearing the parent should make any future children unpacking * correct). We have to clear this resource explicitly because it isn't * hooked into the parent's children yet. */ recursive_clear_unique(parent, NULL); recursive_clear_unique(rsc, NULL); } if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable) && pcmk_is_set(parent->flags, pe_rsc_promotable)) { pe_err("Resource %s is of type %s and therefore " "cannot be used as a promotable clone resource", rsc->id, standard); return FALSE; } return TRUE; } static bool rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags) { pe_rsc_trace(rsc, "Checking whether %s is on %s", rsc->id, pe__node_name(node)); - if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) { + if (pcmk_is_set(flags, pcmk_rsc_match_current_node) + && (rsc->running_on != NULL)) { for (GList *iter = rsc->running_on; iter; iter = iter->next) { pe_node_t *loc = (pe_node_t *) iter->data; if (loc->details == node->details) { return true; } } } else if (pcmk_is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) { return true; - } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to + } else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node) + && (rsc->allocated_to != NULL) && (rsc->allocated_to->details == node->details)) { return true; } return false; } pe_resource_t * native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node, int flags) { bool match = false; pe_resource_t *result = NULL; CRM_CHECK(id && rsc && rsc->id, return NULL); if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) { const char *rid = ID(rsc->xml); if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) { match = false; } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) { match = true; } } else if (!strcmp(id, rsc->id)) { match = true; } else if (pcmk_is_set(flags, pcmk_rsc_match_history) && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = true; } else if (pcmk_is_set(flags, pe_find_any) || (pcmk_is_set(flags, pcmk_rsc_match_anon_basename) && !pcmk_is_set(rsc->flags, pe_rsc_unique))) { match = pe_base_name_eq(rsc, id); } if (match && on_node) { if (!rsc_is_on_node(rsc, on_node, flags)) { match = false; } } if (match) { return rsc; } for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; result = rsc->fns->find_rsc(child, id, on_node, flags); if (result) { return result; } } return NULL; } // create is ignored char * native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, pe_working_set_t * data_set) { char *value_copy = NULL; const char *value = NULL; GHashTable *params = NULL; CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); params = pe_rsc_params(rsc, node, data_set); value = g_hash_table_lookup(params, name); if (value == NULL) { /* try meta attributes instead */ value = g_hash_table_lookup(rsc->meta, name); } pcmk__str_update(&value_copy, value); return value_copy; } gboolean native_active(pe_resource_t * rsc, gboolean all) { for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = (pe_node_t *) gIter->data; if (a_node->details->unclean) { pe_rsc_trace(rsc, "Resource %s: %s is unclean", rsc->id, pe__node_name(a_node)); return TRUE; 
} else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Resource %s: %s is offline", rsc->id, pe__node_name(a_node)); } else { pe_rsc_trace(rsc, "Resource %s active on %s", rsc->id, pe__node_name(a_node)); return TRUE; } } return FALSE; } struct print_data_s { long options; void *print_data; }; static const char * native_pending_state(const pe_resource_t *rsc) { const char *pending_state = NULL; if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) { pending_state = "Starting"; } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP, pcmk__str_casei)) { pending_state = "Stopping"; } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO, pcmk__str_casei)) { pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_casei)) { /* Work might be done in here. */ pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) { pending_state = "Promoting"; } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) { pending_state = "Demoting"; } return pending_state; } static const char * native_pending_task(const pe_resource_t *rsc) { const char *pending_task = NULL; if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { pending_task = "Monitoring"; /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * unpack.c:unpack_rsc_op(). */ /* } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) { pending_task = "Checking"; */ } return pending_task; } static enum rsc_role_e native_displayable_role(const pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; if ((role == pcmk_role_started) && pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pe_rsc_promotable)) { role = pcmk_role_unpromoted; } return role; } static const char * native_displayable_state(const pe_resource_t *rsc, bool print_pending) { const char *rsc_state = NULL; if (print_pending) { rsc_state = native_pending_state(rsc); } if (rsc_state == NULL) { rsc_state = role2text(native_displayable_role(rsc)); } return rsc_state; } /*! * \internal * \deprecated This function will be removed in a future release */ static void native_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending)); const char *target_role = NULL; /* resource information. 
*/ status_print("%sxml, XML_ATTR_TYPE)); status_print("role=\"%s\" ", rsc_state); if (rsc->meta) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE))); status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan)); status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block)); status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed)); status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed)); status_print("failure_ignored=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored)); status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on)); if (options & pe_print_pending) { const char *pending_task = native_pending_task(rsc); if (pending_task) { status_print("pending=\"%s\" ", pending_task); } } /* print out the nodes this resource is running on */ if (options & pe_print_rsconly) { status_print("/>\n"); /* do nothing */ } else if (rsc->running_on != NULL) { GList *gIter = rsc->running_on; status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; status_print("%s \n", pre_text, pcmk__s(node->details->uname, ""), node->details->id, pcmk__btoa(!node->details->online)); } status_print("%s\n", pre_text); } else { status_print("/>\n"); } } // Append a flag to resource description string's flags list static bool add_output_flag(GString *s, const char *flag_desc, bool have_flags) { g_string_append(s, (have_flags? ", " : " (")); g_string_append(s, flag_desc); return true; } // Append a node name to resource description string's node list static bool add_output_node(GString *s, const char *node, bool have_nodes) { g_string_append(s, (have_nodes? " " : " [ ")); g_string_append(s, node); return true; } /*! * \internal * \brief Create a string description of a resource * * \param[in] rsc Resource to describe * \param[in] name Desired identifier for the resource * \param[in] node If not NULL, node that resource is "on" * \param[in] show_opts Bitmask of pcmk_show_opt_e. * \param[in] target_role Resource's target role * \param[in] show_nodes Whether to display nodes when multiply active * * \return Newly allocated string description of resource * \note Caller must free the result with g_free(). */ gchar * pcmk__native_output_string(const pe_resource_t *rsc, const char *name, const pe_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *provider = NULL; const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); GString *outstr = NULL; bool have_flags = false; if (rsc->variant != pcmk_rsc_variant_primitive) { return NULL; } CRM_CHECK(name != NULL, name = "unknown"); CRM_CHECK(kind != NULL, kind = "unknown"); CRM_CHECK(class != NULL, class = "unknown"); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); } if ((node == NULL) && (rsc->lock_node != NULL)) { node = rsc->lock_node; } if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only) || pcmk__list_of_multiple(rsc->running_on)) { node = NULL; } outstr = g_string_sized_new(128); // Resource name and agent pcmk__g_strcat(outstr, name, "\t(", class, ((provider == NULL)? 
"" : PROVIDER_SEP), pcmk__s(provider, ""), ":", kind, "):\t", NULL); // State on node if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { g_string_append(outstr, " ORPHANED"); } if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { enum rsc_role_e role = native_displayable_role(rsc); g_string_append(outstr, " FAILED"); if (role > pcmk_role_unpromoted) { pcmk__add_word(&outstr, 0, role2text(role)); } } else { bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending); pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending)); } if (node) { pcmk__add_word(&outstr, 0, pe__node_name(node)); } // Failed probe operation if (native_displayable_role(rsc) == pcmk_role_stopped) { xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL); if (probe_op != NULL) { int rc; pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0); pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ", NULL); } } // Flags, as: ( [...]) if (node && !(node->details->online) && node->details->unclean) { have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); } if (node && (node == rsc->lock_node)) { have_flags = add_output_flag(outstr, "LOCKED", have_flags); } if (pcmk_is_set(show_opts, pcmk_show_pending)) { const char *pending_task = native_pending_task(rsc); if (pending_task) { have_flags = add_output_flag(outstr, pending_task, have_flags); } } if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); /* Only show target role if it limits our abilities (i.e. ignore * Started, as it is the default anyways, and doesn't prevent the * resource from becoming promoted). */ if (target_role_e == pcmk_role_stopped) { have_flags = add_output_flag(outstr, "disabled", have_flags); } else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pe_rsc_promotable) && (target_role_e == pcmk_role_unpromoted)) { have_flags = add_output_flag(outstr, "target-role:", have_flags); g_string_append(outstr, target_role); } } // Blocked or maintenance implies unmanaged if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) { if (pcmk_is_set(rsc->flags, pe_rsc_block)) { have_flags = add_output_flag(outstr, "blocked", have_flags); } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { have_flags = add_output_flag(outstr, "maintenance", have_flags); } } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { have_flags = add_output_flag(outstr, "unmanaged", have_flags); } if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { have_flags = add_output_flag(outstr, "failure ignored", have_flags); } if (have_flags) { g_string_append_c(outstr, ')'); } // User-supplied description if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description) || pcmk__list_of_multiple(rsc->running_on)) { const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC); if (desc) { g_string_append(outstr, " ("); g_string_append(outstr, desc); g_string_append(outstr, ")"); } } if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only) && pcmk__list_of_multiple(rsc->running_on)) { bool have_nodes = false; for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *n = (pe_node_t *) iter->data; have_nodes = add_output_node(outstr, n->details->uname, have_nodes); } if (have_nodes) { g_string_append(outstr, " ]"); } } return g_string_free(outstr, FALSE); } int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc, const char *name, const pe_node_t *node, uint32_t show_opts) { const char *kind = crm_element_value(rsc->xml, 
int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc, const char *name, const pe_node_t *node, uint32_t show_opts) { const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *target_role = NULL; xmlNodePtr list_node = NULL; const char *cl = NULL; CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); CRM_ASSERT(kind != NULL); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { cl = "rsc-managed"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { cl = "rsc-failed"; } else if ((rsc->variant == pcmk_rsc_variant_primitive) && (rsc->running_on == NULL)) { cl = "rsc-failed"; } else if (pcmk__list_of_multiple(rsc->running_on)) { cl = "rsc-multiple"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { cl = "rsc-failure-ignored"; } else { cl = "rsc-ok"; } { gchar *s = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); pcmk_create_html_node(list_node, "span", NULL, cl, s); g_free(s); } return pcmk_rc_ok; }
int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc, const char *name, const pe_node_t *node, uint32_t show_opts) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } { gchar *s = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); out->list_item(out, NULL, "%s", s); g_free(s); } return pcmk_rc_ok; }
/*! * \internal * \deprecated This function will be removed in a future release */ void common_print(pe_resource_t *rsc, const char *pre_text, const char *name, const pe_node_t *node, long options, void *print_data) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } if ((pre_text == NULL) && (options & pe_print_printf)) { pre_text = " "; } if (options & pe_print_html) { if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { status_print("<font color=\"yellow\">"); } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { status_print("<font color=\"red\">"); } else if (rsc->running_on == NULL) { status_print("<font color=\"red\">"); } else if (pcmk__list_of_multiple(rsc->running_on)) { status_print("<font color=\"orange\">"); } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { status_print("<font color=\"yellow\">"); } else { status_print("<font color=\"green\">"); } } { gchar *resource_s = pcmk__native_output_string(rsc, name, node, options, target_role, false); status_print("%s%s", (pre_text? pre_text : ""), resource_s); g_free(resource_s); } if (pcmk_is_set(options, pe_print_html)) { status_print(" </font> "); } if (!pcmk_is_set(options, pe_print_rsconly) && pcmk__list_of_multiple(rsc->running_on)) { GList *gIter = rsc->running_on; int counter = 0; if (options & pe_print_html) { status_print("<ul>\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("["); } for (; gIter != NULL; gIter = gIter->next) { pe_node_t *n = (pe_node_t *) gIter->data; counter++; if (options & pe_print_html) { status_print("<li>\n%s", pe__node_name(n)); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" %s", pe__node_name(n)); } else if ((options & pe_print_log)) { status_print("\t%d : %s", counter, pe__node_name(n)); } else { status_print("%s", pe__node_name(n)); } if (options & pe_print_html) { status_print("</li>\n"); } } if (options & pe_print_html) { status_print("</ul>\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" ]"); } } if (options & pe_print_html) { status_print("<br/>\n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } }
/*! * \internal * \deprecated This function will be removed in a future release */ void native_print(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { const pe_node_t *node = NULL; CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } node = pe__current_node(rsc); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data); }
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__resource_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, print_pending); const char *desc = NULL; char ra_name[LINE_MAX]; char *nodes_running_on = NULL; const char *lock_node_name = NULL; int rc = pcmk_rc_no_output; const char *target_role = NULL; desc = pe__resource_description(rsc, show_opts); if (rsc->meta != NULL) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } /* resource information. */ snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class, ((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov), crm_element_value(rsc->xml, XML_ATTR_TYPE)); nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on)); if (rsc->lock_node != NULL) { lock_node_name = rsc->lock_node->details->uname; } rc = pe__name_and_nvpairs_xml(out, true, "resource", 15, "id", rsc_printable_id(rsc), "resource_agent", ra_name, "role", rsc_state, "target_role", target_role, "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)), "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan), "blocked", pe__rsc_bool_str(rsc, pe_rsc_block), "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance), "managed", pe__rsc_bool_str(rsc, pe_rsc_managed), "failed", pe__rsc_bool_str(rsc, pe_rsc_failed), "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored), "nodes_running_on", nodes_running_on, "pending", (print_pending?
native_pending_task(rsc) : NULL), "locked_to", lock_node_name, "description", desc); free(nodes_running_on); CRM_ASSERT(rc == pcmk_rc_ok); if (rsc->running_on != NULL) { GList *gIter = rsc->running_on; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; rc = pe__name_and_nvpairs_xml(out, false, "node", 3, "name", node->details->uname, "id", node->details->id, "cached", pcmk__btoa(node->details->online)); CRM_ASSERT(rc == pcmk_rc_ok); } } pcmk__output_xml_pop_parent(out); return rc; } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__resource_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const pe_node_t *node = pe__current_node(rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts); } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__resource_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const pe_node_t *node = pe__current_node(rsc); CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts); } void native_free(pe_resource_t * rsc) { pe_rsc_trace(rsc, "Freeing resource action list (not the data)"); common_free(rsc); } enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current) { enum rsc_role_e role = rsc->next_role; if (current) { role = rsc->role; } pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role)); return role; } /*! 
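 * \internal
 * \note Editor's illustration (not part of the patch): native_resource_state()
 *       above is normally reached through the resource's method table.
 */
#if 0
static void
example_roles(const pe_resource_t *rsc)
{
    enum rsc_role_e now = rsc->fns->state(rsc, TRUE);   // role right now
    enum rsc_role_e next = rsc->fns->state(rsc, FALSE); // role after pending transition

    crm_debug("%s: %s -> %s", rsc->id, role2text(now), role2text(next));
}
#endif

/*!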
* \internal * \brief List nodes where a resource (or any of its children) is * * \param[in] rsc Resource to check * \param[out] list List to add result to * \param[in] current 0 = where allocated, 1 = where running, * 2 = where running or pending * * \return If list contains only one node, that node, or NULL otherwise */ pe_node_t * native_location(const pe_resource_t *rsc, GList **list, int current) { // @COMPAT: Accept a pe__rsc_node argument instead of int current pe_node_t *one = NULL; GList *result = NULL; if (rsc->children) { GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; child->fns->location(child, &result, current); } } else if (current) { if (rsc->running_on) { result = g_list_copy(rsc->running_on); } if ((current == 2) && rsc->pending_node && !pe_find_node_id(result, rsc->pending_node->details->id)) { result = g_list_append(result, rsc->pending_node); } } else if (current == FALSE && rsc->allocated_to) { result = g_list_append(NULL, rsc->allocated_to); } if (result && (result->next == NULL)) { one = result->data; } if (list) { GList *gIter = result; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) { *list = g_list_append(*list, node); } } } g_list_free(result); return one; } static void get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table) { GList *gIter = rsc_list; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); int offset = 0; char buffer[LINE_MAX]; int *rsc_counter = NULL; int *active_counter = NULL; if (rsc->variant != pcmk_rsc_variant_primitive) { continue; } offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); if (prov != NULL) { offset += snprintf(buffer + offset, LINE_MAX - offset, PROVIDER_SEP "%s", prov); } } offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind); CRM_LOG_ASSERT(offset > 0); if (rsc_table) { rsc_counter = g_hash_table_lookup(rsc_table, buffer); if (rsc_counter == NULL) { rsc_counter = calloc(1, sizeof(int)); *rsc_counter = 0; g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter); } (*rsc_counter)++; } if (active_table) { GList *gIter2 = rsc->running_on; for (; gIter2 != NULL; gIter2 = gIter2->next) { pe_node_t *node = (pe_node_t *) gIter2->data; GHashTable *node_table = NULL; if (node->details->unclean == FALSE && node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) { continue; } node_table = g_hash_table_lookup(active_table, node->details->uname); if (node_table == NULL) { node_table = pcmk__strkey_table(free, free); g_hash_table_insert(active_table, strdup(node->details->uname), node_table); } active_counter = g_hash_table_lookup(node_table, buffer); if (active_counter == NULL) { active_counter = calloc(1, sizeof(int)); *active_counter = 0; g_hash_table_insert(node_table, strdup(buffer), active_counter); } (*active_counter)++; } } } } static void destroy_node_table(gpointer data) { GHashTable *node_table = data; if (node_table) { g_hash_table_destroy(node_table); } } /*! 
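 * \internal
 * \note Editor's illustration (not part of the patch): a hedged sketch of
 *       calling native_location() above through the method table to list
 *       where a resource is running or pending.
 */
#if 0
static void
example_locations(const pe_resource_t *rsc)
{
    GList *nodes = NULL;

    // current == 2: where running, plus the pending node if any
    pe_node_t *only_node = rsc->fns->location(rsc, &nodes, 2);

    if (only_node != NULL) {
        crm_debug("%s is on exactly one node: %s",
                  rsc->id, pe__node_name(only_node));
    }
    g_list_free(nodes); // list members are not copies; free only the list
}
#endif

/*!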
 * \internal * \deprecated This function will be removed in a future release */ void print_rscs_brief(GList *rsc_list, const char *pre_text, long options, void *print_data, gboolean print_all) { GHashTable *rsc_table = pcmk__strkey_table(free, free); GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table); GHashTableIter hash_iter; char *type = NULL; int *rsc_counter = NULL; get_rscs_brief(rsc_list, rsc_table, active_table); g_hash_table_iter_init(&hash_iter, rsc_table); while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) { GHashTableIter hash_iter2; char *node_name = NULL; GHashTable *node_table = NULL; int active_counter_all = 0; g_hash_table_iter_init(&hash_iter2, active_table); while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) { int *active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (options & pe_print_rsconly) { node_name = NULL; } if (options & pe_print_html) { status_print("<li>\n"); } if (print_all) { status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, rsc_counter ? *rsc_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } else { status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } if (options & pe_print_html) { status_print("</li>\n"); } } if (print_all && active_counter_all == 0) { if (options & pe_print_html) { status_print("<li>\n"); } status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "", active_counter_all, rsc_counter ? *rsc_counter : 0, type); if (options & pe_print_html) { status_print("</li>\n"); } } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } }
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts) { GHashTable *rsc_table = pcmk__strkey_table(free, free); GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table); GList *sorted_rscs; int rc = pcmk_rc_no_output; get_rscs_brief(rsc_list, rsc_table, active_table); /* Make a list of the rsc_table keys so that it can be sorted. This is to make sure * output order stays consistent between systems. */ sorted_rscs = g_hash_table_get_keys(rsc_table); sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp); for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) { char *type = (char *) gIter->data; int *rsc_counter = g_hash_table_lookup(rsc_table, type); GList *sorted_nodes = NULL; int active_counter_all = 0; /* Also make a list of the active_table keys so it can be sorted. If there's * more than one instance of a type of resource running, we need the nodes to * be sorted to make sure output order stays consistent between systems. */ sorted_nodes = g_hash_table_get_keys(active_table); sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp); for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) { char *node_name = (char *) gIter2->data; GHashTable *node_table = g_hash_table_lookup(active_table, node_name); int *active_counter = NULL; if (node_table == NULL) { continue; } active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) { node_name = NULL; } if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s", *active_counter, rsc_counter ? *rsc_counter : 0, type, (*active_counter > 0) && node_name ? node_name : ""); } else { out->list_item(out, NULL, "%d\t(%s):\tActive %s", *active_counter, type, (*active_counter > 0) && node_name ? node_name : ""); } rc = pcmk_rc_ok; } if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) { out->list_item(out, NULL, "%d/%d\t(%s):\tActive", active_counter_all, rsc_counter ? *rsc_counter : 0, type); rc = pcmk_rc_ok; } if (sorted_nodes) { g_list_free(sorted_nodes); } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } if (sorted_rscs) { g_list_free(sorted_rscs); } return rc; }
gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) { return FALSE; } else if (check_parent && rsc->parent) { const pe_resource_t *up = pe__const_top_resource(rsc, true); return up->fns->is_filtered(up, only_rsc, FALSE); } return TRUE; }
 * \internal * \brief Get maximum primitive resource instances per node * * \param[in] rsc Primitive resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__primitive_max_per_node(const pe_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); return 1U; }
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c index 6320e269b9..c2a3e22f90 100644 --- a/lib/pengine/tests/native/native_find_rsc_test.c +++ b/lib/pengine/tests/native/native_find_rsc_test.c @@ -1,766 +1,907 @@ /* * Copyright 2022-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <crm/common/unittest_internal.h> #include <crm/common/xml.h> #include <crm/pengine/internal.h> #include <crm/pengine/status.h> #include <crm/pengine/pe_types.h> xmlNode *input = NULL; pe_working_set_t *data_set = NULL; pe_node_t *cluster01, *cluster02, *httpd_bundle_0; pe_resource_t *exim_group, *inactive_group, *promotable_clone, *inactive_clone; pe_resource_t *httpd_bundle, *mysql_clone_group; static int setup(void **state) { char *path = NULL; crm_xml_init(); path = crm_strdup_printf("%s/crm_mon.xml", getenv("PCMK_CTS_CLI_DIR")); input = filename2xml(path); free(path); if (input == NULL) { return 1; } data_set = pe_new_working_set(); if (data_set == NULL) { return 1; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); data_set->input = input; cluster_status(data_set); /* Get references to the cluster nodes so we don't have to find them repeatedly. */ cluster01 = pe_find_node(data_set->nodes, "cluster01"); cluster02 = pe_find_node(data_set->nodes, "cluster02"); httpd_bundle_0 = pe_find_node(data_set->nodes, "httpd-bundle-0"); /* Get references to several resources we use frequently. */ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "exim-group") == 0) { exim_group = rsc; } else if (strcmp(rsc->id, "httpd-bundle") == 0) { httpd_bundle = rsc; } else if (strcmp(rsc->id, "inactive-clone") == 0) { inactive_clone = rsc; } else if (strcmp(rsc->id, "inactive-group") == 0) { inactive_group = rsc; } else if (strcmp(rsc->id, "mysql-clone-group") == 0) { mysql_clone_group = rsc; } else if (strcmp(rsc->id, "promotable-clone") == 0) { promotable_clone = rsc; } } return 0; } static int teardown(void **state) { pe_free_working_set(data_set); return 0; } static void bad_args(void **state) { pe_resource_t *rsc = (pe_resource_t *) g_list_first(data_set->resources)->data; char *id = rsc->id; char *name = NULL; assert_non_null(rsc); assert_null(native_find_rsc(NULL, "dummy", NULL, 0)); assert_null(native_find_rsc(rsc, NULL, NULL, 0)); /* No resources exist with these names. */ name = crm_strdup_printf("%sX", rsc->id); assert_null(native_find_rsc(rsc, name, NULL, 0)); free(name); name = crm_strdup_printf("x%s", rsc->id); assert_null(native_find_rsc(rsc, name, NULL, 0)); free(name); name = g_ascii_strup(rsc->id, -1); assert_null(native_find_rsc(rsc, name, NULL, 0)); g_free(name); /* Fails because resource ID is NULL. */ rsc->id = NULL; assert_null(native_find_rsc(rsc, id, NULL, 0)); rsc->id = id; } static void primitive_rsc(void **state) { pe_resource_t *dummy = NULL; /* Find the "dummy" resource, which is the only one with that ID in the set.
*/ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "dummy") == 0) { dummy = rsc; break; } } assert_non_null(dummy); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0)); - assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, pe_find_current)); + assert_ptr_equal(dummy, + native_find_rsc(dummy, "dummy", NULL, + pcmk_rsc_match_current_node)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(dummy, "dummy", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(dummy, "dummy", cluster02, pcmk_rsc_match_clone_only)); /* Fails because dummy is not running on cluster01, even with the right flags. */ - assert_null(native_find_rsc(dummy, "dummy", cluster01, pe_find_current)); + assert_null(native_find_rsc(dummy, "dummy", cluster01, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(dummy, "dummy", cluster02, 0)); /* Passes because dummy is running on cluster02. */ - assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", cluster02, pe_find_current)); + assert_ptr_equal(dummy, + native_find_rsc(dummy, "dummy", cluster02, + pcmk_rsc_match_current_node)); } static void group_rsc(void **state) { assert_non_null(exim_group); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0)); - assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, pe_find_current)); + assert_ptr_equal(exim_group, + native_find_rsc(exim_group, "exim-group", NULL, + pcmk_rsc_match_current_node)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(exim_group, "exim-group", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pcmk_rsc_match_clone_only)); /* Fails because none of exim-group's children are running on cluster01, even with the right flags. */ - assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_current)); + assert_null(native_find_rsc(exim_group, "exim-group", cluster01, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0)); /* Passes because one of exim-group's children is running on cluster02. */ - assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", cluster02, pe_find_current)); + assert_ptr_equal(exim_group, + native_find_rsc(exim_group, "exim-group", cluster02, + pcmk_rsc_match_current_node)); } static void inactive_group_rsc(void **state) { assert_non_null(inactive_group); /* Passes because NULL was passed for node, regardless of flags. 
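 *
 * Editor's note (not part of the patch): the contrast exercised throughout
 * these tests is that a NULL node matches regardless of flags, while a
 * non-NULL node matches only when pcmk_rsc_match_current_node is included,
 * e.g.:
 *
 *     native_find_rsc(exim_group, "exim-group", cluster02,
 *                     pcmk_rsc_match_current_node)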
*/ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0)); - assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_current)); + assert_ptr_equal(inactive_group, + native_find_rsc(inactive_group, "inactive-group", NULL, + pcmk_rsc_match_current_node)); assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_inactive)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(inactive_group, "inactive-group", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pcmk_rsc_match_clone_only)); /* Fails because none of inactive-group's children are running. */ - assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_current)); - assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_current)); + assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, + pcmk_rsc_match_current_node)); /* Passes because of flags. */ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_inactive)); /* Passes because of flags. */ assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_inactive)); } static void group_member_rsc(void **state) { pe_resource_t *public_ip = NULL; /* Find the "Public-IP" resource, a member of "exim-group". */ for (GList *iter = exim_group->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "Public-IP") == 0) { public_ip = rsc; break; } } assert_non_null(public_ip); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0)); - assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, pe_find_current)); + assert_ptr_equal(public_ip, + native_find_rsc(public_ip, "Public-IP", NULL, + pcmk_rsc_match_current_node)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(public_ip, "Public-IP", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, pcmk_rsc_match_clone_only)); /* Fails because Public-IP is not running on cluster01, even with the right flags. */ - assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, pe_find_current)); + assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0)); /* Passes because Public-IP is running on cluster02. */ - assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_current)); + assert_ptr_equal(public_ip, + native_find_rsc(public_ip, "Public-IP", cluster02, + pcmk_rsc_match_current_node)); } static void inactive_group_member_rsc(void **state) { pe_resource_t *inactive_dummy_1 = NULL; /* Find the "inactive-dummy-1" resource, a member of "inactive-group". 
*/ for (GList *iter = inactive_group->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "inactive-dummy-1") == 0) { inactive_dummy_1 = rsc; break; } } assert_non_null(inactive_dummy_1); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0)); - assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_current)); + assert_ptr_equal(inactive_dummy_1, + native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, + pcmk_rsc_match_current_node)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pcmk_rsc_match_clone_only)); /* Fails because inactive-dummy-1 is not running. */ - assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_current)); - assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_current)); + assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, + pcmk_rsc_match_current_node)); /* Passes because of flags. */ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_inactive)); /* Passes because of flags. */ assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_inactive)); } static void clone_rsc(void **state) { assert_non_null(promotable_clone); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0)); - assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_current)); + assert_ptr_equal(promotable_clone, + native_find_rsc(promotable_clone, "promotable-clone", NULL, + pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pcmk_rsc_match_clone_only)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0)); /* Passes because one of promotable-clone's children is running on cluster01. */ - assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_current)); + assert_ptr_equal(promotable_clone, + native_find_rsc(promotable_clone, "promotable-clone", + cluster01, pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0)); /* Passes because one of promotable-clone's children is running on cluster02.
*/ - assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_current)); + assert_ptr_equal(promotable_clone, + native_find_rsc(promotable_clone, "promotable-clone", + cluster02, pcmk_rsc_match_current_node)); // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pcmk_rsc_match_clone_only - |pe_find_current)); + |pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pcmk_rsc_match_clone_only - |pe_find_current)); + |pcmk_rsc_match_current_node)); } static void inactive_clone_rsc(void **state) { assert_non_null(inactive_clone); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0)); - assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_current)); + assert_ptr_equal(inactive_clone, + native_find_rsc(inactive_clone, "inactive-clone", NULL, + pcmk_rsc_match_current_node)); assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pcmk_rsc_match_clone_only)); assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_inactive)); /* Fails because none of inactive-clone's children are running. */ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01, - pe_find_current|pcmk_rsc_match_clone_only)); + pcmk_rsc_match_current_node + |pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02, - pe_find_current|pcmk_rsc_match_clone_only)); + pcmk_rsc_match_current_node + |pcmk_rsc_match_clone_only)); /* Passes because of flags. */ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_inactive)); /* Passes because of flags. */ assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_inactive)); } static void clone_instance_rsc(void **state) { pe_resource_t *promotable_0 = NULL; pe_resource_t *promotable_1 = NULL; /* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */ for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "promotable-rsc:0") == 0) { promotable_0 = rsc; } else if (strcmp(rsc->id, "promotable-rsc:1") == 0) { promotable_1 = rsc; } } assert_non_null(promotable_0); assert_non_null(promotable_1); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0)); - assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, pe_find_current)); + assert_ptr_equal(promotable_0, + native_find_rsc(promotable_0, "promotable-rsc:0", NULL, + pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0)); - assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, pe_find_current)); + assert_ptr_equal(promotable_1, + native_find_rsc(promotable_1, "promotable-rsc:1", NULL, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. 
*/ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0)); assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0)); /* Check that the resource is running on the node we expect. */ - assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, pe_find_current)); - assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, pe_find_current)); - assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, pe_find_current)); - assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, pe_find_current)); + assert_ptr_equal(promotable_0, + native_find_rsc(promotable_0, "promotable-rsc:0", + cluster02, pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, + pcmk_rsc_match_current_node)); + assert_ptr_equal(promotable_1, + native_find_rsc(promotable_1, "promotable-rsc:1", + cluster01, pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, + pcmk_rsc_match_current_node)); /* Passes because NULL was passed for node and primitive name was given, with correct flags. */ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pcmk_rsc_match_clone_only)); /* Passes because pe_find_any matches any instance's base name. */ assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_any)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_any)); // Passes because pcmk_rsc_match_anon_basename matches assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pcmk_rsc_match_anon_basename)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pcmk_rsc_match_anon_basename)); /* Check that the resource is running on the node we expect. 
*/ - assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_any|pe_find_current)); + assert_ptr_equal(promotable_0, + native_find_rsc(promotable_0, "promotable-rsc", cluster02, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, - pcmk_rsc_match_anon_basename|pe_find_current)); - assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_any|pe_find_current)); + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); + assert_ptr_equal(promotable_1, + native_find_rsc(promotable_1, "promotable-rsc", cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, - pcmk_rsc_match_anon_basename|pe_find_current)); + pe_find_any|pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); /* Fails because incorrect flags were given along with primitive name. */ - assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_current)); - assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_current)); + assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, + pcmk_rsc_match_current_node)); /* And then we check failure possibilities again, except passing promotable_clone * instead of promotable_X as the first argument to native_find_rsc. */ - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0)); assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0)); /* Check that the resource is running on the node we expect. 
*/ - assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, pe_find_current)); - assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_any|pe_find_current)); + assert_ptr_equal(promotable_0, + native_find_rsc(promotable_clone, "promotable-rsc:0", + cluster02, pcmk_rsc_match_current_node)); + assert_ptr_equal(promotable_0, + native_find_rsc(promotable_clone, "promotable-rsc", + cluster02, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, pe_find_current)); - assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); + assert_ptr_equal(promotable_1, + native_find_rsc(promotable_clone, "promotable-rsc:1", + cluster01, pcmk_rsc_match_current_node)); + assert_ptr_equal(promotable_1, + native_find_rsc(promotable_clone, "promotable-rsc", + cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pcmk_rsc_match_anon_basename - |pe_find_current)); + |pcmk_rsc_match_current_node)); } static void renamed_rsc(void **state) { pe_resource_t *promotable_0 = NULL; pe_resource_t *promotable_1 = NULL; /* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */ for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "promotable-rsc:0") == 0) { promotable_0 = rsc; } else if (strcmp(rsc->id, "promotable-rsc:1") == 0) { promotable_1 = rsc; } } assert_non_null(promotable_0); assert_non_null(promotable_1); // Passes because pcmk_rsc_match_history means base name matches clone_name assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pcmk_rsc_match_history)); assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pcmk_rsc_match_history)); } static void bundle_rsc(void **state) { assert_non_null(httpd_bundle); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0)); - assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_current)); + assert_ptr_equal(httpd_bundle, + native_find_rsc(httpd_bundle, "httpd-bundle", NULL, + pcmk_rsc_match_current_node)); /* Fails because resource is not a clone (nor cloned). */ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pcmk_rsc_match_clone_only)); assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pcmk_rsc_match_clone_only)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0)); /* Passes because one of httpd_bundle's children is running on cluster01. 
*/ - assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_current)); + assert_ptr_equal(httpd_bundle, + native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, + pcmk_rsc_match_current_node)); } static bool bundle_first_replica(pe__bundle_replica_t *replica, void *user_data) { pe_resource_t *ip_0 = replica->ip; pe_resource_t *child_0 = replica->child; pe_resource_t *container_0 = replica->container; pe_resource_t *remote_0 = replica->remote; assert_non_null(ip_0); assert_non_null(child_0); assert_non_null(container_0); assert_non_null(remote_0); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", NULL, 0)); assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", NULL, 0)); assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0)); assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0)); assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0)); assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0)); assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0)); /* Check that the resource is running on the node we expect. */ - assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current)); - assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster02, pe_find_current)); - assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", httpd_bundle_0, pe_find_current)); - assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", httpd_bundle_0, pe_find_current)); - assert_null(native_find_rsc(child_0, "httpd:0", cluster01, pe_find_current)); - assert_null(native_find_rsc(child_0, "httpd:0", cluster02, pe_find_current)); - assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, pe_find_current)); - assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02, pe_find_current)); - assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", httpd_bundle_0, pe_find_current)); - assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", cluster01, pe_find_current)); - assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, pe_find_current)); - assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, pe_find_current)); + assert_ptr_equal(ip_0, + native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", + cluster01, pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", + cluster02, pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", + httpd_bundle_0, pcmk_rsc_match_current_node)); + assert_ptr_equal(child_0, + native_find_rsc(child_0, "httpd:0", httpd_bundle_0, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(child_0, "httpd:0", cluster01, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(child_0, "httpd:0", cluster02, + pcmk_rsc_match_current_node)); + assert_ptr_equal(container_0, + native_find_rsc(container_0, "httpd-bundle-docker-0", + cluster01, pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(container_0, 
"httpd-bundle-docker-0", cluster02, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", + httpd_bundle_0, pcmk_rsc_match_current_node)); + assert_ptr_equal(remote_0, + native_find_rsc(remote_0, "httpd-bundle-0", cluster01, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, + pcmk_rsc_match_current_node)); /* Passes because pe_find_any matches any replica's base name. */ assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_any)); // Passes because pcmk_rsc_match_anon_basename matches assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pcmk_rsc_match_anon_basename)); /* Check that the resource is running on the node we expect. */ - assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_any|pe_find_current)); + assert_ptr_equal(child_0, + native_find_rsc(child_0, "httpd", httpd_bundle_0, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); assert_null(native_find_rsc(child_0, "httpd", cluster01, - pcmk_rsc_match_anon_basename|pe_find_current)); - assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_any|pe_find_current)); + pe_find_any|pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(child_0, "httpd", cluster01, + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(child_0, "httpd", cluster02, + pe_find_any|pcmk_rsc_match_current_node)); assert_null(native_find_rsc(child_0, "httpd", cluster02, - pcmk_rsc_match_anon_basename|pe_find_current)); + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); /* Fails because incorrect flags were given along with base name. */ - assert_null(native_find_rsc(child_0, "httpd", NULL, pe_find_current)); + assert_null(native_find_rsc(child_0, "httpd", NULL, + pcmk_rsc_match_current_node)); /* And then we check failure possibilities again, except passing httpd-bundle * instead of X_0 as the first argument to native_find_rsc. */ - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0)); assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0)); assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0)); assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0)); /* Check that the resource is running on the node we expect. 
*/ - assert_ptr_equal(ip_0, native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current)); - assert_ptr_equal(child_0, native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, pe_find_current)); - assert_ptr_equal(container_0, native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, pe_find_current)); - assert_ptr_equal(remote_0, native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, pe_find_current)); + assert_ptr_equal(ip_0, + native_find_rsc(httpd_bundle, + "httpd-bundle-ip-192.168.122.131", + cluster01, pcmk_rsc_match_current_node)); + assert_ptr_equal(child_0, + native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, + pcmk_rsc_match_current_node)); + assert_ptr_equal(container_0, + native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", + cluster01, pcmk_rsc_match_current_node)); + assert_ptr_equal(remote_0, + native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, + pcmk_rsc_match_current_node)); return false; // Do not iterate through any further replicas } static void bundle_replica_rsc(void **state) { pe__foreach_bundle_replica(httpd_bundle, bundle_first_replica, NULL); } static void clone_group_rsc(void **rsc) { assert_non_null(mysql_clone_group); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0)); - assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_current)); + assert_ptr_equal(mysql_clone_group, + native_find_rsc(mysql_clone_group, "mysql-clone-group", + NULL, pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pcmk_rsc_match_clone_only)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0)); /* Passes because one of mysql-clone-group's children is running on cluster01. */ - assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_current)); + assert_ptr_equal(mysql_clone_group, + native_find_rsc(mysql_clone_group, "mysql-clone-group", + cluster01, pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0)); /* Passes because one of mysql-clone-group's children is running on cluster02. 
*/ - assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_current)); + assert_ptr_equal(mysql_clone_group, + native_find_rsc(mysql_clone_group, "mysql-clone-group", + cluster02, pcmk_rsc_match_current_node)); // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pcmk_rsc_match_clone_only - |pe_find_current)); + |pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pcmk_rsc_match_clone_only - |pe_find_current)); + |pcmk_rsc_match_current_node)); } static void clone_group_instance_rsc(void **rsc) { pe_resource_t *mysql_group_0 = NULL; pe_resource_t *mysql_group_1 = NULL; /* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */ for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "mysql-group:0") == 0) { mysql_group_0 = rsc; } else if (strcmp(rsc->id, "mysql-group:1") == 0) { mysql_group_1 = rsc; } } assert_non_null(mysql_group_0); assert_non_null(mysql_group_1); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0)); - assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, pe_find_current)); + assert_ptr_equal(mysql_group_0, + native_find_rsc(mysql_group_0, "mysql-group:0", NULL, + pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0)); - assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, pe_find_current)); + assert_ptr_equal(mysql_group_1, + native_find_rsc(mysql_group_1, "mysql-group:1", NULL, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0)); assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0)); /* Check that the resource is running on the node we expect. */ - assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, pe_find_current)); - assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, pe_find_current)); - assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, pe_find_current)); - assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, pe_find_current)); + assert_ptr_equal(mysql_group_0, + native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, + pcmk_rsc_match_current_node)); + assert_ptr_equal(mysql_group_1, + native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, + pcmk_rsc_match_current_node)); /* Passes because NULL was passed for node and base name was given, with correct flags. */ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pcmk_rsc_match_clone_only)); /* Passes because pe_find_any matches any base name. 
*/ assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_any)); assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_any)); // Passes because pcmk_rsc_match_anon_basename matches assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pcmk_rsc_match_anon_basename)); assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pcmk_rsc_match_anon_basename)); /* Check that the resource is running on the node we expect. */ - assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_any|pe_find_current)); + assert_ptr_equal(mysql_group_0, + native_find_rsc(mysql_group_0, "mysql-group", cluster02, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, - pcmk_rsc_match_anon_basename|pe_find_current)); - assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_any|pe_find_current)); + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); + assert_ptr_equal(mysql_group_1, + native_find_rsc(mysql_group_1, "mysql-group", cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, - pcmk_rsc_match_anon_basename|pe_find_current)); + pe_find_any|pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, + pcmk_rsc_match_anon_basename + |pcmk_rsc_match_current_node)); /* Fails because incorrect flags were given along with base name. */ - assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_current)); - assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_current)); + assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, + pcmk_rsc_match_current_node)); + assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, + pcmk_rsc_match_current_node)); /* And then we check failure possibilities again, except passing mysql_clone_group * instead of mysql_group_X as the first argument to native_find_rsc. */ - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0)); assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0)); /* Check that the resource is running on the node we expect. 
*/ - assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, pe_find_current)); - assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_any|pe_find_current)); + assert_ptr_equal(mysql_group_0, + native_find_rsc(mysql_clone_group, "mysql-group:0", + cluster02, pcmk_rsc_match_current_node)); + assert_ptr_equal(mysql_group_0, + native_find_rsc(mysql_clone_group, "mysql-group", + cluster02, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pcmk_rsc_match_anon_basename - |pe_find_current)); - assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, pe_find_current)); - assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_any|pe_find_current)); + |pcmk_rsc_match_current_node)); + assert_ptr_equal(mysql_group_1, + native_find_rsc(mysql_clone_group, "mysql-group:1", + cluster01, pcmk_rsc_match_current_node)); + assert_ptr_equal(mysql_group_1, + native_find_rsc(mysql_clone_group, "mysql-group", + cluster01, + pe_find_any|pcmk_rsc_match_current_node)); assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pcmk_rsc_match_anon_basename - |pe_find_current)); + |pcmk_rsc_match_current_node)); } static void clone_group_member_rsc(void **state) { pe_resource_t *mysql_proxy = NULL; /* Find the "mysql-proxy" resource, a member of "mysql-group". */ for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (strcmp(rsc->id, "mysql-group:0") == 0) { for (GList *iter2 = rsc->children; iter2 != NULL; iter2 = iter2->next) { pe_resource_t *child = (pe_resource_t *) iter2->data; if (strcmp(child->id, "mysql-proxy:0") == 0) { mysql_proxy = child; break; } } break; } } assert_non_null(mysql_proxy); /* Passes because NULL was passed for node, regardless of flags. */ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0)); - assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_current)); + assert_ptr_equal(mysql_proxy, + native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, + pcmk_rsc_match_current_node)); /* Passes because resource's parent is a clone. */ assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pcmk_rsc_match_clone_only)); assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pcmk_rsc_match_clone_only - |pe_find_current)); + |pcmk_rsc_match_current_node)); /* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */ - assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, pe_find_current)); + assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, + pcmk_rsc_match_current_node)); - /* Fails because pe_find_current is required if a node is given. */ + // Fails because pcmk_rsc_match_current_node is required if a node is given assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0)); /* Passes because mysql-proxy:0 is running on cluster02. 
*/ - assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_current)); + assert_ptr_equal(mysql_proxy, + native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, + pcmk_rsc_match_current_node)); } -/* TODO: Add tests for finding on allocated node (passing a node without - * pe_find_current, after scheduling, for a resource that is starting/stopping/moving. +/* TODO: Add tests for finding on assigned node (passing a node without + * pcmk_rsc_match_current_node, after scheduling, for a resource that is + * starting/stopping/moving. */ PCMK__UNIT_TEST(setup, teardown, cmocka_unit_test(bad_args), cmocka_unit_test(primitive_rsc), cmocka_unit_test(group_rsc), cmocka_unit_test(inactive_group_rsc), cmocka_unit_test(group_member_rsc), cmocka_unit_test(inactive_group_member_rsc), cmocka_unit_test(clone_rsc), cmocka_unit_test(inactive_clone_rsc), cmocka_unit_test(clone_instance_rsc), cmocka_unit_test(renamed_rsc), cmocka_unit_test(bundle_rsc), cmocka_unit_test(bundle_replica_rsc), cmocka_unit_test(clone_group_rsc), cmocka_unit_test(clone_group_instance_rsc), cmocka_unit_test(clone_group_member_rsc)) diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index f6724bc789..c51f9fe7c6 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,2213 +1,2214 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include static GList * build_node_info_list(const pe_resource_t *rsc) { GList *retval = NULL; for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { const pe_resource_t *child = (const pe_resource_t *) iter->data; for (const GList *iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) { const pe_node_t *node = (const pe_node_t *) iter2->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) && child->fns->state(child, TRUE) == pcmk_role_promoted; retval = g_list_prepend(retval, ni); } } return retval; } GList * cli_resource_search(pe_resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { GList *retval = NULL; const pe_resource_t *parent = pe__const_top_resource(rsc, false); if (pe_rsc_is_clone(rsc)) { retval = build_node_info_list(rsc); /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) && !pcmk_is_set(rsc->flags, pe_rsc_unique) && rsc->clone_name && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { retval = build_node_info_list(parent); } else if (rsc->running_on != NULL) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted); retval = g_list_prepend(retval, ni); } } return retval; } // \return Standard Pacemaker return code static int find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int rc = pcmk_rc_ok; xmlNode *xml_search = NULL; GString *xpath = 
NULL; const char *xpath_base = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return ENOTCONN; } xpath_base = pcmk_cib_xpath_for(XML_CIB_TAG_RESOURCES); if (xpath_base == NULL) { crm_err(XML_CIB_TAG_RESOURCES " CIB element not known (bug?)"); return ENOMSG; } xpath = g_string_sized_new(1024); pcmk__g_strcat(xpath, xpath_base, "//*[@" XML_ATTR_ID "=\"", rsc, "\"]", NULL); if (attr_set_type != NULL) { pcmk__g_strcat(xpath, "/", attr_set_type, NULL); if (set_name != NULL) { pcmk__g_strcat(xpath, "[@" XML_ATTR_ID "=\"", set_name, "\"]", NULL); } } g_string_append(xpath, "//" XML_CIB_TAG_NVPAIR "["); if (attr_id != NULL) { pcmk__g_strcat(xpath, "@" XML_ATTR_ID "=\"", attr_id, "\"", NULL); } if (attr_name != NULL) { if (attr_id != NULL) { g_string_append(xpath, " and "); } pcmk__g_strcat(xpath, "@" XML_NVPAIR_ATTR_NAME "=\"", attr_name, "\"", NULL); } g_string_append_c(xpath, ']'); rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { goto done; } crm_log_xml_debug(xml_search, "Match"); if (xml_search->children != NULL) { xmlNode *child = NULL; rc = ENOTUNIQ; out->info(out, "Multiple attributes match name=%s", attr_name); for (child = pcmk__xml_first_child(xml_search); child != NULL; child = pcmk__xml_next(child)) { out->info(out, " Value: %s \t(id=%s)", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } out->spacer(out); } else if(value) { pcmk__str_update(value, crm_element_value(xml_search, attr)); } done: g_string_free(xpath, TRUE); free_xml(xml_search); return rc; } /* PRIVATE. Use the find_matching_attr_resources instead. */ static void find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* */ ** result, pe_resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, int depth) { int rc = pcmk_rc_ok; char *lookup_id = clone_strip(rsc->id); char *local_attr_id = NULL; /* visit the children */ for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data, rsc_id, attr_set, attr_set_type, attr_id, attr_name, cib, cmd, depth+1); /* do it only once for clones */ if (rsc->variant == pcmk_rsc_variant_clone) { break; } } rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(local_attr_id); free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, gboolean force) { int rc = pcmk_rc_ok; char *lookup_id = NULL; char *local_attr_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. 
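   For example (hypothetical IDs): with clone "myclone" wrapping primitive
   "myrsc", a request against "myrsc" stays on "myrsc" if it already defines
   the attribute and otherwise falls through to "myclone". A name-based
   lookup of the affected resources would be a call along these lines (NULL
   set and attribute IDs):

       GList *targets = find_matching_attr_resources(out, rsc, "myrsc",
                                                     NULL, XML_TAG_META_SETS,
                                                     NULL, "target-role",
                                                     cib, "update", FALSE);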
*/ if(force == TRUE) { return g_list_append(result, rsc); } if ((rsc->parent != NULL) && (rsc->parent->variant == pcmk_rsc_variant_clone)) { int rc = pcmk_rc_ok; char *local_attr_id = NULL; rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); free(local_attr_id); if(rc != pcmk_rc_ok) { rsc = rsc->parent; out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", cmd, attr_name, rsc->id, rsc_id); } return g_list_append(result, rsc); } else if ((rsc->parent == NULL) && (rsc->children != NULL) && (rsc->variant == pcmk_rsc_variant_clone)) { pe_resource_t *child = rsc->children->data; if (child->variant == pcmk_rsc_variant_primitive) { lookup_id = clone_strip(child->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == pcmk_rc_ok) { rsc = child; out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", attr_name, lookup_id, cmd, rsc_id); } free(local_attr_id); free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. */ find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set, attr_set_type, attr_id, attr_name, cib, cmd, 0); return result; } // \return Standard Pacemaker return code int cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, int cib_options, gboolean force) { pcmk__output_t *out = rsc->cluster->priv; int rc = pcmk_rc_ok; char *found_attr_id = NULL; GList/**/ *resources = NULL; const char *top_id = pe__const_top_resource(rsc, false)->id; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, XML_ATTR_ID, top_id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { if (!force) { rc = find_resource_attr(out, cib, XML_ATTR_ID, top_id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &found_attr_id); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { out->err(out, "WARNING: There is already a meta attribute " "for '%s' called '%s' (id=%s)", top_id, attr_name, found_attr_id); out->err(out, " Delete '%s' first or use the force option " "to override", found_attr_id); } free(found_attr_id); if (rc == pcmk_rc_ok) { return ENOTUNIQ; } } resources = g_list_append(resources, rsc); } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { crm_xml_add(rsc->xml, attr_name, attr_value); CRM_ASSERT(cib != NULL); rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Set attribute: name=%s value=%s", attr_name, attr_value); } return rc; } else { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "update", force); } /* If the user specified attr_set or attr_id, the intent is to modify a * single resource, which will be the last item in the list. 
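   For example, if the recursive lookup returned { "child:0", "child:1",
   "myclone" } (hypothetical IDs, with the requested root appended last),
   only "myclone" survives the g_list_last()/g_list_remove_link() pruning
   just below.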
*/ if ((attr_set != NULL) || (attr_id != NULL)) { GList *last = g_list_last(resources); resources = g_list_remove_link(resources, last); g_list_free(resources); resources = last; } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; char *local_attr_set = NULL; const char *rsc_attr_id = attr_id; const char *rsc_attr_set = attr_set; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; found_attr_id = NULL; rsc = (pe_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &found_attr_id); switch (rc) { case pcmk_rc_ok: crm_debug("Found a match for name=%s: id=%s", attr_name, found_attr_id); rsc_attr_id = found_attr_id; break; case ENXIO: if (rsc_attr_set == NULL) { local_attr_set = crm_strdup_printf("%s-%s", lookup_id, attr_set_type); rsc_attr_set = local_attr_set; } if (rsc_attr_id == NULL) { found_attr_id = crm_strdup_printf("%s-%s", rsc_attr_set, attr_name); rsc_attr_id = found_attr_id; } xml_top = create_xml_node(NULL, (const char *) rsc->xml->name); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, rsc_attr_set); break; default: free(lookup_id); free(found_attr_id); g_list_free(resources); return rc; } xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, found_attr_id, ((rsc_attr_set == NULL)? "" : " set="), pcmk__s(rsc_attr_set, ""), ((attr_name == NULL)? "" : " name="), pcmk__s(attr_name, ""), attr_value); } free_xml(xml_top); free(lookup_id); free(found_attr_id); free(local_attr_set); if (recursive && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { GList *lpc = NULL; static bool need_init = true; if (need_init) { need_init = false; pcmk__unpack_constraints(rsc->cluster); pe__clear_resource_flags_on_all(rsc->cluster, pe_rsc_detect_loop); } /* We want to set the attribute only on resources explicitly * colocated with this one, so we use rsc->rsc_cons_lhs directly * rather than the with_this_colocations() method. 
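   For example, a dependent colocated with this resource at a positive
   score (say INFINITY) receives the same attribute value, while
   anti-colocated dependents (score <= 0) and resources already flagged
   with pe_rsc_detect_loop are skipped, keeping the recursion loop-free.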
*/ pe__set_resource_flags(rsc, pe_rsc_detect_loop); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; crm_debug("Checking %s %d", cons->id, cons->score); if (!pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop) && (cons->score > 0)) { crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, cons->dependent->id); cli_resource_update_attribute(cons->dependent, cons->dependent->id, NULL, attr_set_type, NULL, attr_name, attr_value, recursive, cib, cib_options, force); } } } } g_list_free(resources); return rc; } // \return Standard Pacemaker return code int cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, int cib_options, gboolean force) { pcmk__output_t *out = rsc->cluster->priv; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, XML_ATTR_ID, pe__const_top_resource(rsc, false)->id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "delete", force); } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { xml_remove_prop(rsc->xml, attr_name); CRM_ASSERT(cib != NULL); rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted attribute: %s", attr_name); } return rc; } else { resources = g_list_append(resources, rsc); } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; rsc = (pe_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &found_attr_id); switch (rc) { case pcmk_rc_ok: break; case ENXIO: free(lookup_id); rc = pcmk_rc_ok; continue; default: free(lookup_id); g_list_free(resources); return rc; } if (rsc_attr_id == NULL) { rsc_attr_id = found_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, found_attr_id, ((attr_set == NULL)? "" : " set="), pcmk__s(attr_set, ""), ((attr_name == NULL)? 
"" : " name="), pcmk__s(attr_name, "")); } free(lookup_id); free_xml(xml_obj); free(found_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; const char *router_node = host_uname; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; bool cib_only = false; pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { out->err(out, "Resource %s not found", rsc_id); return ENXIO; } else if (rsc->variant != pcmk_rsc_variant_primitive) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } { pe_node_t *node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { out->err(out, "Node %s not found", host_uname); return pcmk_rc_node_unknown; } if (!(node->details->online)) { if (do_fail_resource) { out->err(out, "Node %s is not online", host_uname); return ENOTCONN; } else { cib_only = true; } } if (!cib_only && pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { out->err(out, "No cluster connection to Pacemaker Remote node %s detected", host_uname); return ENOTCONN; } router_node = node->details->uname; } } if (rsc->clone_name) { rsc_api_id = rsc->clone_name; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } if (do_fail_resource) { return pcmk_controld_api_fail(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); } else { return pcmk_controld_api_refresh(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type, cib_only); } } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(const pe_resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { int rc = pcmk_rc_ok; /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. 
*/ rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set); if (rc != pcmk_rc_ok) { return rc; } crm_trace("Processing %d mainloop inputs", pcmk_controld_api_replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", pcmk_controld_api_replies_expected(controld_api)); } return rc; } // \return Standard Pacemaker return code static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_rc_ok; const char *failed_value = NULL; const char *failed_id = NULL; const char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = pcmk__strkey_table(NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { interval_ms_s = crm_strdup_printf("%u", crm_parse_interval_spec(interval_spec)); } for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { pe_resource_t *fail_rsc = NULL; fail_rsc = pe_find_resource_with_flags(data_set->resources, failed_id, pcmk_rsc_match_history |pcmk_rsc_match_anon_basename); if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, XML_ATTR_UNAME); if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK); if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) { continue; } } g_hash_table_add(rscs, (gpointer) failed_id); } g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); rc = clear_rsc_history(controld_api, node_name, failed_id, data_set); if (rc != pcmk_rc_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } // \return Standard Pacemaker return code static int clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation, const char *interval_spec, const pe_node_t *node) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return rc; } // \return Standard Pacemaker return code int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, const pe_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set, gboolean force) { pcmk__output_t *out = data_set->priv; int rc = pcmk_rc_ok; 
pe_node_t *node = NULL; if (rsc == NULL) { return ENXIO; } else if (rsc->children) { for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) { const pe_resource_t *child = (const pe_resource_t *) lpc->data; rc = cli_resource_delete(controld_api, host_uname, child, operation, interval_spec, just_failures, data_set, force); if (rc != pcmk_rc_ok) { return rc; } } return pcmk_rc_ok; } else if (host_uname == NULL) { GList *lpc = NULL; GList *nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && force) { nodes = pcmk__copy_node_list(data_set->nodes, false); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (pe_node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(controld_api, node->details->uname, rsc, operation, interval_spec, just_failures, data_set, force); } if (rc != pcmk_rc_ok) { g_list_free(nodes); return rc; } } g_list_free(nodes); return pcmk_rc_ok; } node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { out->err(out, "Unable to clean up %s because node %s not found", rsc->id, host_uname); return ENODEV; } if (!node->details->rsc_discovery_enabled) { out->err(out, "Unable to clean up %s because resource discovery disabled on %s", rsc->id, host_uname); return EOPNOTSUPP; } if (controld_api == NULL) { out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", rsc->id, host_uname); return pcmk_rc_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up %s failures on %s: %s", rsc->id, host_uname, pcmk_rc_str(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation, interval_spec, data_set); } else { rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", rsc->id, host_uname, pcmk_strerror(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, host_uname); } return rc; } // \return Standard Pacemaker return code int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *display_name = node_name? 
node_name : "all nodes"; if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", display_name); return rc; } if (node_name) { pe_node_t *node = pe_find_node(data_set->nodes, node_name); if (node == NULL) { out->err(out, "Unknown node: %s", node_name); return ENXIO; } if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } } rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up all failures on %s: %s", display_name, pcmk_rc_str(rc)); return rc; } if (node_name) { rc = clear_rsc_failures(out, controld_api, node_name, NULL, operation, interval_spec, data_set); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s", node_name, pcmk_strerror(rc)); return rc; } } else { for (GList *iter = data_set->nodes; iter; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, operation, interval_spec, data_set); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s", pcmk_strerror(rc)); return rc; } } } out->info(out, "Cleaned up all resources on %s", display_name); return rc; } static void check_role(resource_checks_t *checks) { const char *role_s = g_hash_table_lookup(checks->rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if (role_s == NULL) { return; } switch (text2role(role_s)) { case pcmk_role_stopped: checks->flags |= rsc_remain_stopped; break; case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags, pe_rsc_promotable)) { checks->flags |= rsc_unpromotable; } break; default: break; } } static void check_managed(resource_checks_t *checks) { const char *managed_s = g_hash_table_lookup(checks->rsc->meta, XML_RSC_ATTR_MANAGED); if ((managed_s != NULL) && !crm_is_true(managed_s)) { checks->flags |= rsc_unmanaged; } } static void check_locked(resource_checks_t *checks) { if (checks->rsc->lock_node != NULL) { checks->flags |= rsc_locked; checks->lock_node = checks->rsc->lock_node->details->uname; } } static bool node_is_unhealthy(pe_node_t *node) { switch (pe__health_strategy(node->details->data_set)) { case pcmk__health_strategy_none: break; case pcmk__health_strategy_no_red: if (pe__node_health(node) < 0) { return true; } break; case pcmk__health_strategy_only_green: if (pe__node_health(node) <= 0) { return true; } break; case pcmk__health_strategy_progressive: case pcmk__health_strategy_custom: /* @TODO These are finite scores, possibly with rules, and possibly * combining with other scores, so attributing these as a cause is * nontrivial. 
*/ break; } return false; } static void check_node_health(resource_checks_t *checks, pe_node_t *node) { if (node == NULL) { GHashTableIter iter; bool allowed = false; bool all_nodes_unhealthy = true; g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { allowed = true; if (!node_is_unhealthy(node)) { all_nodes_unhealthy = false; break; } } if (allowed && all_nodes_unhealthy) { checks->flags |= rsc_node_health; } } else if (node_is_unhealthy(node)) { checks->flags |= rsc_node_health; } } int cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node) { resource_checks_t checks = { .rsc = rsc }; check_role(&checks); check_managed(&checks); check_locked(&checks); check_node_health(&checks, node); return out->message(out, "resource-check-list", &checks); } // \return Standard Pacemaker return code int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { crm_notice("Failing %s on %s", rsc_id, host_uname); return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set); } static GHashTable * generate_resource_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; char *key = NULL; char *value = NULL; combined = pcmk__strkey_table(free, free); params = pe_rsc_params(rsc, node, data_set); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { g_hash_table_insert(combined, strdup(key), strdup(value)); } } meta = pcmk__strkey_table(free, free); get_meta_attributes(meta, rsc, node, data_set); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } bool resource_is_running_on(pe_resource_t *rsc, const char *host) { bool found = true; GList *hIter = NULL; GList *hosts = NULL; if (rsc == NULL) { return false; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pe_node_t *node = (pe_node_t *) hIter->data; if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if (host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = false; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = false; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { pe_resource_t *rsc = (pe_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. 
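   For example (hypothetical group "grp" with members "m1", "m2", "m3"):
   restarting "m2" stops "m2" and "m3" but leaves "m1" running, a partial
   overlap that a single "grp" entry could not express, so members are
   listed individually instead.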
*/ if (rsc->variant == pcmk_rsc_variant_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(pcmk__output_t *out, GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { out->info(out, "%s%s", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as working set input * * This also updates the working set timestamp to the current time. * * \param[in,out] data_set Working set instance to update * \param[in,out] xml XML to use as input * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * data_set->now. * \todo This follows the example of other callers of cli_config_update() * and returns ENOKEY ("Required key not available") if that fails, * but perhaps pcmk_rc_schema_validation would be better in that case. */ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return ENOKEY; } data_set->input = *xml; data_set->now = crm_time_new(NULL); return pcmk_rc_ok; } /*! * \internal * \brief Update a working set's XML input based on a CIB query * * \param[in] data_set Data set instance to initialize * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * data_set->input and data_set->now. 
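   A minimal caller sketch (error handling elided; assumes an established
   CIB connection):

       pe_working_set_t *data_set = pe_new_working_set();
       if (update_working_set_from_cib(out, data_set, cib) == pcmk_rc_ok) {
           // ... inspect data_set->input ...
           pe_free_working_set(data_set);  // also releases input and now
       }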
*/ static int update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc = pcmk_rc_ok; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc); return rc; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML"); free_xml(cib_xml_copy); return rc; } return rc; } // \return Standard Pacemaker return code static int update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; pcmk__output_t *out = data_set->priv; pe_reset_working_set(data_set); pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); rc = update_working_set_from_cib(out, data_set, cib); if (rc != pcmk_rc_ok) { return rc; } if(simulate) { bool prev_quiet = false; pid = pcmk__getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { out->err(out, "Could not create shadow cib: '%s'", pid); rc = ENXIO; goto done; } rc = write_xml_file(data_set->input, shadow_file, FALSE); if (rc < 0) { out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc); goto done; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc); goto done; } pcmk__schedule_actions(data_set->input, pe_flag_no_counts|pe_flag_no_compat, data_set); prev_quiet = out->is_quiet(out); out->quiet = true; pcmk__simulate_transition(data_set, shadow_cib, NULL); out->quiet = prev_quiet; rc = update_dataset(shadow_cib, data_set, false); } else { cluster_status(data_set); } done: /* Do not free data_set->input here, we need rsc->xml to be valid later on */ cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } /*! * \internal * \brief Find the maximum stop timeout of a resource and its children (if any) * * \param[in,out] rsc Resource to get timeout for * * \return Maximum stop timeout for \p rsc (in milliseconds) */ static int max_rsc_stop_timeout(pe_resource_t *rsc) { pe_action_t *stop = NULL; long long result_ll; int max_delay = 0; if (rsc == NULL) { return 0; } // If resource is collective, use maximum of its children's stop timeouts if (rsc->children != NULL) { for (GList *iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; int delay = max_rsc_stop_timeout(child); if (delay > max_delay) { pe_rsc_trace(rsc, "Maximum stop timeout for %s is now %s due to %s", rsc->id, pcmk__readable_interval(delay), child->id); max_delay = delay; } } return max_delay; } /* Create a (transient) instance of the resource's stop action, to fully * evaluate its timeout for rules, defaults, etc. * * @TODO This currently ignores node (which might matter for rules) */ stop = custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, NULL, TRUE, FALSE, rsc->cluster); if ((pcmk__scan_ll(g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT), &result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0) && (result_ll <= INT_MAX)) { max_delay = (int) result_ll; } pe_free_action(stop); return max_delay; } /*! 
* \internal * \brief Find a reasonable waiting time for stopping any one resource in a list * * \param[in,out] data_set Cluster working set * \param[in] resources List of names of resources that will be stopped * * \return Rough estimate of a reasonable time to wait (in seconds) to stop any * one resource in \p resources * \note This estimate is very rough, simply the maximum stop timeout of all * given resources and their children, plus a small fudge factor. It does * not account for children that must be stopped in sequence, action * throttling, or any demotions needed. It checks the stop timeout, even * if the resources in question are actually being started. */ static int wait_time_estimate(pe_working_set_t *data_set, const GList *resources) { int max_delay = 0; // Find maximum stop timeout in milliseconds for (const GList *item = resources; item != NULL; item = item->next) { pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *) (item->data)); int delay = max_rsc_stop_timeout(rsc); if (delay > max_delay) { pe_rsc_trace(rsc, "Wait time is now %s due to %s", pcmk__readable_interval(delay), rsc->id); max_delay = delay; } } return (max_delay / 1000) + 5; } #define waiting_for_starts(d, r, h) ((d != NULL) || \ (!resource_is_running_on((r), (h)))) /*! * \internal * \brief Restart a resource (on a particular host if requested). * * \param[in,out] out Output object * \param[in,out] rsc The resource to restart * \param[in] node Node to restart resource on (NULL for all) * \param[in] move_lifetime If not NULL, how long constraint should * remain in effect (as ISO 8601 string) * \param[in] timeout_ms Consider failed if actions do not complete * in this time (specified in milliseconds, * but a two-second granularity is actually * used; if 0, it will be calculated based on * the resource timeout) * \param[in,out] cib Connection to the CIB manager * \param[in] cib_options Group of enum cib_call_options flags to * use with CIB calls * \param[in] promoted_role_only If true, limit to promoted instances * \param[in] force If true, apply only to requested instance * if part of a collective resource * * \return Standard Pacemaker return code (exits on certain failures) */ int cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const pe_node_t *node, const char *move_lifetime, int timeout_ms, cib_t *cib, int cib_options, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; int lpc = 0; int before = 0; int step_timeout_s = 0; int sleep_interval = 2; int timeout = timeout_ms / 1000; bool stop_via_ban = false; char *rsc_id = NULL; char *lookup_id = NULL; char *orig_target_role = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pe_working_set_t *data_set = NULL; pe_resource_t *parent = uber_parent(rsc); bool running = false; const char *id = rsc->clone_name ? rsc->clone_name : rsc->id; const char *host = node ? node->details->uname : NULL; /* If the implicit resource or primitive resource of a bundle is given, operate on the * bundle itself instead. 
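   For example, using the bundle from the unit tests above, a restart
   request for "httpd" (or for the implicit "httpd-bundle-docker-0") is
   applied to "httpd-bundle" as a whole.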
*/ if (pe_rsc_is_bundled(rsc)) { rsc = parent->parent; } running = resource_is_running_on(rsc, host); if (pe_rsc_is_clone(parent) && !running) { if (pe_rsc_is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } - rsc = parent->fns->find_rsc(parent, lookup_id, node, pe_find_any|pe_find_current); + rsc = parent->fns->find_rsc(parent, lookup_id, node, + pe_find_any|pcmk_rsc_match_current_node); free(lookup_id); running = resource_is_running_on(rsc, host); } if (!running) { if (host) { out->err(out, "%s is not running on %s and so cannot be restarted", id, host); } else { out->err(out, "%s is not running anywhere and so cannot be restarted", id); } return ENXIO; } rsc_id = strdup(rsc->id); if (pe_rsc_is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } if (host) { if (pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) { stop_via_ban = true; } else if (pe_rsc_is_clone(parent)) { stop_via_ban = true; free(lookup_id); lookup_id = strdup(parent->id); } }
/* grab full cib
   determine originally active resources
   disable or ban
   poll cib and watch for affected resources to get stopped
   without --timeout, calculate the stop timeout for each step and wait for that
   if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
   if everything stopped, re-enable or un-ban
   poll cib and watch for affected resources to get started
   without --timeout, calculate the start timeout for each step and wait for that
   if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
   report success

   Optimizations:
   - use constraints to determine ordered list of affected resources
   - Allow a --no-deps option (aka. --force-restart)
 */
data_set = pe_new_working_set(); if (data_set == NULL) { crm_perror(LOG_ERR, "Could not allocate working set"); rc = ENOMEM; goto done; } data_set->priv = out; rc = update_dataset(cib, data_set, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc); goto done; } restart_target_active = get_active_resources(host, data_set->resources); current_active = get_active_resources(host, data_set->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ out->quiet = true; rc = cli_resource_ban(out, lookup_id, host, move_lifetime, NULL, cib, cib_options, promoted_role_only); } else { /* Stop the resource by setting target-role to Stopped. * Remember any existing target-role so we can restore it later * (though it only makes any difference if it's Unpromoted).
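   The stop amounts to defining a meta-attribute like the following (IDs
   illustrative, derived from the resource name; role values are matched
   case-insensitively):

       <meta_attributes id="myrsc-meta_attributes">
         <nvpair id="myrsc-meta_attributes-target-role"
                 name="target-role" value="Stopped"/>
       </meta_attributes>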
*/ find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, PCMK_ACTION_STOPPED, FALSE, cib, cib_options, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; } if (restart_target_active != NULL) { g_list_free_full(restart_target_active, free); restart_target_active = NULL; } goto done; } rc = update_dataset(cib, data_set, true); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources would be stopped"); goto failure; } target_active = get_active_resources(host, data_set->resources); dump_list(target_active, "Target"); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (list_delta != NULL) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = wait_time_estimate(data_set, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, data_set, FALSE); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were stopped"); goto failure; } if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; } current_active = get_active_resources(host, data_set->resources); g_list_free(list_delta); list_delta = NULL; list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); if(before == g_list_length(list_delta)) { /* aborted during stop phase, print the contents of list_delta */ out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } if (stop_via_ban) { rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force); } else if (orig_target_role) { rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, cib_options, force); free(orig_target_role); orig_target_role = NULL; } else { rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, cib_options, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); goto done; } if (target_active != NULL) { g_list_free_full(target_active, free); target_active = NULL; } target_active = restart_target_active; list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (waiting_for_starts(list_delta, rsc, host)) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = 
wait_time_estimate(data_set, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, data_set, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were started"); goto failure; } if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; } /* It's OK if dependent resources moved to a different node, * so we check active resources on all nodes. */ current_active = get_active_resources(NULL, data_set->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } if(before == g_list_length(list_delta)) { /* aborted during start phase, print the contents of list_delta */ out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } rc = pcmk_rc_ok; goto done; failure: if (stop_via_ban) { cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force); } else if (orig_target_role) { cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, cib_options, force); free(orig_target_role); } else { cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, cib_options, force); } done: if (list_delta != NULL) { g_list_free(list_delta); } if (current_active != NULL) { g_list_free_full(current_active, free); } if (target_active != NULL && (target_active != restart_target_active)) { g_list_free_full(target_active, free); } if (restart_target_active != NULL) { g_list_free_full(restart_target_active, free); } free(rsc_id); free(lookup_id); pe_free_working_set(data_set); return rc; } static inline bool action_is_pending(const pe_action_t *action) { if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo) || !pcmk_is_set(action->flags, pe_action_runnable) || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) { return false; } return true; } /*! * \internal * \brief Check whether any actions in a list are pending * * \param[in] actions List of actions to check * * \return true if any actions in the list are pending, otherwise false */ static bool actions_are_pending(const GList *actions) { for (const GList *action = actions; action != NULL; action = action->next) { const pe_action_t *a = (const pe_action_t *) action->data; if (action_is_pending(a)) { crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags); return true; } } return false; } static void print_pending_actions(pcmk__output_t *out, GList *actions) { GList *action; out->info(out, "Pending actions:"); for (action = actions; action != NULL; action = action->next) { pe_action_t *a = (pe_action_t *) action->data; if (!action_is_pending(a)) { continue; } if (a->node) { out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, pe__node_name(a->node)); } else { out->info(out, "\tAction %d: %s", a->id, a->uuid); } } } /* For --wait, timeout (in seconds) to use if caller doesn't specify one */ #define WAIT_DEFAULT_TIMEOUT_S (60 * 60) /* For --wait, how long to sleep between cluster state checks */ #define WAIT_SLEEP_S (2) /*! 
* \internal * \brief Wait until all pending cluster actions are complete * * This waits until either the CIB's transition graph is idle or a timeout is * reached. * * \param[in,out] out Output object * \param[in] timeout_ms Consider failed if actions do not complete in * this time (specified in milliseconds, but * one-second granularity is actually used; if 0, a * default will be used) * \param[in,out] cib Connection to the CIB manager * * \return Standard Pacemaker return code */ int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) { pe_working_set_t *data_set = NULL; int rc = pcmk_rc_ok; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet data_set = pe_new_working_set(); if (data_set == NULL) { return ENOMEM; } do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff); } else { print_pending_actions(out, data_set->actions); pe_free_working_set(data_set); return ETIME; } if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ pe_reset_working_set(data_set); rc = update_working_set_from_cib(out, data_set, cib); if (rc != pcmk_rc_ok) { pe_free_working_set(data_set); return rc; } pcmk__schedule_actions(data_set->input, pe_flag_no_counts|pe_flag_no_compat, data_set); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two * could come to different conclusions about what actions need to be * done. Warn the user in this case. * * @TODO A possible long-term solution would be to reimplement the * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. */ const char *dc_version = g_hash_table_lookup(data_set->config_hash, "dc-version"); if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) { out->info(out, "warning: wait option may not work properly in " "mixed-version cluster"); printed_version_warning = true; } } } while (actions_are_pending(data_set->actions)); pe_free_working_set(data_set); return rc; } static const char * get_action(const char *rsc_action) { const char *action = NULL; if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { action = PCMK_ACTION_VALIDATE_ALL; } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { action = PCMK_ACTION_MONITOR; } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop", "force-demote", "force-promote", NULL)) { action = rsc_action+6; } else { action = rsc_action; } return action; } /*! * \brief Set up environment variables as expected by resource agents * * When the cluster executes resource agents, it adds certain environment * variables (directly or via resource meta-attributes) expected by some * resource agents. Add the essential ones that many resource agents expect, so * the behavior is the same for command-line execution. 
/*!
 * \brief Set up environment variables as expected by resource agents
 *
 * When the cluster executes resource agents, it adds certain environment
 * variables (directly or via resource meta-attributes) expected by some
 * resource agents. Add the essential ones that many resource agents expect, so
 * the behavior is the same for command-line execution.
 *
 * \param[in,out] params       Resource parameters that will be passed to agent
 * \param[in]     timeout_ms   Action timeout (in milliseconds)
 * \param[in]     check_level  OCF check level
 * \param[in]     verbosity    Verbosity level
 */
static void
set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
                      int verbosity)
{
    g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                        crm_strdup_printf("%d", timeout_ms));

    g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION),
                        strdup(CRM_FEATURE_SET));

    if (check_level >= 0) {
        char *level = crm_strdup_printf("%d", check_level);

        setenv("OCF_CHECK_LEVEL", level, 1);
        free(level);
    }

    setenv("HA_debug", (verbosity > 0)? "1" : "0", 1);
    if (verbosity > 1) {
        setenv("OCF_TRACE_RA", "1", 1);
    }

    /* A resource agent using the standard ocf-shellfuncs library will not
     * print messages to stderr if it doesn't have a controlling terminal (e.g.
     * if crm_resource is called via script or ssh). This forces it to do so.
     */
    setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}

/*!
 * \internal
 * \brief Apply command-line overrides to resource parameters
 *
 * \param[in,out] params     Parameters to be passed to agent
 * \param[in]     overrides  Parameters to override (or NULL if none)
 */
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
    if (overrides != NULL) {
        GHashTableIter iter;
        char *name = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, overrides);
        while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                      (gpointer *) &value)) {
            g_hash_table_replace(params, strdup(name), strdup(value));
        }
    }
}
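/* Illustration (comment only, hypothetical values): given
 * params = { "ip" => "192.168.1.1" } and overrides = { "ip" => "10.0.0.1",
 * "monitor_timeout" => "30s" }, apply_overrides() leaves params with
 * ip=10.0.0.1 and adds monitor_timeout=30s, since g_hash_table_replace()
 * both inserts new keys and replaces existing ones.
 */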
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                 const char *rsc_class, const char *rsc_prov,
                                 const char *rsc_type, const char *rsc_action,
                                 GHashTable *params, GHashTable *override_hash,
                                 int timeout_ms, int resource_verbose,
                                 gboolean force, int check_level)
{
    const char *class = rsc_class;
    const char *action = get_action(rsc_action);
    crm_exit_t exit_code = CRM_EX_OK;
    svc_action_t *op = NULL;

    // If no timeout was provided, use the same default as the cluster
    if (timeout_ms == 0) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    set_agent_environment(params, timeout_ms, check_level, resource_verbose);
    apply_overrides(params, override_hash);

    op = services__create_resource_action(rsc_name? rsc_name : "test",
                                          rsc_class, rsc_prov, rsc_type, action,
                                          0, timeout_ms, params, 0);
    if (op == NULL) {
        out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                 action, rsc_class, (rsc_prov? ":" : ""),
                 (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
        g_hash_table_destroy(params);
        return CRM_EX_OSERR;
    }

    if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
        class = resources_find_service_class(rsc_type);
    }
    if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
                              PCMK_RESOURCE_CLASS_LSB, NULL)) {
        services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
                                "Manual execution of the %s standard is "
                                "unsupported", pcmk__s(class, "unspecified"));
    }

    if (op->rc != PCMK_OCF_UNKNOWN) {
        exit_code = op->rc;
        goto done;
    }

    services_action_sync(op);

    // Map results to OCF codes for consistent reporting to user
    {
        enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);

        // Cast variable instead of function return to keep compilers happy
        exit_code = (crm_exit_t) ocf_code;
    }

done:
    out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                 rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                 exit_code, op->status, services__exit_reason(op),
                 op->stdout_data, op->stderr_data);
    services_action_free(op);
    return exit_code;
}
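/* Illustration (comment only; the agent and parameter values are
 * hypothetical): validating an agent definition with no configured resource
 * boils down to a call like
 *
 *     exit_code = cli_resource_execute_from_params(out, NULL, "ocf",
 *                                                  "heartbeat", "Dummy",
 *                                                  "validate", params, NULL,
 *                                                  0, 0, FALSE, 0);
 *
 * where the NULL resource name falls back to "test" and the zero timeout
 * falls back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
 */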
crm_exit_t
cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
                     const char *rsc_action, GHashTable *override_hash,
                     int timeout_ms, cib_t * cib, pe_working_set_t *data_set,
                     int resource_verbose, gboolean force, int check_level)
{
    pcmk__output_t *out = data_set->priv;
    crm_exit_t exit_code = CRM_EX_OK;
    const char *rid = NULL;
    const char *rtype = NULL;
    const char *rprov = NULL;
    const char *rclass = NULL;
    GHashTable *params = NULL;

    if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                             "force-promote", NULL)) {
        if(pe_rsc_is_clone(rsc)) {
            GList *nodes = cli_resource_search(rsc, requested_name, data_set);

            if(nodes != NULL && force == FALSE) {
                out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                         rsc_action, rsc->id);
                out->err(out, "Try setting target-role=Stopped first or specifying "
                         "the force option");
                g_list_free_full(nodes, free); // don't leak the search results
                return CRM_EX_UNSAFE;
            }

            g_list_free_full(nodes, free);
        }
    }

    if(pe_rsc_is_clone(rsc)) {
        /* Grab the first child resource in the hope it's not a group */
        rsc = rsc->children->data;
    }

    if (rsc->variant == pcmk_rsc_variant_group) {
        out->err(out, "Sorry, the %s option doesn't support group resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;

    } else if (pe_rsc_is_bundled(rsc)) {
        out->err(out, "Sorry, the %s option doesn't support bundled resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;
    }

    rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);

    params = generate_resource_params(rsc, NULL /* @TODO use local node */,
                                      data_set);

    if (timeout_ms == 0) {
        timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action),
                                               data_set);
    }

    rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;

    exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype,
                                                 rsc_action, params,
                                                 override_hash, timeout_ms,
                                                 resource_verbose, force,
                                                 check_level);
    return exit_code;
}

// \return Standard Pacemaker return code
int
cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
                  const char *host_name, const char *move_lifetime, cib_t *cib,
                  int cib_options, pe_working_set_t *data_set,
                  gboolean promoted_role_only, gboolean force)
{
    pcmk__output_t *out = data_set->priv;
    int rc = pcmk_rc_ok;
    unsigned int count = 0;
    pe_node_t *current = NULL;
    pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
    bool cur_is_dest = false;

    if (dest == NULL) {
        return pcmk_rc_node_unknown;
    }

    if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
        const pe_resource_t *p = pe__const_top_resource(rsc, false);

        if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
            out->info(out, "Using parent '%s' for move instead of '%s'.",
                      p->id, rsc_id);
            rsc_id = p->id;
            rsc = p;

        } else {
            out->info(out, "Ignoring --promoted option: %s is not promotable",
                      rsc_id);
            promoted_role_only = FALSE;
        }
    }

    current = pe__find_active_requires(rsc, &count);

    if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
        unsigned int promoted_count = 0;
        pe_node_t *promoted_node = NULL;

        for (const GList *iter = rsc->children; iter; iter = iter->next) {
            const pe_resource_t *child = (const pe_resource_t *) iter->data;
            enum rsc_role_e child_role = child->fns->state(child, TRUE);

            if (child_role == pcmk_role_promoted) {
                rsc = child;
                promoted_node = pe__current_node(child);
                promoted_count++;
            }
        }
        if (promoted_role_only || (promoted_count != 0)) {
            count = promoted_count;
            current = promoted_node;
        }
    }

    if (count > 1) {
        if (pe_rsc_is_clone(rsc)) {
            current = NULL;
        } else {
            return pcmk_rc_multiple;
        }
    }

    if (current && (current->details == dest->details)) {
        cur_is_dest = true;
        if (force) {
            crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                     rsc_id, (promoted_role_only? "promoted" : "active"),
                     pe__node_name(dest));
        } else {
            return pcmk_rc_already;
        }
    }

    /* Clear any previous prefer constraints across all nodes. */
    cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, false,
                       force);

    /* Clear any previous ban constraints on 'dest'. */
    cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
                       cib_options, TRUE, force);

    /* Record an explicit preference for 'dest' */
    rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                             cib, cib_options, promoted_role_only);

    crm_trace("%s%s now prefers %s%s",
              rsc->id, (promoted_role_only? " (promoted)" : ""),
              pe__node_name(dest), (force? " (forced)" : ""));

    /* Only ban the previous location if the current location differs from the
     * destination. It is possible to use -M to enforce a location without
     * regard to where the resource is currently located.
     */
    if (force && !cur_is_dest) {
        /* Ban the original location if possible */
        if(current) {
            (void)cli_resource_ban(out, rsc_id, current->details->uname,
                                   move_lifetime, NULL, cib, cib_options,
                                   promoted_role_only);

        } else if(count > 1) {
            out->info(out,
                      "Resource '%s' is currently %s in %d locations. "
                      "One may now move to %s",
                      rsc_id, (promoted_role_only? "promoted" : "active"),
                      count, pe__node_name(dest));
            out->info(out,
                      "To prevent '%s' from being %s at a specific location, "
                      "specify a node.",
                      rsc_id, (promoted_role_only? "promoted" : "active"));

        } else {
            crm_trace("Not banning %s from its current location: not active",
                      rsc_id);
        }
    }

    return rc;
}
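/* Illustration (comment only; resource and node names are hypothetical):
 * moving the promoted role of a promotable clone with force set both prefers
 * the destination and bans the current node (when it differs from the
 * destination):
 *
 *     rc = cli_resource_move(rsc, "myclone", "node2", NULL, cib, cib_options,
 *                            data_set, TRUE, TRUE);
 *
 * Without force, requesting a move to the node where the resource is already
 * promoted returns pcmk_rc_already instead of adding constraints.
 */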