diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index efb0d584a4..dd64e49a0b 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,941 +1,941 @@
/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <stdbool.h>

#include <crm/msg_xml.h>
#include <pacemaker-internal.h>

#include "libpacemaker_private.h"

/*!
 * \internal
 * \brief Assign a single bundle replica's resources (other than container)
 *
 * \param[in,out] replica    Replica to assign
 * \param[in]     user_data  Preferred node, if any
 *
 * \return true (to indicate that any further replicas should be processed)
 */
static bool
assign_replica(pe__bundle_replica_t *replica, void *user_data)
{
    pe_node_t *container_host = NULL;
    const pe_node_t *prefer = user_data;
    const pe_resource_t *bundle = pe__const_top_resource(replica->container,
                                                         true);

    if (replica->ip != NULL) {
        pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
                     bundle->id, replica->ip->id);
        replica->ip->cmds->assign(replica->ip, prefer);
    }

    container_host = replica->container->allocated_to;
    if (replica->remote != NULL) {
        if (pe__is_guest_or_remote_node(container_host)) {
            /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
             * the same host because Pacemaker Remote only supports a single
             * active connection.
             */
-            pcmk__new_colocation("replica-remote-with-host-remote", NULL,
+            pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
                                 INFINITY, replica->remote,
                                 container_host->details->remote_rsc, NULL,
                                 NULL, pcmk__coloc_influence);
        }
        pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
                     bundle->id, replica->remote->id);
        replica->remote->cmds->assign(replica->remote, prefer);
    }

    if (replica->child != NULL) {
        pe_node_t *node = NULL;
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
            if (!pe__same_node(node, replica->node)) {
                node->weight = -INFINITY;
            } else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
                node->weight = INFINITY;
            }
        }

        pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
        pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
                     bundle->id, replica->child->id);
        replica->child->cmds->assign(replica->child, replica->node);
        pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating);
    }
    return true;
}

/*!
* \internal * \brief Assign a bundle resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer) { GList *containers = NULL; pe_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); pe_rsc_trace(rsc, "Assigning bundle %s", rsc->id); pe__set_resource_flags(rsc, pe_rsc_allocating); pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Assign all containers first, so we know what nodes the bundle will be on containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance); pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc), rsc->fns->max_per_node(rsc)); g_list_free(containers); // Then assign remaining replica resources pe__foreach_bundle_replica(rsc, assign_replica, (void *) prefer); // Finally, assign the bundled resources to each bundle node bundled_resource = pe__bundled_resource(rsc); if (bundled_resource != NULL) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (pe__node_is_bundle_instance(rsc, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } bundled_resource->cmds->assign(bundled_resource, prefer); } pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); return NULL; } /*! * \internal * \brief Create actions for a bundle replica's resources (other than child) * * \param[in,out] replica Replica to create actions for * \param[in] user_data Unused * * \return true (to indicate that any further replicas should be processed) */ static bool create_replica_actions(pe__bundle_replica_t *replica, void *user_data) { if (replica->ip != NULL) { replica->ip->cmds->create_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->create_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->create_actions(replica->remote); } return true; } /*! * \internal * \brief Create all actions needed for a given bundle resource * * \param[in,out] rsc Bundle resource to create actions for */ void pcmk__bundle_create_actions(pe_resource_t *rsc) { pe_action_t *action = NULL; GList *containers = NULL; pe_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); pe__foreach_bundle_replica(rsc, create_replica_actions, NULL); containers = pe__bundle_containers(rsc); pcmk__create_instance_actions(rsc, containers); g_list_free(containers); bundled_resource = pe__bundled_resource(rsc); if (bundled_resource != NULL) { bundled_resource->cmds->create_actions(bundled_resource); if (pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) { pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true); action->priority = INFINITY; pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true); action->priority = INFINITY; } } } /*! 
 * \internal
 * \brief Create internal constraints for a bundle replica's resources
 *
 * \param[in,out] replica    Replica to create internal constraints for
 * \param[in,out] user_data  Replica's parent bundle
 *
 * \return true (to indicate that any further replicas should be processed)
 */
static bool
replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
{
    pe_resource_t *bundle = user_data;

    replica->container->cmds->internal_constraints(replica->container);

    // Start bundle -> start replica container
    pcmk__order_starts(bundle, replica->container,
                       pe_order_runnable_left|pe_order_implies_first_printed);

    // Stop bundle -> stop replica child and container
    if (replica->child != NULL) {
        pcmk__order_stops(bundle, replica->child,
                          pe_order_implies_first_printed);
    }
    pcmk__order_stops(bundle, replica->container,
                      pe_order_implies_first_printed);

    // Start replica container -> bundle is started
    pcmk__order_resource_actions(replica->container, RSC_START, bundle,
                                 RSC_STARTED, pe_order_implies_then_printed);

    // Stop replica container -> bundle is stopped
    pcmk__order_resource_actions(replica->container, RSC_STOP, bundle,
                                 RSC_STOPPED, pe_order_implies_then_printed);

    if (replica->ip != NULL) {
        replica->ip->cmds->internal_constraints(replica->ip);

        // Replica IP address -> replica container (symmetric)
        pcmk__order_starts(replica->ip, replica->container,
                           pe_order_runnable_left|pe_order_preserve);
        pcmk__order_stops(replica->container, replica->ip,
                          pe_order_implies_first|pe_order_preserve);

-        pcmk__new_colocation("ip-with-container", NULL, INFINITY, replica->ip,
+        pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip,
                             replica->container, NULL, NULL,
                             pcmk__coloc_influence);
    }

    if (replica->remote != NULL) {
        /* This handles ordering and colocating remote relative to container
-         * (via "resource-with-container"). Since IP is also ordered and
+         * (via "#resource-with-container"). Since IP is also ordered and
         * colocated relative to the container, we don't need to do anything
         * explicit here with IP.
         */
        replica->remote->cmds->internal_constraints(replica->remote);
    }

    if (replica->child != NULL) {
        CRM_ASSERT(replica->remote != NULL);
        // "Start remote then child" is implicit in scheduler's remote logic
    }
    return true;
}

/*!
* \internal * \brief Create implicit constraints needed for a bundle resource * * \param[in,out] rsc Bundle resource to create implicit constraints for */ void pcmk__bundle_internal_constraints(pe_resource_t *rsc) { pe_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc); bundled_resource = pe__bundled_resource(rsc); if (bundled_resource == NULL) { return; } // Start bundle -> start bundled clone pcmk__order_resource_actions(rsc, RSC_START, bundled_resource, RSC_START, pe_order_implies_first_printed); // Bundled clone is started -> bundle is started pcmk__order_resource_actions(bundled_resource, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed); // Stop bundle -> stop bundled clone pcmk__order_resource_actions(rsc, RSC_STOP, bundled_resource, RSC_STOP, pe_order_implies_first_printed); // Bundled clone is stopped -> bundle is stopped pcmk__order_resource_actions(bundled_resource, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed); bundled_resource->cmds->internal_constraints(bundled_resource); if (!pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) { return; } pcmk__promotable_restart_ordering(rsc); // Demote bundle -> demote bundled clone pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundled_resource, RSC_DEMOTE, pe_order_implies_first_printed); // Bundled clone is demoted -> bundle is demoted pcmk__order_resource_actions(bundled_resource, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed); // Promote bundle -> promote bundled clone pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundled_resource, RSC_PROMOTE, pe_order_implies_first_printed); // Bundled clone is promoted -> bundle is promoted pcmk__order_resource_actions(bundled_resource, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed); } struct match_data { const pe_node_t *node; // Node to compare against replica pe_resource_t *container; // Replica container corresponding to node }; /*! * \internal * \brief Check whether a replica container is assigned to a given node * * \param[in] replica Replica to check * \param[in,out] user_data struct match_data with node to compare against * * \return true if the replica does not match (to indicate further replicas * should be processed), otherwise false */ static bool match_replica_container(const pe__bundle_replica_t *replica, void *user_data) { struct match_data *match_data = user_data; if (pcmk__instance_matches(replica->container, match_data->node, RSC_ROLE_UNKNOWN, false)) { match_data->container = replica->container; return false; // Match found, don't bother searching further replicas } return true; // No match, keep searching } /*! * \internal * \brief Find a bundle container compatible with a dependent resource * * \param[in] dependent Dependent resource in colocation with bundle * \param[in] bundle Bundle that \p dependent is colocated with * * \return A container from \p bundle assigned to the same node as \p dependent * if assigned, otherwise assigned to any of dependent's allowed nodes, * otherwise NULL. 
*/ static pe_resource_t * compatible_container(const pe_resource_t *dependent, const pe_resource_t *bundle) { GList *scratch = NULL; struct match_data match_data = { NULL, NULL }; // If dependent is assigned, only check there match_data.node = dependent->fns->location(dependent, NULL, 0); if (match_data.node != NULL) { pe__foreach_const_bundle_replica(bundle, match_replica_container, &match_data); return match_data.container; } // Otherwise, check for any of the dependent's allowed nodes scratch = g_hash_table_get_values(dependent->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL); for (const GList *iter = scratch; iter != NULL; iter = iter->next) { match_data.node = iter->data; pe__foreach_const_bundle_replica(bundle, match_replica_container, &match_data); if (match_data.container != NULL) { break; } } g_list_free(scratch); return match_data.container; } struct coloc_data { const pcmk__colocation_t *colocation; pe_resource_t *dependent; GList *container_hosts; }; /*! * \internal * \brief Apply a colocation score to replica node scores or resource priority * * \param[in] replica Replica of primary bundle resource in colocation * \param[in,out] user_data struct coloc_data for colocation being applied * * \return true (to indicate that any further replicas should be processed) */ static bool replica_apply_coloc_score(const pe__bundle_replica_t *replica, void *user_data) { struct coloc_data *coloc_data = user_data; pe_node_t *chosen = NULL; if (coloc_data->colocation->score < INFINITY) { replica->container->cmds->apply_coloc_score(coloc_data->dependent, replica->container, coloc_data->colocation, false); return true; } chosen = replica->container->fns->location(replica->container, NULL, 0); if ((chosen == NULL) || is_set_recursive(replica->container, pe_rsc_block, true)) { return true; } if ((coloc_data->colocation->primary_role >= RSC_ROLE_PROMOTED) && ((replica->child == NULL) || (replica->child->next_role < RSC_ROLE_PROMOTED))) { return true; } pe_rsc_trace(pe__const_top_resource(replica->container, true), "Allowing mandatory colocation %s using %s @%d", coloc_data->colocation->id, pe__node_name(chosen), chosen->weight); coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts, chosen); return true; } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { struct coloc_data coloc_data = { colocation, dependent, NULL }; /* This should never be called for the bundle itself as a dependent. * Instead, we add its colocation constraints to its containers and call the * apply_coloc_score() method for the containers as dependents. 
*/ CRM_ASSERT((primary != NULL) && (primary->variant == pe_container) && (dependent != NULL) && (dependent->variant == pe_native) && (colocation != NULL) && !for_dependent); if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "Skipping applying colocation %s " "because %s is still provisional", colocation->id, primary->id); return; } pe_rsc_trace(primary, "Applying colocation %s (%s with %s at %s)", colocation->id, dependent->id, primary->id, pcmk_readable_score(colocation->score)); /* If the constraint dependent is a clone or bundle, "dependent" here is one * of its instances. Look for a compatible instance of this bundle. */ if (colocation->dependent->variant > pe_group) { const pe_resource_t *primary_container = compatible_container(dependent, primary); if (primary_container != NULL) { // Success, we found one pe_rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_container->id); dependent->cmds->apply_coloc_score(dependent, primary_container, colocation, true); } else if (colocation->score >= INFINITY) { // Failure, and it's fatal crm_notice("%s cannot run because there is no compatible " "instance of %s to colocate with", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true); } else { // Failure, but we can ignore it pe_rsc_debug(primary, "%s cannot be colocated with any instance of %s", dependent->id, primary->id); } return; } pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score, &coloc_data); if (colocation->score >= INFINITY) { node_list_exclude(dependent->allowed_nodes, coloc_data.container_hosts, FALSE); } g_list_free(coloc_data.container_hosts); } // Bundle implementation of resource_alloc_functions_t:with_this_colocations() void pcmk__with_bundle_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container) && (orig_rsc != NULL) && (list != NULL)); if (rsc == orig_rsc) { // Colocations are wanted for bundle itself pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); // Only the bundle replicas' containers get the bundle's constraints } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) { pcmk__add_collective_constraints(list, orig_rsc, rsc, true); } } // Bundle implementation of resource_alloc_functions_t:this_with_colocations() void pcmk__bundle_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container) && (orig_rsc != NULL) && (list != NULL)); if (rsc == orig_rsc) { // Colocations are wanted for bundle itself pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); // Only the bundle replicas' containers get the bundle's constraints } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) { pcmk__add_collective_constraints(list, orig_rsc, rsc, false); } } /*! 
* \internal * \brief Return action flags for a given bundle resource action * * \param[in,out] action Bundle resource action to get flags for * \param[in] node If not NULL, limit effects to this node * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node) { GList *containers = NULL; uint32_t flags = 0; pe_resource_t *bundled_resource = NULL; CRM_ASSERT((action != NULL) && (action->rsc != NULL) && (action->rsc->variant == pe_container)); bundled_resource = pe__bundled_resource(action->rsc); if (bundled_resource != NULL) { // Clone actions are done on the bundled clone resource, not container switch (get_complex_task(bundled_resource, action->task)) { case no_action: case action_notify: case action_notified: case action_promote: case action_promoted: case action_demote: case action_demoted: return pcmk__collective_action_flags(action, bundled_resource->children, node); default: break; } } containers = pe__bundle_containers(action->rsc); flags = pcmk__collective_action_flags(action, containers, node); g_list_free(containers); return flags; } /*! * \internal * \brief Apply a location constraint to a bundle replica * * \param[in,out] replica Replica to apply constraint to * \param[in,out] user_data Location constraint to apply * * \return true (to indicate that any further replicas should be processed) */ static bool apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data) { pe__location_t *location = user_data; if (replica->container != NULL) { replica->container->cmds->apply_location(replica->container, location); } if (replica->ip != NULL) { replica->ip->cmds->apply_location(replica->ip, location); } return true; } /*! * \internal * \brief Apply a location constraint to a bundle resource's allowed node scores * * \param[in,out] rsc Bundle resource to apply constraint to * \param[in,out] location Location constraint to apply */ void pcmk__bundle_apply_location(pe_resource_t *rsc, pe__location_t *location) { pe_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container) && (location != NULL)); pcmk__apply_location(rsc, location); pe__foreach_bundle_replica(rsc, apply_location_to_replica, location); bundled_resource = pe__bundled_resource(rsc); if ((bundled_resource != NULL) && ((location->role_filter == RSC_ROLE_UNPROMOTED) || (location->role_filter == RSC_ROLE_PROMOTED))) { bundled_resource->cmds->apply_location(bundled_resource, location); bundled_resource->rsc_location = g_list_prepend( bundled_resource->rsc_location, location); } } #define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']" /*! * \internal * \brief Add a bundle replica's actions to transition graph * * \param[in,out] replica Replica to add to graph * \param[in] user_data Bundle that replica belongs to (for logging only) * * \return true (to indicate that any further replicas should be processed) */ static bool add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data) { if ((replica->remote != NULL) && (replica->container != NULL) && pe__bundle_needs_remote_name(replica->remote)) { /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that * run pacemaker-remoted inside, without needing a separate IP for * the container. This is done by configuring the inner remote's * connection host as the magic string "#uname", then * replacing it with the underlying host when needed. 
*/ xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml, LOG_ERR); const char *calculated_addr = NULL; // Replace the value in replica->remote->xml (if appropriate) calculated_addr = pe__add_bundle_remote_name(replica->remote, replica->remote->cluster, nvpair, "value"); if (calculated_addr != NULL) { /* Since this is for the bundle as a resource, and not any * particular action, replace the value in the default * parameters (not evaluated for node). create_graph_action() * will grab it from there to replace it in node-evaluated * parameters. */ GHashTable *params = pe_rsc_params(replica->remote, NULL, replica->remote->cluster); g_hash_table_replace(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR), strdup(calculated_addr)); } else { pe_resource_t *bundle = user_data; /* The only way to get here is if the remote connection is * neither currently running nor scheduled to run. That means we * won't be doing any operations that require addr (only start * requires it; we additionally use it to compare digests when * unpacking status, promote, and migrate_from history, but * that's already happened by this point). */ pe_rsc_info(bundle, "Unable to determine address for bundle %s " "remote connection", bundle->id); } } if (replica->ip != NULL) { replica->ip->cmds->add_actions_to_graph(replica->ip); } if (replica->container != NULL) { replica->container->cmds->add_actions_to_graph(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->add_actions_to_graph(replica->remote); } return true; } /*! * \internal * \brief Add a bundle resource's actions to the transition graph * * \param[in,out] rsc Bundle resource whose actions should be added */ void pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc) { pe_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); bundled_resource = pe__bundled_resource(rsc); if (bundled_resource != NULL) { bundled_resource->cmds->add_actions_to_graph(bundled_resource); } pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc); } struct probe_data { pe_resource_t *bundle; // Bundle being probed pe_node_t *node; // Node to create probes on bool any_created; // Whether any probes have been created }; /*! * \internal * \brief Order a bundle replica's start after another replica's probe * * \param[in,out] replica Replica to order start for * \param[in,out] user_data Replica with probe to order after * * \return true (to indicate that any further replicas should be processed) */ static bool order_replica_start_after(pe__bundle_replica_t *replica, void *user_data) { pe__bundle_replica_t *probed_replica = user_data; if ((replica == probed_replica) || (replica->container == NULL)) { return true; } pcmk__new_ordering(probed_replica->container, pcmk__op_key(probed_replica->container->id, RSC_STATUS, 0), NULL, replica->container, pcmk__op_key(replica->container->id, RSC_START, 0), NULL, pe_order_optional|pe_order_same_node, replica->container->cluster); return true; } /*! 
* \internal * \brief Create probes for a bundle replica's resources * * \param[in,out] replica Replica to create probes for * \param[in,out] user_data struct probe_data * * \return true (to indicate that any further replicas should be processed) */ static bool create_replica_probes(pe__bundle_replica_t *replica, void *user_data) { struct probe_data *probe_data = user_data; if ((replica->ip != NULL) && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) { probe_data->any_created = true; } if ((replica->child != NULL) && pe__same_node(probe_data->node, replica->node) && replica->child->cmds->create_probe(replica->child, probe_data->node)) { probe_data->any_created = true; } if ((replica->container != NULL) && replica->container->cmds->create_probe(replica->container, probe_data->node)) { probe_data->any_created = true; /* If we're limited to one replica per host (due to * the lack of an IP range probably), then we don't * want any of our peer containers starting until * we've established that no other copies are already * running. * * Partly this is to ensure that the maximum replicas per host is * observed, but also to ensure that the containers * don't fail to start because the necessary port * mappings (which won't include an IP for uniqueness) * are already taken */ if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) { pe__foreach_bundle_replica(probe_data->bundle, order_replica_start_after, replica); } } if ((replica->container != NULL) && (replica->remote != NULL) && replica->remote->cmds->create_probe(replica->remote, probe_data->node)) { /* Do not probe the remote resource until we know where the container is * running. This is required for REMOTE_CONTAINER_HACK to correctly * probe remote resources. */ char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0); pe_action_t *probe = find_first_action(replica->remote->actions, probe_uuid, NULL, probe_data->node); free(probe_uuid); if (probe != NULL) { probe_data->any_created = true; pe_rsc_trace(probe_data->bundle, "Ordering %s probe on %s", replica->remote->id, pe__node_name(probe_data->node)); pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, RSC_START, 0), NULL, replica->remote, NULL, probe, pe_order_probe, probe_data->bundle->cluster); } } return true; } /*! * \internal * * \brief Schedule any probes needed for a bundle resource on a node * * \param[in,out] rsc Bundle resource to create probes for * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node) { struct probe_data probe_data = { rsc, node, false }; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data); return probe_data.any_created; } /*! * \internal * \brief Output actions for one bundle replica * * \param[in,out] replica Replica to output actions for * \param[in] user_data Unused * * \return true (to indicate that any further replicas should be processed) */ static bool output_replica_actions(pe__bundle_replica_t *replica, void *user_data) { if (replica->ip != NULL) { replica->ip->cmds->output_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->output_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->output_actions(replica->remote); } if (replica->child != NULL) { replica->child->cmds->output_actions(replica->child); } return true; } /*! 
* \internal * \brief Output a summary of scheduled actions for a bundle resource * * \param[in,out] rsc Bundle resource to output actions for */ void pcmk__output_bundle_actions(pe_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); pe__foreach_bundle_replica(rsc, output_replica_actions, NULL); } // Bundle implementation of resource_alloc_functions_t:add_utilization() void pcmk__bundle_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { pe_resource_t *container = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } /* All bundle replicas are identical, so using the utilization of the first * is sufficient for any. Only the implicit container resource can have * utilization values. */ container = pe__first_container(rsc); if (container != NULL) { container->cmds->add_utilization(container, orig_rsc, all_rscs, utilization); } } // Bundle implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__bundle_shutdown_lock(pe_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)); // Bundles currently don't support shutdown locks } diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c index 28f477934c..2c6e8feeb5 100644 --- a/lib/pacemaker/pcmk_sched_group.c +++ b/lib/pacemaker/pcmk_sched_group.c @@ -1,881 +1,880 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include "libpacemaker_private.h" /*! * \internal * \brief Assign a group resource to a node * * \param[in,out] rsc Group resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer) { pe_node_t *first_assigned_node = NULL; pe_resource_t *first_member = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; // Assignment already done } if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Assignment dependency loop detected involving %s", rsc->id); return NULL; } if (rsc->children == NULL) { // No members to assign pe__clear_resource_flags(rsc, pe_rsc_provisional); return NULL; } pe__set_resource_flags(rsc, pe_rsc_allocating); first_member = (pe_resource_t *) rsc->children->data; rsc->role = first_member->role; pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *member = (pe_resource_t *) iter->data; pe_node_t *node = NULL; pe_rsc_trace(rsc, "Assigning group %s member %s", rsc->id, member->id); node = member->cmds->assign(member, prefer); if (first_assigned_node == NULL) { first_assigned_node = node; } } pe__set_next_role(rsc, first_member->next_role, "first group member"); pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); if (!pe__group_flag_is_set(rsc, pe__group_colocated)) { return NULL; } return first_assigned_node; } /*! 
 * \internal
 * \brief Create a pseudo-operation for a group as an ordering point
 *
 * \param[in,out] group   Group resource to create action for
 * \param[in]     action  Action name
 *
 * \return Newly created pseudo-operation
 */
static pe_action_t *
create_group_pseudo_op(pe_resource_t *group, const char *action)
{
    pe_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
                                    action, NULL, TRUE, TRUE, group->cluster);

    pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
    return op;
}

/*!
 * \internal
 * \brief Create all actions needed for a given group resource
 *
 * \param[in,out] rsc  Group resource to create actions for
 */
void
pcmk__group_create_actions(pe_resource_t *rsc)
{
    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));

    pe_rsc_trace(rsc, "Creating actions for group %s", rsc->id);

    // Create actions for individual group members
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        pe_resource_t *member = (pe_resource_t *) iter->data;

        member->cmds->create_actions(member);
    }

    // Create pseudo-actions for group itself to serve as ordering points
    create_group_pseudo_op(rsc, RSC_START);
    create_group_pseudo_op(rsc, RSC_STARTED);
    create_group_pseudo_op(rsc, RSC_STOP);
    create_group_pseudo_op(rsc, RSC_STOPPED);
    if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE))) {
        create_group_pseudo_op(rsc, RSC_DEMOTE);
        create_group_pseudo_op(rsc, RSC_DEMOTED);
        create_group_pseudo_op(rsc, RSC_PROMOTE);
        create_group_pseudo_op(rsc, RSC_PROMOTED);
    }
}

// User data for member_internal_constraints()
struct member_data {
    // These could be derived from member but this avoids some function calls
    bool ordered;
    bool colocated;
    bool promotable;

    pe_resource_t *last_active;
    pe_resource_t *previous_member;
};

/*!
 * \internal
 * \brief Create implicit constraints needed for a group member
 *
 * \param[in,out] data       Group member to create implicit constraints for
 * \param[in,out] user_data  Member data (struct member_data *)
 */
static void
member_internal_constraints(gpointer data, gpointer user_data)
{
    pe_resource_t *member = (pe_resource_t *) data;
    struct member_data *member_data = (struct member_data *) user_data;

    // For ordering demote vs demote or stop vs stop
    uint32_t down_flags = pe_order_implies_first_printed;

    // For ordering demote vs demoted or stop vs stopped
    uint32_t post_down_flags = pe_order_implies_then_printed;

    // Create the individual member's implicit constraints
    member->cmds->internal_constraints(member);

    if (member_data->previous_member == NULL) { // This is first member
        if (member_data->ordered) {
            pe__set_order_flags(down_flags, pe_order_optional);
            post_down_flags = pe_order_implies_then;
        }

    } else if (member_data->colocated) {
        uint32_t flags = pcmk__coloc_none;

        if (pcmk_is_set(member->flags, pe_rsc_critical)) {
            flags |= pcmk__coloc_influence;
        }

        // Colocate this member with the previous one
-        pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
-                             member, member_data->previous_member, NULL, NULL,
-                             flags);
+        pcmk__new_colocation("#group-members", NULL, INFINITY, member,
+                             member_data->previous_member, NULL, NULL, flags);
    }

    if (member_data->promotable) {
        // Demote group -> demote member -> group is demoted
        pcmk__order_resource_actions(member->parent, RSC_DEMOTE, member,
                                     RSC_DEMOTE, down_flags);
        pcmk__order_resource_actions(member, RSC_DEMOTE, member->parent,
                                     RSC_DEMOTED, post_down_flags);

        // Promote group -> promote member -> group is promoted
        pcmk__order_resource_actions(member, RSC_PROMOTE, member->parent,
                                     RSC_PROMOTED, pe_order_runnable_left
|pe_order_implies_then |pe_order_implies_then_printed); pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member, RSC_PROMOTE, pe_order_implies_first_printed); } // Stop group -> stop member -> group is stopped pcmk__order_stops(member->parent, member, down_flags); pcmk__order_resource_actions(member, RSC_STOP, member->parent, RSC_STOPPED, post_down_flags); // Start group -> start member -> group is started pcmk__order_starts(member->parent, member, pe_order_implies_first_printed); pcmk__order_resource_actions(member, RSC_START, member->parent, RSC_STARTED, pe_order_runnable_left |pe_order_implies_then |pe_order_implies_then_printed); if (!member_data->ordered) { pcmk__order_starts(member->parent, member, pe_order_implies_then |pe_order_runnable_left |pe_order_implies_first_printed); if (member_data->promotable) { pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member, RSC_PROMOTE, pe_order_implies_then |pe_order_runnable_left |pe_order_implies_first_printed); } } else if (member_data->previous_member == NULL) { pcmk__order_starts(member->parent, member, pe_order_none); if (member_data->promotable) { pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member, RSC_PROMOTE, pe_order_none); } } else { // Order this member relative to the previous one pcmk__order_starts(member_data->previous_member, member, pe_order_implies_then|pe_order_runnable_left); pcmk__order_stops(member, member_data->previous_member, pe_order_optional|pe_order_restart); /* In unusual circumstances (such as adding a new member to the middle * of a group with unmanaged later members), this member may be active * while the previous (new) member is inactive. In this situation, the * usual restart orderings will be irrelevant, so we need to order this * member's stop before the previous member's start. */ if ((member->running_on != NULL) && (member_data->previous_member->running_on == NULL)) { pcmk__order_resource_actions(member, RSC_STOP, member_data->previous_member, RSC_START, pe_order_implies_first |pe_order_runnable_left); } if (member_data->promotable) { pcmk__order_resource_actions(member_data->previous_member, RSC_PROMOTE, member, RSC_PROMOTE, pe_order_implies_then |pe_order_runnable_left); pcmk__order_resource_actions(member, RSC_DEMOTE, member_data->previous_member, RSC_DEMOTE, pe_order_optional); } } // Make sure partially active groups shut down in sequence if (member->running_on != NULL) { if (member_data->ordered && (member_data->previous_member != NULL) && (member_data->previous_member->running_on == NULL) && (member_data->last_active != NULL) && (member_data->last_active->running_on != NULL)) { pcmk__order_stops(member, member_data->last_active, pe_order_optional); } member_data->last_active = member; } member_data->previous_member = member; } /*! 
* \internal * \brief Create implicit constraints needed for a group resource * * \param[in,out] rsc Group resource to create implicit constraints for */ void pcmk__group_internal_constraints(pe_resource_t *rsc) { struct member_data member_data = { false, }; const pe_resource_t *top = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)); /* Order group pseudo-actions relative to each other for restarting: * stop group -> group is stopped -> start group -> group is started */ pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left); pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional); pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left); top = pe__const_top_resource(rsc, false); member_data.ordered = pe__group_flag_is_set(rsc, pe__group_ordered); member_data.colocated = pe__group_flag_is_set(rsc, pe__group_colocated); member_data.promotable = pcmk_is_set(top->flags, pe_rsc_promotable); g_list_foreach(rsc->children, member_internal_constraints, &member_data); } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint for a group with some other resource, apply the * score to the dependent's allowed node scores (if we are still placing * resources) or priority (if we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent group resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply */ static void colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation) { pe_resource_t *member = NULL; if (dependent->children == NULL) { return; } pe_rsc_trace(primary, "Processing %s (group %s with %s) for dependent", colocation->id, dependent->id, primary->id); if (pe__group_flag_is_set(dependent, pe__group_colocated)) { // Colocate first member (internal colocations will handle the rest) member = (pe_resource_t *) dependent->children->data; member->cmds->apply_coloc_score(member, primary, colocation, true); return; } if (colocation->score >= INFINITY) { pcmk__config_err("%s: Cannot perform mandatory colocation between " "non-colocated group and %s", dependent->id, primary->id); return; } // Colocate each member individually for (GList *iter = dependent->children; iter != NULL; iter = iter->next) { member = (pe_resource_t *) iter->data; member->cmds->apply_coloc_score(member, primary, colocation, true); } } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint for some other resource with a group, apply the * score to the dependent's allowed node scores (if we are still placing * resources) or priority (if we are choosing promotable clone instance roles). 
* * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary group resource in colocation * \param[in] colocation Colocation constraint to apply */ static void colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation) { const pe_resource_t *member = NULL; pe_rsc_trace(primary, "Processing colocation %s (%s with group %s) for primary", colocation->id, dependent->id, primary->id); if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { return; } if (pe__group_flag_is_set(primary, pe__group_colocated)) { if (colocation->score >= INFINITY) { /* For mandatory colocations, the entire group must be assignable * (and in the specified role if any), so apply the colocation based * on the last member. */ member = pe__last_group_member(primary); } else if (primary->children != NULL) { /* For optional colocations, whether the group is partially or fully * up doesn't matter, so apply the colocation based on the first * member. */ member = (pe_resource_t *) primary->children->data; } if (member == NULL) { return; // Nothing to colocate with } member->cmds->apply_coloc_score(dependent, member, colocation, false); return; } if (colocation->score >= INFINITY) { pcmk__config_err("%s: Cannot perform mandatory colocation with" " non-colocated group %s", dependent->id, primary->id); return; } // Colocate dependent with each member individually for (const GList *iter = primary->children; iter != NULL; iter = iter->next) { member = iter->data; member->cmds->apply_coloc_score(dependent, member, colocation, false); } } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__group_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { CRM_ASSERT((dependent != NULL) && (primary != NULL) && (colocation != NULL)); if (for_dependent) { colocate_group_with(dependent, primary, colocation); } else { // Method should only be called for primitive dependents CRM_ASSERT(dependent->variant == pe_native); colocate_with_group(dependent, primary, colocation); } } /*! 
* \internal * \brief Return action flags for a given group resource action * * \param[in,out] action Group action to get flags for * \param[in] node If not NULL, limit effects to this node * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node) { // Default flags for a group action uint32_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo; CRM_ASSERT(action != NULL); // Update flags considering each member's own flags for same action for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *member = (pe_resource_t *) iter->data; // Check whether member has the same action enum action_tasks task = get_complex_task(member, action->task); const char *task_s = task2text(task); pe_action_t *member_action = find_first_action(member->actions, NULL, task_s, node); if (member_action != NULL) { uint32_t member_flags = member->cmds->action_flags(member_action, node); // Group action is mandatory if any member action is if (pcmk_is_set(flags, pe_action_optional) && !pcmk_is_set(member_flags, pe_action_optional)) { pe_rsc_trace(action->rsc, "%s is mandatory because %s is", action->uuid, member_action->uuid); pe__clear_raw_action_flags(flags, "group action", pe_action_optional); pe__clear_action_flags(action, pe_action_optional); } // Group action is unrunnable if any member action is if (!pcmk__str_eq(task_s, action->task, pcmk__str_none) && pcmk_is_set(flags, pe_action_runnable) && !pcmk_is_set(member_flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "%s is unrunnable because %s is", action->uuid, member_action->uuid); pe__clear_raw_action_flags(flags, "group action", pe_action_runnable); pe__clear_action_flags(action, pe_action_runnable); } /* Group (pseudo-)actions other than stop or demote are unrunnable * unless every member will do it. */ } else if ((task != stop_rsc) && (task != action_demote)) { pe_rsc_trace(action->rsc, "%s is not runnable because %s will not %s", action->uuid, member->id, task_s); pe__clear_raw_action_flags(flags, "group action", pe_action_runnable); } } return flags; } /*! * \internal * \brief Update two actions according to an ordering between them * * Given information about an ordering of two actions, update the actions' flags * (and runnable_before members if appropriate) as appropriate for the ordering. * Effects may cascade to other orderings involving the actions as well. 
* * \param[in,out] first 'First' action in an ordering * \param[in,out] then 'Then' action in an ordering * \param[in] node If not NULL, limit scope of ordering to this node * (only used when interleaving instances) * \param[in] flags Action flags for \p first for ordering purposes * \param[in] filter Action flags to limit scope of certain updates (may * include pe_action_optional to affect only mandatory * actions, and pe_action_runnable to affect only * runnable actions) * \param[in] type Group of enum pe_ordering flags to apply * \param[in,out] data_set Cluster working set * * \return Group of enum pcmk__updated flags indicating what was updated */ uint32_t pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set) { uint32_t changed = pcmk__updated_none; // Group method can be called only on behalf of "then" action CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL) && (data_set != NULL)); // Update the actions for the group itself changed |= pcmk__update_ordered_actions(first, then, node, flags, filter, type, data_set); // Update the actions for each group member for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *member = (pe_resource_t *) iter->data; pe_action_t *member_action = find_first_action(member->actions, NULL, then->task, node); if (member_action != NULL) { changed |= member->cmds->update_ordered_actions(first, member_action, node, flags, filter, type, data_set); } } return changed; } /*! * \internal * \brief Apply a location constraint to a group's allowed node scores * * \param[in,out] rsc Group resource to apply constraint to * \param[in,out] location Location constraint to apply */ void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location) { GList *node_list_orig = NULL; GList *node_list_copy = NULL; bool reset_scores = true; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group) && (location != NULL)); node_list_orig = location->node_list_rh; node_list_copy = pcmk__copy_node_list(node_list_orig, true); reset_scores = pe__group_flag_is_set(rsc, pe__group_colocated); // Apply the constraint for the group itself (updates node scores) pcmk__apply_location(rsc, location); // Apply the constraint for each member for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *member = (pe_resource_t *) iter->data; member->cmds->apply_location(member, location); if (reset_scores) { /* The first member of colocated groups needs to use the original * node scores, but subsequent members should work on a copy, since * the first member's scores already incorporate theirs. */ reset_scores = false; location->node_list_rh = node_list_copy; } } location->node_list_rh = node_list_orig; g_list_free_full(node_list_copy, free); } // Group implementation of resource_alloc_functions_t:colocated_resources() GList * pcmk__group_colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs) { const pe_resource_t *member = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)); if (orig_rsc == NULL) { orig_rsc = rsc; } if (pe__group_flag_is_set(rsc, pe__group_colocated) || pe_rsc_is_clone(rsc->parent)) { /* This group has colocated members and/or is cloned -- either way, * add every child's colocated resources to the list. The first and last * members will include the group's own colocations. 
*/ colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc); for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { member = (const pe_resource_t *) iter->data; colocated_rscs = member->cmds->colocated_resources(member, orig_rsc, colocated_rscs); } } else if (rsc->children != NULL) { /* This group's members are not colocated, and the group is not cloned, * so just add the group's own colocations to the list. */ colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs); } return colocated_rscs; } // Group implementation of resource_alloc_functions_t:with_this_colocations() void pcmk__with_group_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group) && (orig_rsc != NULL) && (list != NULL)); // Ignore empty groups if (rsc->children == NULL) { return; } /* "With this" colocations are needed only for the group itself and for its * last member. Add the group's colocations plus any relevant * parent colocations if cloned. */ if ((rsc == orig_rsc) || (orig_rsc == pe__last_group_member(rsc))) { crm_trace("Adding 'with %s' colocations to list for %s", rsc->id, orig_rsc->id); pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); if (rsc->parent != NULL) { // Cloned group rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list); } } } // Group implementation of resource_alloc_functions_t:this_with_colocations() void pcmk__group_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group) && (orig_rsc != NULL) && (list != NULL)); // Ignore empty groups if (rsc->children == NULL) { return; } /* Colocations for the group itself, or for its first member, consist of the * group's colocations plus any relevant parent colocations if cloned. */ if ((rsc == orig_rsc) || (orig_rsc == (const pe_resource_t *) rsc->children->data)) { crm_trace("Adding '%s with' colocations to list for %s", rsc->id, orig_rsc->id); pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); if (rsc->parent != NULL) { // Cloned group rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list); } return; } /* Later group members honor the group's colocations indirectly, due to the * internal group colocations that chain everything from the first member. * However, if an earlier group member is unmanaged, this chaining will not * happen, so the group's mandatory colocations must be explicitly added. */ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { const pe_resource_t *member = (const pe_resource_t *) iter->data; if (orig_rsc == member) { break; // We've seen all earlier members, and none are unmanaged } if (!pcmk_is_set(member->flags, pe_rsc_managed)) { crm_trace("Adding mandatory '%s with' colocations to list for " "member %s because earlier member %s is unmanaged", rsc->id, orig_rsc->id, member->id); for (const GList *cons_iter = rsc->rsc_cons; cons_iter != NULL; cons_iter = cons_iter->next) { const pcmk__colocation_t *colocation = NULL; colocation = (const pcmk__colocation_t *) cons_iter->data; if (colocation->score == INFINITY) { pcmk__add_this_with(list, colocation, orig_rsc); } } // @TODO Add mandatory (or all?) clone constraints if cloned break; } } } /*! 
* \internal * \brief Update nodes with scores of colocated resources' nodes * * Given a table of nodes and a resource, update the nodes' scores with the * scores of the best nodes matching the attribute used for each of the * resource's relevant colocations. * * \param[in,out] rsc Group resource to check colocations for * \param[in] log_id Resource ID for logs (if NULL, use \p rsc ID) * \param[in,out] nodes Nodes to update (set initial contents to NULL * to copy \p rsc's allowed nodes) * \param[in] colocation Original colocation constraint (used to get * configured primary resource's stickiness, and * to get colocation node attribute; if NULL, * \p rsc's own matching node scores will not be * added, and *nodes must be NULL as well) * \param[in] factor Incorporate scores multiplied by this factor * \param[in] flags Bitmask of enum pcmk__coloc_select values * * \note NULL *nodes, NULL colocation, and the pcmk__coloc_select_this_with * flag are used together (and only by cmp_resources()). * \note The caller remains responsible for freeing \p *nodes. */ void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id, GHashTable **nodes, pcmk__colocation_t *colocation, float factor, uint32_t flags) { pe_resource_t *member = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group) && (nodes != NULL) && ((colocation != NULL) || (*nodes == NULL))); if (log_id == NULL) { log_id = rsc->id; } // Avoid infinite recursion if (pcmk_is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", log_id, rsc->id); return; } pe__set_resource_flags(rsc, pe_rsc_merging); // Ignore empty groups (only possible with schema validation disabled) if (rsc->children == NULL) { return; } /* Refer the operation to the first or last member as appropriate. * * cmp_resources() is the only caller that passes a NULL nodes table, * and is also the only caller using pcmk__coloc_select_this_with. * For "this with" colocations, the last member will recursively incorporate * all the other members' "this with" colocations via the internal group * colocations (and via the first member, the group's own colocations). * * For "with this" colocations, the first member works similarly. 
*/ if (*nodes == NULL) { member = pe__last_group_member(rsc); } else { member = rsc->children->data; } pe_rsc_trace(rsc, "%s: Merging scores from group %s using member %s " "(at %.6f)", log_id, rsc->id, member->id, factor); member->cmds->add_colocated_node_scores(member, log_id, nodes, colocation, factor, flags); pe__clear_resource_flags(rsc, pe_rsc_merging); } // Group implementation of resource_alloc_functions_t:add_utilization() void pcmk__group_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { pe_resource_t *member = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group) && (orig_rsc != NULL) && (utilization != NULL)); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization", orig_rsc->id, rsc->id); if (pe__group_flag_is_set(rsc, pe__group_colocated) || pe_rsc_is_clone(rsc->parent)) { // Every group member will be on same node, so sum all members for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { member = (pe_resource_t *) iter->data; if (pcmk_is_set(member->flags, pe_rsc_provisional) && (g_list_find(all_rscs, member) == NULL)) { member->cmds->add_utilization(member, orig_rsc, all_rscs, utilization); } } } else if (rsc->children != NULL) { // Just add first member's utilization member = (pe_resource_t *) rsc->children->data; if ((member != NULL) && pcmk_is_set(member->flags, pe_rsc_provisional) && (g_list_find(all_rscs, member) == NULL)) { member->cmds->add_utilization(member, orig_rsc, all_rscs, utilization); } } } // Group implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__group_shutdown_lock(pe_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)); for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *member = (pe_resource_t *) iter->data; member->cmds->shutdown_lock(member); } } diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index 4eb4e87c07..4d1345f837 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -1,1583 +1,1583 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include // uint8_t, uint32_t #include #include #include "libpacemaker_private.h" static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional); static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the immediate next role when transitioning from one role * to a target role. For example, when going from Stopped to Promoted, the * next role is Unpromoted, because the resource must be started before it * can be promoted. The current state then becomes Started, which is fed * into this array again, giving a next role of Promoted. 
* * Current role Immediate next role Final target role * ------------ ------------------- ----------------- */ /* Unknown */ { RSC_ROLE_UNKNOWN, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STOPPED, /* Started */ RSC_ROLE_STOPPED, /* Unpromoted */ RSC_ROLE_STOPPED, /* Promoted */ }, /* Stopped */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STARTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_UNPROMOTED, /* Promoted */ }, /* Started */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STARTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, /* Unpromoted */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STOPPED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, /* Promoted */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_UNPROMOTED, /* Stopped */ RSC_ROLE_UNPROMOTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, }; /*! * \internal * \brief Function to schedule actions needed for a role change * * \param[in,out] rsc Resource whose role is changing * \param[in,out] node Node where resource will be in its next role * \param[in] optional Whether scheduled actions should be optional */ typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node, bool optional); static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the function needed to transition directly from one role * to another. NULL indicates that nothing is needed. * * Current role Transition function Next role * ------------ ------------------- ---------- */ /* Unknown */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ assert_role_error, /* Started */ assert_role_error, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Stopped */ { assert_role_error, /* Unknown */ NULL, /* Stopped */ start_resource, /* Started */ start_resource, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Started */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ NULL, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Unpromoted */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ stop_resource, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Promoted */ { assert_role_error, /* Unknown */ demote_resource, /* Stopped */ demote_resource, /* Started */ demote_resource, /* Unpromoted */ NULL, /* Promoted */ }, }; /*! * \internal * \brief Get a list of a resource's allowed nodes sorted by node score * * \param[in] rsc Resource to check * * \return List of allowed nodes sorted by node score */ static GList * sorted_allowed_nodes(const pe_resource_t *rsc) { if (rsc->allowed_nodes != NULL) { GList *nodes = g_hash_table_get_values(rsc->allowed_nodes); if (nodes != NULL) { return pcmk__sort_nodes(nodes, pe__current_node(rsc)); } } return NULL; } /*! 
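
/* Hypothetical sketch (not part of this patch, editor's illustration only):
 * how the two tables above are meant to be combined. rsc_state_matrix
 * yields the immediate next role for a current/target pair, and
 * rsc_action_matrix yields the function (if any) that schedules that
 * transition. The helper name example_walk_roles is assumed, not real.
 */
static void
example_walk_roles(pe_resource_t *rsc, pe_node_t *node,
                   enum rsc_role_e target, bool optional)
{
    enum rsc_role_e role = rsc->role;

    while (role != target) {
        enum rsc_role_e next_role = rsc_state_matrix[role][target];
        rsc_transition_fn fn = rsc_action_matrix[role][next_role];

        if (fn == NULL) {
            break; // nothing to schedule for this step
        }
        fn(rsc, node, optional); // stop, start, demote, or promote
        role = next_role;        // e.g. Stopped -> Unpromoted -> Promoted
    }
}
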
* \internal * \brief Assign a resource to its best allowed node, if possible * * \param[in,out] rsc Resource to choose a node for * \param[in] prefer If not NULL, prefer this node when all else equal * * \return true if \p rsc could be assigned to a node, otherwise false */ static bool assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) { GList *nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); if (prefer == NULL) { prefer = most_free_node; } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { // We've already finished assignment of resources to nodes return rsc->allocated_to != NULL; } // Sort allowed nodes by score nodes = sorted_allowed_nodes(rsc); if (nodes != NULL) { best = (pe_node_t *) nodes->data; // First node has best score } if ((prefer != NULL) && (nodes != NULL)) { // Get the allowed node version of prefer chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", pe__node_name(prefer), rsc->id); /* Favor the preferred node as long as its score is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's score is less than INFINITY. */ } else if (chosen->weight < best->weight) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", pe__node_name(chosen), rsc->id); chosen = NULL; } else if (!pcmk__node_available(chosen, true, false)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", pe__node_name(chosen), rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s " "(ignoring %d candidates)", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } } if ((chosen == NULL) && (best != NULL)) { /* Either there is no preferred node, or the preferred node is not * suitable, but another node is allowed to run the resource. */ chosen = best; if (!pe_rsc_is_unique_clone(rsc->parent) && (chosen->weight > 0) // Zero not acceptable && pcmk__node_available(chosen, false, false)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * pcmk__assign_instances() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unassigned instances to prefer a node that's already * running another instance. 
*/ pe_node_t *running = pe__current_node(rsc); if (running == NULL) { // Nothing to do } else if (!pcmk__node_available(running, true, false)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, pe__node_name(running)); } else { int nodes_with_best_score = 1; for (GList *iter = nodes->next; iter; iter = iter->next) { pe_node_t *allowed = (pe_node_t *) iter->data; if (allowed->weight != chosen->weight) { // The nodes are sorted by score, so no more are equal break; } if (pe__same_node(allowed, running)) { // Scores are equal, so prefer the current node chosen = allowed; } nodes_with_best_score++; } if (nodes_with_best_score > 1) { uint8_t log_level = LOG_INFO; if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "Chose %s for %s from %d nodes with score %s", pe__node_name(chosen), rsc->id, nodes_with_best_score, pcmk_readable_score(chosen->weight)); } } } pe_rsc_trace(rsc, "Chose %s for %s from %d candidates", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } pcmk__assign_resource(rsc, chosen, false); g_list_free(nodes); return rsc->allocated_to != NULL; } /*! * \internal * \brief Apply a "this with" colocation to a node's allowed node scores * * \param[in,out] colocation Colocation to apply * \param[in,out] rsc Resource being assigned */ static void apply_this_with(pcmk__colocation_t *colocation, pe_resource_t *rsc) { GHashTable *archive = NULL; pe_resource_t *other = colocation->primary; // In certain cases, we will need to revert the node scores if ((colocation->dependent_role >= RSC_ROLE_PROMOTED) || ((colocation->score < 0) && (colocation->score > -INFINITY))) { archive = pcmk__copy_node_table(rsc->allowed_nodes); } if (pcmk_is_set(other->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first" "(score=%d role=%s)", rsc->id, colocation->id, other->id, colocation->score, role2text(colocation->dependent_role)); other->cmds->assign(other, NULL); } // Apply the colocation score to this resource's allowed node scores rsc->cmds->apply_coloc_score(rsc, other, colocation, true); if ((archive != NULL) && !pcmk__any_node_available(rsc->allowed_nodes)) { pe_rsc_info(rsc, "%s: Reverting scores from colocation with %s " "because no nodes allowed", rsc->id, other->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive != NULL) { g_hash_table_destroy(archive); } } /*! * \internal * \brief Update a Pacemaker Remote node once its connection has been assigned * * \param[in] connection Connection resource that has been assigned */ static void remote_connection_assigned(const pe_resource_t *connection) { pe_node_t *remote_node = pe_find_node(connection->cluster->nodes, connection->id); CRM_CHECK(remote_node != NULL, return); if ((connection->allocated_to != NULL) && (connection->next_role != RSC_ROLE_STOPPED)) { crm_trace("Pacemaker Remote node %s will be online", remote_node->details->id); remote_node->details->online = TRUE; if (remote_node->details->unseen) { // Avoid unnecessary fence, since we will attempt connection remote_node->details->unclean = FALSE; } } else { crm_trace("Pacemaker Remote node %s will be shut down " "(%sassigned connection's next role is %s)", remote_node->details->id, ((connection->allocated_to == NULL)? "un" : ""), role2text(connection->next_role)); remote_node->details->shutdown = TRUE; } } /*! 
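
/* Hypothetical sketch (not part of this patch, editor's illustration only):
 * the archive-and-revert pattern used by apply_this_with() above. Snapshot
 * the allowed node table before applying a colocation that could rule out
 * every candidate node, and restore the snapshot if nothing remains
 * available afterwards. The helper name example_apply_coloc_safely is
 * assumed, not real.
 */
static void
example_apply_coloc_safely(pe_resource_t *rsc, pcmk__colocation_t *colocation)
{
    GHashTable *archive = pcmk__copy_node_table(rsc->allowed_nodes);

    // Apply the colocation score to this resource's allowed node scores
    rsc->cmds->apply_coloc_score(rsc, colocation->primary, colocation, true);

    if (pcmk__any_node_available(rsc->allowed_nodes)) {
        g_hash_table_destroy(archive); // colocation left usable nodes
    } else {
        g_hash_table_destroy(rsc->allowed_nodes); // too restrictive: revert
        rsc->allowed_nodes = archive;
    }
}
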
* \internal * \brief Assign a primitive resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) { GList *this_with_colocations = NULL; GList *with_this_colocations = NULL; GList *iter = NULL; pcmk__colocation_t *colocation = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native)); // Never assign a child without parent being assigned first if ((rsc->parent != NULL) && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "%s: Assigning parent %s first", rsc->id, rsc->parent->id); rsc->parent->cmds->assign(rsc->parent, prefer); } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { // Assignment has already been done const char *node_name = "no node"; if (rsc->allocated_to != NULL) { node_name = pe__node_name(rsc->allocated_to); } pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name); return rsc->allocated_to; } // Ensure we detect assignment loops if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id); return NULL; } pe__set_resource_flags(rsc, pe_rsc_allocating); pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes, rsc->cluster); this_with_colocations = pcmk__this_with_colocations(rsc); with_this_colocations = pcmk__with_this_colocations(rsc); // Apply mandatory colocations first, to satisfy as many as possible for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { apply_this_with(iter->data, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(iter->data, rsc); } } pe__show_node_scores(true, rsc, "Mandatory-colocations", rsc->allowed_nodes, rsc->cluster); // Then apply optional colocations for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { apply_this_with(iter->data, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(iter->data, rsc); } } g_list_free(this_with_colocations); g_list_free(with_this_colocations); if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Banning %s from all nodes because it will be stopped", rsc->id); resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, rsc->cluster); } else if ((rsc->next_role > rsc->role) && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum) && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) { crm_notice("Resource %s cannot be elevated from %s to %s due to " "no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); } pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Unmanage resource if fencing is enabled but no device is configured if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled) && 
!pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) { pe__clear_resource_flags(rsc, pe_rsc_managed); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { // Unmanaged resources stay on their current node const char *reason = NULL; pe_node_t *assign_to = NULL; pe__set_next_role(rsc, rsc->role, "unmanaged"); assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_PROMOTED) { reason = "promoted"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? assign_to->details->uname : "no node"), reason); pcmk__assign_resource(rsc, assign_to, true); } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); pcmk__assign_resource(rsc, NULL, true); } else if (!assign_best_node(rsc, prefer)) { // Assignment failed if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } pe__clear_resource_flags(rsc, pe_rsc_allocating); if (rsc->is_remote_node) { remote_connection_assigned(rsc); } return rsc->allocated_to; } /*! * \internal * \brief Schedule actions to bring resource down and back to current role * * \param[in,out] rsc Resource to restart * \param[in,out] current Node that resource should be brought down on * \param[in] need_stop Whether the resource must be stopped * \param[in] need_promote Whether the resource must be promoted * * \return Role that resource would have after scheduled actions are taken */ static void schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current, bool need_stop, bool need_promote) { enum rsc_role_e role = rsc->role; enum rsc_role_e next_role; rsc_transition_fn fn = NULL; pe__set_resource_flags(rsc, pe_rsc_restarting); // Bring resource down to a stop on its current node while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s", (need_stop? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, current, !need_stop); role = next_role; } // Bring resource up to its next role on its next node while ((rsc->role <= rsc->next_role) && (role != rsc->role) && !pcmk_is_set(rsc->flags, pe_rsc_block)) { bool required = need_stop; next_role = rsc_state_matrix[role][rsc->role]; if ((next_role == RSC_ROLE_PROMOTED) && need_promote) { required = true; } pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s", (required? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, !required); role = next_role; } pe__clear_resource_flags(rsc, pe_rsc_restarting); } /*! 
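
/* Hypothetical sketch (not part of this patch, editor's illustration only):
 * the mandatory-versus-optional split that pcmk__primitive_assign() uses
 * above. A colocation is applied in the first (mandatory) pass only when
 * its score is at one of the infinities; everything else waits for the
 * optional pass. The helper name example_coloc_is_mandatory is assumed,
 * not real.
 */
static bool
example_coloc_is_mandatory(const pcmk__colocation_t *colocation)
{
    return (colocation->score <= -CRM_SCORE_INFINITY)
           || (colocation->score >= CRM_SCORE_INFINITY);
}
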
* \internal * \brief If a resource's next role is not explicitly specified, set a default * * \param[in,out] rsc Resource to set next role for * * \return "explicit" if next role was explicitly set, otherwise "implicit" */ static const char * set_default_next_role(pe_resource_t *rsc) { if (rsc->next_role != RSC_ROLE_UNKNOWN) { return "explicit"; } if (rsc->allocated_to == NULL) { pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment"); } else { pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment"); } return "implicit"; } /*! * \internal * \brief Create an action to represent an already pending start * * \param[in,out] rsc Resource to create start action for */ static void create_pending_start(pe_resource_t *rsc) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating action for %s to represent already pending start", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); pe__set_action_flags(start, pe_action_print_always); } /*! * \internal * \brief Schedule actions needed to take a resource to its next role * * \param[in,out] rsc Resource to schedule actions for */ static void schedule_role_transition_actions(pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; while (role != rsc->next_role) { enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role]; rsc_transition_fn fn = NULL; pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)", rsc->id, role2text(role), role2text(next_role), role2text(rsc->next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, false); role = next_role; } } /*! * \internal * \brief Create all actions needed for a given primitive resource * * \param[in,out] rsc Primitive resource to create actions for */ void pcmk__primitive_create_actions(pe_resource_t *rsc) { bool need_stop = false; bool need_promote = false; bool is_moving = false; bool allow_migrate = false; bool multiply_active = false; pe_node_t *current = NULL; unsigned int num_all_active = 0; unsigned int num_clean_active = 0; const char *next_role_source = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native)); next_role_source = set_default_next_role(rsc); pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s " "(%s) on %s", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next_role_source, pe__node_name(rsc->allocated_to)); current = rsc->fns->active_node(rsc, &num_all_active, &num_clean_active); g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration, rsc); if ((current != NULL) && (rsc->allocated_to != NULL) && !pe__same_node(current, rsc->allocated_to) && (rsc->next_role >= RSC_ROLE_STARTED)) { pe_rsc_trace(rsc, "Moving %s from %s to %s", rsc->id, pe__node_name(current), pe__node_name(rsc->allocated_to)); is_moving = true; allow_migrate = pcmk__rsc_can_migrate(rsc, current); // This is needed even if migrating (though I'm not sure why ...) need_stop = true; } // Check whether resource is partially migrated and/or multiply active if ((rsc->partial_migration_source != NULL) && (rsc->partial_migration_target != NULL) && allow_migrate && (num_all_active == 2) && pe__same_node(current, rsc->partial_migration_source) && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) { /* A partial migration is in progress, and the migration target remains * the same as when the migration began. 
*/ pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else if ((rsc->partial_migration_source != NULL) || (rsc->partial_migration_target != NULL)) { // A partial migration is in progress but can't be continued if (num_all_active > 2) { // The resource is migrating *and* multiply active! crm_notice("Forcing recovery of %s because it is migrating " "from %s to %s and possibly active elsewhere", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else { // The migration source or target isn't available crm_notice("Forcing recovery of %s because it can no longer " "migrate from %s to %s", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } need_stop = true; rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = false; } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { multiply_active = (num_all_active > 1); } else { /* If a resource has "requires" set to nothing or quorum, don't consider * it active on unclean nodes (similar to how all resources behave when * stonith-enabled is false). We can start such resources elsewhere * before fencing completes, and if we considered the resource active on * the failed node, we would attempt recovery for being active on * multiple nodes. */ multiply_active = (num_clean_active > 1); } if (multiply_active) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Resource was (possibly) incorrectly multiply active pe_proc_err("%s resource %s might be active on %u nodes (%s)", pcmk__s(class, "Untyped"), rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ" "#Resource_is_Too_Active for more information"); switch (rsc->recovery_type) { case recovery_stop_start: need_stop = true; break; case recovery_stop_unexpected: need_stop = true; // stop_resource() will skip expected node pe__set_resource_flags(rsc, pe_rsc_stop_unexpected); break; default: break; } } else { pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected); } if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { create_pending_start(rsc); } if (is_moving) { // Remaining tests are only for resources staying where they are } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pe_rsc_stop)) { need_stop = true; pe_rsc_trace(rsc, "Recovering %s", rsc->id); } else { pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); if (rsc->next_role == RSC_ROLE_PROMOTED) { need_promote = true; } } } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id); need_stop = true; } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL) && (rsc->allocated_to != NULL)) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating start action for promoted resource %s", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); if (!pcmk_is_set(start->flags, pe_action_optional)) { // Recovery of a promoted resource pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id); need_stop = true; } } // Create any actions needed to bring resource down and back up to same role schedule_restart_actions(rsc, current, need_stop, need_promote); // Create any actions needed to take resource from this role to the next schedule_role_transition_actions(rsc); pcmk__create_recurring_actions(rsc); if 
(allow_migrate) { pcmk__create_migration_actions(rsc, current); } } /*! * \internal * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes * * \param[in] rsc Resource to check */ static void rsc_avoids_remote_nodes(const pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (node->details->remote_rsc != NULL) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. */ static GList * allowed_nodes_as_list(const pe_resource_t *rsc) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (!pcmk__is_daemon) { allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name); } return allowed_nodes; } /*! * \internal * \brief Create implicit constraints needed for a primitive resource * * \param[in,out] rsc Primitive resource to create implicit constraints for */ void pcmk__primitive_internal_constraints(pe_resource_t *rsc) { GList *allowed_nodes = NULL; bool check_unfencing = false; bool check_utilization = false; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native)); if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping implicit constraints for unmanaged resource %s", rsc->id); return; } // Whether resource requires unfencing check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing) && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && !pcmk__str_eq(rsc->cluster->placement_strategy, "default", pcmk__str_casei); // Order stops before starts (i.e. 
restart) pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional|pe_order_implies_then|pe_order_restart, rsc->cluster); // Promotable ordering: demote before stop, start before promote if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_UNPROMOTED)) { pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_promoted_implies_first, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, rsc->cluster); } // Don't clear resource history if probing on same node pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL, pe_order_same_node|pe_order_then_cancels_first, rsc->cluster); // Certain checks need allowed nodes if (check_unfencing || check_utilization || (rsc->container != NULL)) { allowed_nodes = allowed_nodes_as_list(rsc); } if (check_unfencing) { g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc); } if (check_utilization) { pcmk__create_utilization_constraints(rsc, allowed_nodes); } if (rsc->container != NULL) { pe_resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Guest resources are not allowed to run on Pacemaker Remote nodes, * to avoid nesting remotes. However, bundles are allowed. */ if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc, RSC_STOP, pe_order_optional); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(rsc->cluster, rsc->container); } if (remote_rsc != NULL) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. 
*/ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); pcmk__new_ordering(rsc->container, pcmk__op_key(rsc->container->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then|pe_order_runnable_left, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, pcmk__op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, rsc->cluster); if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } - pcmk__new_colocation("resource-with-container", NULL, score, rsc, + pcmk__new_colocation("#resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, pcmk__coloc_influence); } } if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) { /* Remote connections and fencing devices are not allowed to run on * Pacemaker Remote nodes */ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { enum pcmk__coloc_affects filter_results; CRM_ASSERT((dependent != NULL) && (primary != NULL) && (colocation != NULL)); if (for_dependent) { // Always process on behalf of primary resource primary->cmds->apply_coloc_score(dependent, primary, colocation, false); return; } filter_results = pcmk__colocation_affects(dependent, primary, colocation, false); pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)", ((colocation->score > 0)? 
"Colocating" : "Anti-colocating"), dependent->id, primary->id, colocation->id, colocation->score, filter_results); switch (filter_results) { case pcmk__coloc_affects_role: pcmk__apply_coloc_to_priority(dependent, primary, colocation); break; case pcmk__coloc_affects_location: pcmk__apply_coloc_to_scores(dependent, primary, colocation); break; default: // pcmk__coloc_affects_nothing return; } } /* Primitive implementation of * resource_alloc_functions_t:with_this_colocations() */ void pcmk__with_primitive_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { // Primitives don't have children, so rsc should also be orig_rsc CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (rsc == orig_rsc) && (list != NULL)); // Add primitive's own colocations plus any relevant ones from parent pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->with_this_colocations(rsc->parent, rsc, list); } } /* Primitive implementation of * resource_alloc_functions_t:this_with_colocations() */ void pcmk__primitive_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { // Primitives don't have children, so rsc should also be orig_rsc CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (rsc == orig_rsc) && (list != NULL)); // Add primitive's own colocations plus any relevant ones from parent pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->this_with_colocations(rsc->parent, rsc, list); } } /*! * \internal * \brief Return action flags for a given primitive resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node (ignored) * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node) { CRM_ASSERT(action != NULL); return (uint32_t) action->flags; } /*! * \internal * \brief Check whether a node is a multiply active resource's expected node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p rsc is multiply active with multiple-active set to * stop_unexpected, and \p node is the node where it will remain active * \note This assumes that the resource's next role cannot be changed to stopped * after this is called, which should be reasonable if status has already * been unpacked and resources have been assigned to nodes. */ static bool is_expected_node(const pe_resource_t *rsc, const pe_node_t *node) { return pcmk_all_flags_set(rsc->flags, pe_rsc_stop_unexpected|pe_rsc_restarting) && (rsc->next_role > RSC_ROLE_STOPPED) && pe__same_node(rsc->allocated_to, node); } /*! * \internal * \brief Schedule actions needed to stop a resource wherever it is active * * \param[in,out] rsc Resource being stopped * \param[in] node Node where resource is being stopped (ignored) * \param[in] optional Whether actions should be optional */ static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; pe_action_t *stop = NULL; if (is_expected_node(rsc, current)) { /* We are scheduling restart actions for a multiply active resource * with multiple-active=stop_unexpected, and this is where it should * not be stopped. 
*/ pe_rsc_trace(rsc, "Skipping stop of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); continue; } if (rsc->partial_migration_target != NULL) { // Continue migration if node originally was and remains target if (pe__same_node(current, rsc->partial_migration_target) && pe__same_node(current, rsc->allocated_to)) { pe_rsc_trace(rsc, "Skipping stop of %s on %s " "because partial migration there will continue", rsc->id, pe__node_name(current)); continue; } else { pe_rsc_trace(rsc, "Forcing stop of %s on %s " "because migration target changed", rsc->id, pe__node_name(current)); optional = false; } } pe_rsc_trace(rsc, "Scheduling stop of %s on %s", rsc->id, pe__node_name(current)); stop = stop_action(rsc, current, optional); if (rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", true); } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting |pe_rsc_stop_unexpected)) { /* We are stopping a multiply active resource on a node that is * not its expected node, and we are still scheduling restart * actions, so the stop is for being multiply active. */ pe_action_set_reason(stop, "being multiply active", true); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe__clear_action_flags(stop, pe_action_runnable); } if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) { pcmk__schedule_cleanup(rsc, current, optional); } if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false, rsc->cluster); order_actions(stop, unfence, pe_order_implies_first); if (!pcmk__node_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, pe__node_name(current)); } } } } /*! * \internal * \brief Schedule actions needed to start a resource on a node * * \param[in,out] rsc Resource being started * \param[in,out] node Node where resource should be started * \param[in] optional Whether actions should be optional */ static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { pe_action_t *start = NULL; CRM_ASSERT(node != NULL); pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)", (optional? "optional" : "required"), rsc->id, pe__node_name(node), node->weight); start = start_action(rsc, node, TRUE); pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then); if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) { pe__clear_action_flags(start, pe_action_optional); } if (is_expected_node(rsc, node)) { /* This could be a problem if the start becomes necessary for other * reasons later. */ pe_rsc_trace(rsc, "Start of multiply active resouce %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(start, pe_action_pseudo); } } /*! 
* \internal * \brief Schedule actions needed to promote a resource on a node * * \param[in,out] rsc Resource being promoted * \param[in] node Node where resource should be promoted * \param[in] optional Whether actions should be optional */ static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { GList *iter = NULL; GList *action_list = NULL; bool runnable = true; CRM_ASSERT(node != NULL); // Any start must be runnable for promotion to be runnable action_list = pe__resource_actions(rsc, node, RSC_START, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *start = (pe_action_t *) iter->data; if (!pcmk_is_set(start->flags, pe_action_runnable)) { runnable = false; } } g_list_free(action_list); if (runnable) { pe_action_t *promote = promote_action(rsc, node, optional); pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(node)); if (is_expected_node(rsc, node)) { /* This could be a problem if the promote becomes necessary for * other reasons later. */ pe_rsc_trace(rsc, "Promotion of multiply active resouce %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(promote, pe_action_pseudo); } } else { pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable", rsc->id, pe__node_name(node)); action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *promote = (pe_action_t *) iter->data; pe__clear_action_flags(promote, pe_action_runnable); } g_list_free(action_list); } } /*! * \internal * \brief Schedule actions needed to demote a resource wherever it is active * * \param[in,out] rsc Resource being demoted * \param[in] node Node where resource should be demoted (ignored) * \param[in] optional Whether actions should be optional */ static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { /* Since this will only be called for a primitive (possibly as an instance * of a collective resource), the resource is multiply active if it is * running on more than one node, so we want to demote on all of them as * part of recovery, regardless of which one is the desired node. */ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; if (is_expected_node(rsc, current)) { pe_rsc_trace(rsc, "Skipping demote of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); } else { pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(current)); demote_action(rsc, current, optional); } } } static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional) { CRM_ASSERT(false); } /*! * \internal * \brief Schedule cleanup of a resource * * \param[in,out] rsc Resource to clean up * \param[in] node Node to clean up on * \param[in] optional Whether clean-up should be optional */ void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional) { /* If the cleanup is required, its orderings are optional, because they're * relevant only if both actions are required. Conversely, if the cleanup is * optional, the orderings make the then action required if the first action * becomes required. */ uint32_t flag = optional? 
pe_order_implies_then : pe_order_optional; CRM_CHECK((rsc != NULL) && (node != NULL), return); if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed", rsc->id, pe__node_name(node)); return; } if (node->details->unclean || !node->details->online) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable", rsc->id, pe__node_name(node)); return; } crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node)); delete_action(rsc, node, optional); // stop -> clean-up -> start pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag); pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag); } /*! * \internal * \brief Add primitive meta-attributes relevant to graph actions to XML * * \param[in] rsc Primitive resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml) { char *name = NULL; char *value = NULL; const pe_resource_t *parent = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (xml != NULL)); /* Clone instance numbers get set internally as meta-attributes, and are * needed in the transition graph (for example, to tell unique clone * instances apart). */ value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } // Not sure if this one is really needed ... value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } /* The container meta-attribute can be set on the primitive itself or one of * its parents (for example, a group inside a container resource), so check * them all, and keep the highest one found. */ for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container != NULL) { crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id); } } /* Bundle replica children will get their external-ip set internally as a * meta-attribute. The graph action needs it, but under a different naming * convention than other meta-attributes. */ value = g_hash_table_lookup(rsc->meta, "external-ip"); if (value != NULL) { crm_xml_add(xml, "pcmk_external_ip", value); } } // Primitive implementation of resource_alloc_functions_t:add_utilization() void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (orig_rsc != NULL) && (utilization != NULL)); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization", orig_rsc->id, rsc->id); pcmk__release_node_capacity(utilization, rsc); } /*! * \internal * \brief Get epoch time of node's shutdown attribute (or now if none) * * \param[in,out] node Node to check * * \return Epoch time corresponding to shutdown attribute if set or now if not */ static time_t shutdown_time(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); time_t result = 0; if (shutdown != NULL) { long long result_ll; if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) { result = (time_t) result_ll; } } return (result == 0)? get_effective_time(node->details->data_set) : result; } /*! 
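
/* Hypothetical sketch (not part of this patch, editor's illustration only):
 * the meta-attribute copying pattern used by
 * pcmk__primitive_add_graph_meta() above. Look the attribute up in the
 * resource's meta table, convert it to its CRM_meta_ name, and attach it
 * to the graph action XML. The helper name example_add_meta is assumed,
 * not real.
 */
static void
example_add_meta(const pe_resource_t *rsc, xmlNode *xml, const char *attr)
{
    const char *value = g_hash_table_lookup(rsc->meta, attr);

    if (value != NULL) {
        char *name = crm_meta_name(attr); // prefixes attr with CRM_meta_
        crm_xml_add(xml, name, value);
        free(name);
    }
}
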
* \internal * \brief Ban a resource from a node if it's not locked to the node * * \param[in] data Node to check * \param[in,out] user_data Resource to check */ static void ban_if_not_locked(gpointer data, gpointer user_data) { const pe_node_t *node = (const pe_node_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) { resource_location(rsc, node, -CRM_SCORE_INFINITY, XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster); } } // Primitive implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__primitive_shutdown_lock(pe_resource_t *rsc) { const char *class = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native)); class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Fence devices and remote connections can't be locked if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches) || pe__resource_is_remote_conn(rsc, rsc->cluster)) { return; } if (rsc->lock_node != NULL) { // The lock was obtained from resource history if (rsc->running_on != NULL) { /* The resource was started elsewhere even though it is now * considered locked. This shouldn't be possible, but as a * failsafe, we don't want to disturb the resource now. */ pe_rsc_info(rsc, "Cancelling shutdown lock because %s is already active", rsc->id); pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster); rsc->lock_node = NULL; rsc->lock_time = 0; } // Only a resource active on exactly one node can be locked } else if (pcmk__list_of_1(rsc->running_on)) { pe_node_t *node = rsc->running_on->data; if (node->details->shutdown) { if (node->details->unclean) { pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", rsc->id, pe__node_name(node)); } else { rsc->lock_node = node; rsc->lock_time = shutdown_time(node); } } } if (rsc->lock_node == NULL) { // No lock needed return; } if (rsc->cluster->shutdown_lock > 0) { time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock; pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", rsc->id, pe__node_name(rsc->lock_node), (long long) lock_expiration); pe__update_recheck_time(++lock_expiration, rsc->cluster); } else { pe_rsc_info(rsc, "Locking %s to %s due to shutdown", rsc->id, pe__node_name(rsc->lock_node)); } // If resource is locked to one node, ban it from all other nodes g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc); }
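
/* Hypothetical sketch (not part of this patch, editor's illustration only):
 * the shutdown-lock arithmetic used above. A lock taken at lock_time
 * expires shutdown-lock seconds later, and the scheduler asks to be re-run
 * just after that point so the ban on other nodes can be lifted. The
 * helper name example_lock_expiration is assumed, not real.
 */
static time_t
example_lock_expiration(const pe_resource_t *rsc)
{
    time_t expiration = rsc->lock_time + rsc->cluster->shutdown_lock;

    // Ask for a new scheduler run shortly after the lock expires
    pe__update_recheck_time(expiration + 1, rsc->cluster);
    return expiration;
}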