diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h index 6548c779ab..b4919351d0 100644 --- a/include/crm/pengine/pe_types.h +++ b/include/crm/pengine/pe_types.h @@ -1,461 +1,460 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_PE_TYPES__H # define PCMK__CRM_PENGINE_PE_TYPES__H # include <stdbool.h> // bool # include <sys/types.h> // time_t # include <libxml/tree.h> // xmlNode # include <glib.h> // gboolean, guint, GList, GHashTable # include <crm/common/iso8601.h> # include <crm/common/scheduler.h> # include <crm/pengine/common.h> #ifdef __cplusplus extern "C" { #endif /*! * \file * \brief Data types for cluster status * \ingroup pengine */ typedef struct pe_node_s pe_node_t; typedef struct pe_action_s pe_action_t; typedef struct pe_resource_s pe_resource_t; typedef struct pe_working_set_s pe_working_set_t; typedef struct resource_object_functions_s { gboolean (*unpack) (pe_resource_t*, pe_working_set_t*); pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search, const pe_node_t *node, int flags); /* parameter result must be free'd */ char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*, pe_working_set_t*); //! \deprecated will be removed in a future release void (*print) (pe_resource_t*, const char*, long, void*); gboolean (*active) (pe_resource_t*, gboolean); enum rsc_role_e (*state) (const pe_resource_t*, gboolean); pe_node_t *(*location) (const pe_resource_t*, GList**, int); void (*free) (pe_resource_t*); void (*count) (pe_resource_t*); gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean); /*! * \brief Find a node (and optionally count all) where resource is active * * \param[in] rsc Resource to check * \param[out] count_all If not NULL, set this to count of active nodes * \param[out] count_clean If not NULL, set this to count of clean nodes * * \return A node where the resource is active, preferring the source node * if the resource is involved in a partial migration or a clean, * online node if the resource's "requires" is "quorum" or * "nothing", or NULL if the resource is inactive. */ pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); /*! * \brief Get maximum resource instances per node * * \param[in] rsc Resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int (*max_per_node)(const pe_resource_t *rsc); } resource_object_functions_t; typedef struct resource_alloc_functions_s resource_alloc_functions_t; struct pe_working_set_s { xmlNode *input; crm_time_t *now; /* options extracted from the input */ char *dc_uuid; pe_node_t *dc_node; const char *stonith_action; const char *placement_strategy; unsigned long long flags; int stonith_timeout; enum pe_quorum_policy no_quorum_policy; GHashTable *config_hash; GHashTable *tickets; // Actions for which there can be only one (e.g. fence nodeX) GHashTable *singletons; GList *nodes; GList *resources; GList *placement_constraints; GList *ordering_constraints; GList *colocation_constraints; GList *ticket_constraints; GList *actions; xmlNode *failed; xmlNode *op_defaults; xmlNode *rsc_defaults; /* stats */ int num_synapse; int max_valid_nodes; //!
Deprecated (will be removed in a future release) int order_id; int action_id; /* final output */ xmlNode *graph; GHashTable *template_rsc_sets; const char *localhost; GHashTable *tags; int blocked_resources; int disabled_resources; GList *param_check; // History entries that need to be checked GList *stop_needed; // Containers that need stop actions time_t recheck_by; // Hint to controller to re-run scheduler by this time int ninstances; // Total number of resource instances guint shutdown_lock;// How long (seconds) to lock resources to shutdown node int priority_fencing_delay; // Priority fencing delay void *priv; guint node_pending_timeout; // Node pending timeout }; struct pe_node_shared_s { const char *id; const char *uname; enum node_type type; /* @TODO convert these flags into a bitfield */ gboolean online; gboolean standby; gboolean standby_onfail; gboolean pending; gboolean unclean; gboolean unseen; gboolean shutdown; gboolean expected_up; gboolean is_dc; gboolean maintenance; gboolean rsc_discovery_enabled; gboolean remote_requires_reset; gboolean remote_was_fenced; gboolean remote_maintenance; /* what the remote-rsc is thinking */ gboolean unpacked; int num_resources; pe_resource_t *remote_rsc; GList *running_rsc; /* pe_resource_t* */ GList *allocated_rsc; /* pe_resource_t* */ GHashTable *attrs; /* char* => char* */ GHashTable *utilization; GHashTable *digest_cache; //!< cache of calculated resource digests int priority; // calculated based on the priority of resources running on the node pe_working_set_t *data_set; //!< Cluster that this node is part of }; struct pe_node_s { int weight; gboolean fixed; //!< \deprecated Will be removed in a future release int count; struct pe_node_shared_s *details; int rsc_discover_mode; }; -# define pe_rsc_allow_remote_remotes 0x00004000ULL # define pe_rsc_critical 0x00008000ULL # define pe_rsc_failed 0x00010000ULL # define pe_rsc_detect_loop 0x00020000ULL # define pe_rsc_runnable 0x00040000ULL # define pe_rsc_start_pending 0x00080000ULL //!< \deprecated Do not use # define pe_rsc_starting 0x00100000ULL //!< \deprecated Do not use # define pe_rsc_stopping 0x00200000ULL # define pe_rsc_stop_unexpected 0x00400000ULL # define pe_rsc_allow_migrate 0x00800000ULL # define pe_rsc_failure_ignored 0x01000000ULL # define pe_rsc_replica_container 0x02000000ULL # define pe_rsc_maintenance 0x04000000ULL # define pe_rsc_is_container 0x08000000ULL # define pe_rsc_needs_quorum 0x10000000ULL # define pe_rsc_needs_fencing 0x20000000ULL # define pe_rsc_needs_unfencing 0x40000000ULL /* *INDENT-OFF* */ enum pe_action_flags { pe_action_pseudo = 0x00001, pe_action_runnable = 0x00002, pe_action_optional = 0x00004, pe_action_print_always = 0x00008, pe_action_have_node_attrs = 0x00010, pe_action_implied_by_stonith = 0x00040, pe_action_migrate_runnable = 0x00080, pe_action_dumped = 0x00100, pe_action_processed = 0x00200, #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) pe_action_clear = 0x00400, //! \deprecated Unused #endif pe_action_dangle = 0x00800, /* This action requires one or more of its dependencies to be runnable. * We use this to clear the runnable flag before checking dependencies. */ pe_action_requires_any = 0x01000, pe_action_reschedule = 0x02000, pe_action_tracking = 0x04000, pe_action_dedup = 0x08000, //! Internal state tracking when creating graph pe_action_dc = 0x10000, //! 
Action may run on DC instead of target }; /* *INDENT-ON* */ struct pe_resource_s { char *id; char *clone_name; xmlNode *xml; xmlNode *orig_xml; xmlNode *ops_xml; pe_working_set_t *cluster; pe_resource_t *parent; enum pe_obj_types variant; void *variant_opaque; resource_object_functions_t *fns; resource_alloc_functions_t *cmds; enum rsc_recovery_type recovery_type; enum pe_restart restart_type; //!< \deprecated will be removed in a future release int priority; int stickiness; int sort_index; int failure_timeout; int migration_threshold; guint remote_reconnect_ms; char *pending_task; unsigned long long flags; // @TODO merge these into flags gboolean is_remote_node; gboolean exclusive_discover; /* Pay special attention to whether you want to use rsc_cons_lhs and * rsc_cons directly, which include only colocations explicitly involving * this resource, or call libpacemaker's pcmk__with_this_colocations() and * pcmk__this_with_colocations() functions, which may return relevant * colocations involving the resource's ancestors as well. */ //!@{ //! This field should be treated as internal to Pacemaker GList *rsc_cons_lhs; // List of pcmk__colocation_t* GList *rsc_cons; // List of pcmk__colocation_t* GList *rsc_location; // List of pe__location_t* GList *actions; // List of pe_action_t* GList *rsc_tickets; // List of rsc_ticket* //!@} pe_node_t *allocated_to; pe_node_t *partial_migration_target; pe_node_t *partial_migration_source; GList *running_on; /* pe_node_t* */ GHashTable *known_on; /* pe_node_t* */ GHashTable *allowed_nodes; /* pe_node_t* */ enum rsc_role_e role; enum rsc_role_e next_role; GHashTable *meta; GHashTable *parameters; //! \deprecated Use pe_rsc_params() instead GHashTable *utilization; GList *children; /* pe_resource_t* */ GList *dangling_migrations; /* pe_node_t* */ pe_resource_t *container; GList *fillers; // @COMPAT These should be made const at next API compatibility break pe_node_t *pending_node; // Node on which pending_task is happening pe_node_t *lock_node; // Resource is shutdown-locked to this node time_t lock_time; // When shutdown lock started /* Resource parameters may have node-attribute-based rules, which means the * values can vary by node. This table is a cache of parameter name/value * tables for each node (as needed). Use pe_rsc_params() to get the table * for a given node. */ GHashTable *parameter_cache; // Key = node name, value = parameters table }; struct pe_action_s { int id; int priority; pe_resource_t *rsc; pe_node_t *node; xmlNode *op_entry; char *task; char *uuid; char *cancel_task; char *reason; enum pe_action_flags flags; enum rsc_start_requirement needs; enum action_fail_response on_fail; enum rsc_role_e fail_role; GHashTable *meta; GHashTable *extra; /* * These two variables are associated with the constraint logic * that involves first having one or more actions runnable before * then allowing this action to execute. * * These variables are used with features such as 'clone-min' which * requires at minimum X number of cloned instances to be running * before an order dependency can run. Another option that uses * this is 'require-all=false' in ordering constraints. This option * says "only require one instance of a resource to start before * allowing dependencies to start" -- basically, require-all=false is * the same as clone-min=1. */
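    /* Editorial illustration, not part of the upstream header: for a
     * hypothetical ordering "A then B" where A is a three-instance clone
     * ordered with require-all=false, B's required_runnable_before would be 1,
     * and each runnable instance of A counts toward B's runnable_before. The
     * ordering update then amounts to:
     *
     *     if (action->runnable_before >= action->required_runnable_before) {
     *         pe__set_action_flags(action, pe_action_runnable);
     *     }
     *
     * With clone-min=2 configured, required_runnable_before would be 2 instead.
     */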
/* current number of known runnable actions in the before list. */ int runnable_before; /* the number of "before" runnable actions required for this action * to be considered runnable */ int required_runnable_before; GList *actions_before; /* pe_action_wrapper_t* */ GList *actions_after; /* pe_action_wrapper_t* */ /* Some of the above fields could be moved to the details, * except for API backward compatibility. */ void *action_details; // varies by type of action }; typedef struct pe_ticket_s { char *id; gboolean granted; time_t last_granted; gboolean standby; GHashTable *state; } pe_ticket_t; typedef struct pe_tag_s { char *id; GList *refs; } pe_tag_t; //! Internal tracking for transition graph creation enum pe_link_state { pe_link_not_dumped, //! Internal tracking for transition graph creation pe_link_dumped, //! Internal tracking for transition graph creation pe_link_dup, //! \deprecated No longer used by Pacemaker }; enum pe_discover_e { pe_discover_always = 0, pe_discover_never, pe_discover_exclusive, }; /* *INDENT-OFF* */ enum pe_ordering { pe_order_none = 0x0, /* deleted */ pe_order_optional = 0x1, /* pure ordering, nothing implied */ pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */ pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */ pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */ pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */ /* first requires then to be both runnable and migrate runnable. */ pe_order_implies_first_migratable = 0x80, pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */ pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX', * ensure instances of 'then' on 'nodeX' are too. * Only really useful if 'then' is a clone and 'first' is not */ pe_order_probe = 0x800, /* If 'first->rsc' is * - running but about to stop, ignore the constraint * - otherwise, behave as runnable_left */ pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ pe_order_stonith_stop = 0x2000, // #endif #ifdef __cplusplus } #endif #endif // PCMK__CRM_PENGINE_PE_TYPES__H diff --git a/include/crm/pengine/pe_types_compat.h b/include/crm/pengine/pe_types_compat.h index 41312dce1b..1014df231e 100644 --- a/include/crm/pengine/pe_types_compat.h +++ b/include/crm/pengine/pe_types_compat.h @@ -1,188 +1,191 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H # define PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H #include <crm/pengine/pe_types.h> #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Deprecated Pacemaker scheduler API * \ingroup pengine * \deprecated Do not include this header directly. The scheduler APIs in this * header, and the header itself, will be removed in a future * release. */ //! \deprecated Use pcmk_rsc_removed instead #define pe_rsc_orphan pcmk_rsc_removed //! \deprecated Use pcmk_rsc_managed instead #define pe_rsc_managed pcmk_rsc_managed //! \deprecated Use pcmk_rsc_blocked instead #define pe_rsc_block pcmk_rsc_blocked //! \deprecated Use pcmk_rsc_removed_filler instead #define pe_rsc_orphan_container_filler pcmk_rsc_removed_filler //!
\deprecated Use pcmk_rsc_notify instead #define pe_rsc_notify pcmk_rsc_notify //! \deprecated Use pcmk_rsc_unique instead #define pe_rsc_unique pcmk_rsc_unique //! \deprecated Use pcmk_rsc_fence_device instead #define pe_rsc_fence_device pcmk_rsc_fence_device //! \deprecated Use pcmk_rsc_promotable instead #define pe_rsc_promotable pcmk_rsc_promotable //! \deprecated Use pcmk_rsc_unassigned instead #define pe_rsc_provisional pcmk_rsc_unassigned //! \deprecated Use pcmk_rsc_assigning instead #define pe_rsc_allocating pcmk_rsc_assigning //! \deprecated Use pcmk_rsc_updating_nodes instead #define pe_rsc_merging pcmk_rsc_updating_nodes //! \deprecated Use pcmk_rsc_restarting instead #define pe_rsc_restarting pcmk_rsc_restarting //! \deprecated Use pcmk_rsc_stop_if_failed instead #define pe_rsc_stop pcmk_rsc_stop_if_failed //! \deprecated Use pcmk_rsc_reload instead #define pe_rsc_reload pcmk_rsc_reload +//! \deprecated Use pcmk_rsc_remote_nesting_allowed instead +#define pe_rsc_allow_remote_remotes pcmk_rsc_remote_nesting_allowed + //! \deprecated Use pcmk_sched_quorate instead #define pe_flag_have_quorum pcmk_sched_quorate //! \deprecated Use pcmk_sched_symmetric_cluster instead #define pe_flag_symmetric_cluster pcmk_sched_symmetric_cluster //! \deprecated Use pcmk_sched_in_maintenance instead #define pe_flag_maintenance_mode pcmk_sched_in_maintenance //! \deprecated Use pcmk_sched_fencing_enabled instead #define pe_flag_stonith_enabled pcmk_sched_fencing_enabled //! \deprecated Use pcmk_sched_have_fencing instead #define pe_flag_have_stonith_resource pcmk_sched_have_fencing //! \deprecated Use pcmk_sched_enable_unfencing instead #define pe_flag_enable_unfencing pcmk_sched_enable_unfencing //! \deprecated Use pcmk_sched_concurrent_fencing instead #define pe_flag_concurrent_fencing pcmk_sched_concurrent_fencing //! \deprecated Use pcmk_sched_stop_removed_resources instead #define pe_flag_stop_rsc_orphans pcmk_sched_stop_removed_resources //! \deprecated Use pcmk_sched_cancel_removed_actions instead #define pe_flag_stop_action_orphans pcmk_sched_cancel_removed_actions //! \deprecated Use pcmk_sched_stop_all instead #define pe_flag_stop_everything pcmk_sched_stop_all //! \deprecated Use pcmk_sched_start_failure_fatal instead #define pe_flag_start_failure_fatal pcmk_sched_start_failure_fatal //! \deprecated Do not use #define pe_flag_remove_after_stop pcmk_sched_remove_after_stop //! \deprecated Use pcmk_sched_startup_fencing instead #define pe_flag_startup_fencing pcmk_sched_startup_fencing //! \deprecated Use pcmk_sched_shutdown_lock instead #define pe_flag_shutdown_lock pcmk_sched_shutdown_lock //! \deprecated Use pcmk_sched_probe_resources instead #define pe_flag_startup_probes pcmk_sched_probe_resources //! \deprecated Use pcmk_sched_have_status instead #define pe_flag_have_status pcmk_sched_have_status //! \deprecated Use pcmk_sched_have_remote_nodes instead #define pe_flag_have_remote_nodes pcmk_sched_have_remote_nodes //! \deprecated Use pcmk_sched_location_only instead #define pe_flag_quick_location pcmk_sched_location_only //! \deprecated Use pcmk_sched_sanitized instead #define pe_flag_sanitized pcmk_sched_sanitized //! \deprecated Do not use #define pe_flag_stdout (1ULL << 22) //! \deprecated Use pcmk_sched_no_counts instead #define pe_flag_no_counts pcmk_sched_no_counts //! \deprecated Use pcmk_sched_no_compat instead #define pe_flag_no_compat pcmk_sched_no_compat //! \deprecated Use pcmk_sched_output_scores instead #define pe_flag_show_scores pcmk_sched_output_scores //! 
\deprecated Use pcmk_sched_show_utilization instead #define pe_flag_show_utilization pcmk_sched_show_utilization //! \deprecated Use pcmk_sched_validate_only instead #define pe_flag_check_config pcmk_sched_validate_only //!@{ //! \deprecated Do not use (unused by Pacemaker) enum pe_graph_flags { pe_graph_none = 0x00000, pe_graph_updated_first = 0x00001, pe_graph_updated_then = 0x00002, pe_graph_disable = 0x00004, }; //!@} //!@{ //! \deprecated Do not use enum pe_check_parameters { pe_check_last_failure, pe_check_active, }; //!@} //!< \deprecated Use pe_action_t instead typedef struct pe_action_s action_t; //!< \deprecated Use pe_action_wrapper_t instead typedef struct pe_action_wrapper_s action_wrapper_t; //!< \deprecated Use pe_node_t instead typedef struct pe_node_s node_t; //!< \deprecated Use enum pe_quorum_policy instead typedef enum pe_quorum_policy no_quorum_policy_t; //!< \deprecated use pe_resource_t instead typedef struct pe_resource_s resource_t; //!< \deprecated Use pe_tag_t instead typedef struct pe_tag_s tag_t; //!< \deprecated Use pe_ticket_t instead typedef struct pe_ticket_s ticket_t; #ifdef __cplusplus } #endif #endif // PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h index 48dead7df7..96c72fd0c6 100644 --- a/lib/pacemaker/libpacemaker_private.h +++ b/lib/pacemaker/libpacemaker_private.h @@ -1,1139 +1,1140 @@ /* * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__LIBPACEMAKER_PRIVATE__H # define PCMK__LIBPACEMAKER_PRIVATE__H /* This header is for the sole use of libpacemaker, so that functions can be * declared with G_GNUC_INTERNAL for efficiency. */ #include <crm/lrmd_events.h> // lrmd_event_data_t #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t #include <crm/pengine/internal.h> // pe__location_t // Colocation flags enum pcmk__coloc_flags { pcmk__coloc_none = 0U, // Primary is affected even if already active pcmk__coloc_influence = (1U << 0), // Colocation was explicitly configured in CIB pcmk__coloc_explicit = (1U << 1), }; // Flags to modify the behavior of add_colocated_node_scores() enum pcmk__coloc_select { // With no other flags, apply all "with this" colocations pcmk__coloc_select_default = 0, // Apply "this with" colocations instead of "with this" colocations pcmk__coloc_select_this_with = (1 << 0), // Apply only colocations with non-negative scores pcmk__coloc_select_nonnegative = (1 << 1), // Apply only colocations with at least one matching node pcmk__coloc_select_active = (1 << 2), }; // Flags the update_ordered_actions() method can return enum pcmk__updated { pcmk__updated_none = 0, // Nothing changed pcmk__updated_first = (1 << 0), // First action was updated pcmk__updated_then = (1 << 1), // Then action was updated }; #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do { \ au_flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action update", \ (action)->uuid, au_flags, \ (flags_to_set), #flags_to_set); \ } while (0) #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do { \ au_flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action update", \ (action)->uuid, au_flags, \ (flags_to_clear), #flags_to_clear); \ } while (0) // Resource assignment methods struct resource_alloc_functions_s { /*!
* \internal * \brief Assign a resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a * node, set next role to stopped and update * existing actions (if \p rsc is not a * primitive, this applies to its primitive * descendants instead) * * \return Node that \p rsc is assigned to, if assigned entirely to one node * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() * can completely undo the assignment. A successful assignment can be * either undone or left alone as final. A failed assignment has the * same effect as calling pcmk__unassign_resource(); there are no side * effects on roles or actions. */ pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail); /*! * \internal * \brief Create all actions needed for a given resource * * \param[in,out] rsc Resource to create actions for */ void (*create_actions)(pe_resource_t *rsc); /*! * \internal * \brief Schedule any probes needed for a resource on a node * * \param[in,out] rsc Resource to create probe for * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node); /*! * \internal * \brief Create implicit constraints needed for a resource * * \param[in,out] rsc Resource to create implicit constraints for */ void (*internal_constraints)(pe_resource_t *rsc); /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void (*apply_coloc_score)(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); /*! * \internal * \brief Create list of all resources in colocations with a given resource * * Given a resource, create a list of all resources involved in mandatory * colocations with it, whether directly or via chained colocations. * * \param[in] rsc Resource to add to colocated list * \param[in] orig_rsc Resource originally requested * \param[in,out] colocated_rscs Existing list * * \return List of given resource and all resources involved in colocations * * \note This function is recursive; top-level callers should pass NULL as * \p colocated_rscs and \p orig_rsc, and the desired resource as * \p rsc. The recursive calls will use other values. */ GList *(*colocated_resources)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); /*! * \internal * \brief Add colocations affecting a resource as primary to a list * * Given a resource being assigned (\p orig_rsc) and a resource somewhere in * its chain of ancestors (\p rsc, which may be \p orig_rsc), get * colocations that affect the ancestor as primary and should affect the * resource, and add them to a given list. * * \param[in] rsc Resource whose colocations should be added * \param[in] orig_rsc Affected resource (\p rsc or a descendant) * \param[in,out] list List of colocations to add to * * \note All arguments should be non-NULL. 
* \note The pcmk__with_this_colocations() wrapper should usually be used * instead of using this method directly. */ void (*with_this_colocations)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); /*! * \internal * \brief Add colocations affecting a resource as dependent to a list * * Given a resource being assigned (\p orig_rsc) and a resource somewhere in * its chain of ancestors (\p rsc, which may be \p orig_rsc), get * colocations that affect the ancestor as dependent and should affect the * resource, and add them to a given list. * * * \param[in] rsc Resource whose colocations should be added * \param[in] orig_rsc Affected resource (\p rsc or a descendant) * \param[in,out] list List of colocations to add to * * \note All arguments should be non-NULL. * \note The pcmk__this_with_colocations() wrapper should usually be used * instead of using this method directly. */ void (*this_with_colocations)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); /*! * \internal * \brief Update nodes with scores of colocated resources' nodes * * Given a table of nodes and a resource, update the nodes' scores with the * scores of the best nodes matching the attribute used for each of the * resource's relevant colocations. * * \param[in,out] source_rsc Resource whose node scores to add * \param[in] target_rsc Resource on whose behalf to update \p *nodes * \param[in] log_id Resource ID for logs (if \c NULL, use * \p source_rsc ID) * \param[in,out] nodes Nodes to update (set initial contents to * \c NULL to copy allowed nodes from * \p source_rsc) * \param[in] colocation Original colocation constraint (used to get * configured primary resource's stickiness, and * to get colocation node attribute; if \c NULL, * source_rsc's own matching node scores * will not be added, and \p *nodes must be * \c NULL as well) * \param[in] factor Incorporate scores multiplied by this factor * \param[in] flags Bitmask of enum pcmk__coloc_select values * * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, * and the \c pcmk__coloc_select_this_with flag are used together (and * only by \c cmp_resources()). * \note The caller remains responsible for freeing \p *nodes. */ void (*add_colocated_node_scores)(pe_resource_t *source_rsc, const pe_resource_t *target_rsc, const char *log_id, GHashTable **nodes, const pcmk__colocation_t *colocation, float factor, uint32_t flags); /*! * \internal * \brief Apply a location constraint to a resource's allowed node scores * * \param[in,out] rsc Resource to apply constraint to * \param[in,out] location Location constraint to apply */ void (*apply_location)(pe_resource_t *rsc, pe__location_t *location); /*! * \internal * \brief Return action flags for a given resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node * * \return Flags appropriate to \p action on \p node * \note For primitives, this will be the same as action->flags regardless * of node. For collective resources, the flags can differ due to * multiple instances possibly being involved. */ uint32_t (*action_flags)(pe_action_t *action, const pe_node_t *node); /*! * \internal * \brief Update two actions according to an ordering between them * * Given information about an ordering of two actions, update the actions' * flags (and runnable_before members if appropriate) as appropriate for the * ordering. Effects may cascade to other orderings involving the actions as * well. 
* * \param[in,out] first 'First' action in an ordering * \param[in,out] then 'Then' action in an ordering * \param[in] node If not NULL, limit scope of ordering to this * node (only used when interleaving instances) * \param[in] flags Action flags for \p first for ordering purposes * \param[in] filter Action flags to limit scope of certain updates * (may include pe_action_optional to affect only * mandatory actions, and pe_action_runnable to * affect only runnable actions) * \param[in] type Group of enum pe_ordering flags to apply * \param[in,out] data_set Cluster working set * * \return Group of enum pcmk__updated flags indicating what was updated */ uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); /*! * \internal * \brief Output a summary of scheduled actions for a resource * * \param[in,out] rsc Resource to output actions for */ void (*output_actions)(pe_resource_t *rsc); /*! * \internal * \brief Add a resource's actions to the transition graph * * \param[in,out] rsc Resource whose actions should be added */ void (*add_actions_to_graph)(pe_resource_t *rsc); /*! * \internal * \brief Add meta-attributes relevant to transition graph actions to XML * * If a given resource supports variant-specific meta-attributes that are * needed for transition graph actions, add them to a given XML element. * * \param[in] rsc Resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml); /*! * \internal * \brief Add a resource's utilization to a table of utilization values * * This function is used when summing the utilization of a resource and all * resources colocated with it, to determine whether a node has sufficient * capacity. Given a resource and a table of utilization values, it will add * the resource's utilization to the existing values, if the resource has * not yet been assigned to a node. * * \param[in] rsc Resource with utilization to add * \param[in] orig_rsc Resource being assigned (for logging only) * \param[in] all_rscs List of all resources that will be summed * \param[in,out] utilization Table of utilization values to add to */ void (*add_utilization)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); /*! 
* \internal * \brief Apply a shutdown lock for a resource, if appropriate * * \param[in,out] rsc Resource to check for shutdown lock */ void (*shutdown_lock)(pe_resource_t *rsc); }; // Actions (pcmk_sched_actions.c) G_GNUC_INTERNAL void pcmk__update_action_for_orderings(pe_action_t *action, pe_working_set_t *data_set); G_GNUC_INTERNAL uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details); G_GNUC_INTERNAL pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name, guint interval_ms, const pe_node_t *node); G_GNUC_INTERNAL pe_action_t *pcmk__new_shutdown_action(pe_node_t *node); G_GNUC_INTERNAL bool pcmk__action_locks_rsc_to_node(const pe_action_t *action); G_GNUC_INTERNAL void pcmk__deduplicate_action_inputs(pe_action_t *action); G_GNUC_INTERNAL void pcmk__output_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, const xmlNode *xml_op); G_GNUC_INTERNAL void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set); // Recurring actions (pcmk_sched_recurring.c) G_GNUC_INTERNAL void pcmk__create_recurring_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task, guint interval_ms, const pe_node_t *node, const char *reason); G_GNUC_INTERNAL void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node); G_GNUC_INTERNAL bool pcmk__action_is_recurring(const pe_action_t *action); // Producing transition graphs (pcmk_graph_producer.c) G_GNUC_INTERNAL bool pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action, pe_action_wrapper_t *input); G_GNUC_INTERNAL void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_graph(pe_working_set_t *data_set); // Fencing (pcmk_sched_fencing.c) G_GNUC_INTERNAL void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order); G_GNUC_INTERNAL void pcmk__fence_guest(pe_node_t *node); G_GNUC_INTERNAL bool pcmk__node_unfenced(const pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data); // Injected scheduler inputs (pcmk_sched_injections.c) void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib, const pcmk_injections_t *injections); // Constraints of any type (pcmk_sched_constraints.c) G_GNUC_INTERNAL pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id); G_GNUC_INTERNAL xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id, pe_resource_t **rsc, pe_tag_t **tag); G_GNUC_INTERNAL bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr, bool convert_rsc, const pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__create_internal_constraints(pe_working_set_t *data_set); // Location constraints G_GNUC_INTERNAL void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc, int node_score, const char *discover_mode, 
pe_node_t *foo_node); G_GNUC_INTERNAL void pcmk__apply_locations(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint); // Colocation constraints (pcmk_sched_colocation.c) enum pcmk__coloc_affects { pcmk__coloc_affects_nothing = 0, pcmk__coloc_affects_location, pcmk__coloc_affects_role, }; /*! * \internal * \brief Get the value of a colocation's node attribute * * When looking up a colocation node attribute on a bundle node for a bundle * primitive, we should always look on the bundle node's assigned host, * regardless of the value of XML_RSC_ATTR_TARGET. At most one resource (the * bundle primitive, if any) can run on a bundle node, so any colocation must * necessarily be evaluated with respect to the bundle node (the container). * * \param[in] node Node on which to look up the attribute * \param[in] attr Name of attribute to look up * \param[in] rsc Resource on whose behalf to look up the attribute * * \return Value of \p attr on \p node or on the host of \p node, as appropriate */ static inline const char * pcmk__colocation_node_attr(const pe_node_t *node, const char *attr, const pe_resource_t *rsc) { const pe_resource_t *top = pe__const_top_resource(rsc, false); const bool force_host = pe__is_bundle_node(node) && pe_rsc_is_bundled(rsc) && (top == pe__bundled_resource(rsc)); return pe__node_attribute_calculated(node, attr, rsc, pe__rsc_node_assigned, force_host); } G_GNUC_INTERNAL enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool preview); G_GNUC_INTERNAL void pcmk__apply_coloc_to_scores(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__apply_coloc_to_priority(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__add_colocated_node_scores(pe_resource_t *source_rsc, const pe_resource_t *target_rsc, const char *log_id, GHashTable **nodes, const pcmk__colocation_t *colocation, float factor, uint32_t flags); G_GNUC_INTERNAL void pcmk__add_dependent_scores(gpointer data, gpointer user_data); G_GNUC_INTERNAL void pcmk__colocation_intersect_nodes(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, const GList *primary_nodes, bool merge_scores); G_GNUC_INTERNAL void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation, const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__add_this_with_list(GList **list, GList *addition, const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation, const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__add_with_this_list(GList **list, GList *addition, const pe_resource_t *rsc); G_GNUC_INTERNAL GList *pcmk__with_this_colocations(const pe_resource_t *rsc); G_GNUC_INTERNAL GList *pcmk__this_with_colocations(const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *dependent, pe_resource_t *primary, const char *dependent_role, const char *primary_role, uint32_t flags); G_GNUC_INTERNAL void pcmk__block_colocation_dependents(pe_action_t *action); /*! 
* \internal * \brief Check whether colocation's dependent preferences should be considered * * \param[in] colocation Colocation constraint * \param[in] rsc Primary instance (normally colocation->primary, which is * what a NULL argument is treated as, but for clones or * bundles with multiple instances this can be a particular * instance) * * \return true if colocation influence should be effective, otherwise false */ static inline bool pcmk__colocation_has_influence(const pcmk__colocation_t *colocation, const pe_resource_t *rsc) { if (rsc == NULL) { rsc = colocation->primary; } /* A bundle replica colocates its remote connection with its container, * using a finite score so that the container can run on Pacemaker Remote * nodes. * * Moving a connection is lightweight and does not interrupt the service, * while moving a container is heavyweight and does interrupt the service, * so don't move a clean, active container based solely on the preferences * of its connection. * * This also avoids problematic scenarios where two containers want to * perpetually swap places. */ - if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes) + if (pcmk_is_set(colocation->dependent->flags, + pcmk_rsc_remote_nesting_allowed) && !pcmk_is_set(rsc->flags, pe_rsc_failed) && pcmk__list_of_1(rsc->running_on)) { return false; } /* The dependent in a colocation influences the primary's location * if the influence option is true or the primary is not yet active. */ return pcmk_is_set(colocation->flags, pcmk__coloc_influence) || (rsc->running_on == NULL); } // Ordering constraints (pcmk_sched_ordering.c) G_GNUC_INTERNAL void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task, pe_action_t *first_action, pe_resource_t *then_rsc, char *then_task, pe_action_t *then_action, uint32_t flags, pe_working_set_t *sched); G_GNUC_INTERNAL void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__disable_invalid_orderings(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op); G_GNUC_INTERNAL void pcmk__apply_orderings(pe_working_set_t *sched); G_GNUC_INTERNAL void pcmk__order_after_each(pe_action_t *after, GList *list);
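/* Editorial sketch, not upstream code: the effect of
 * pcmk__colocation_has_influence() above for a bundle. Assume a colocation
 * whose dependent is a bundle replica's remote connection (which carries
 * pcmk_rsc_remote_nesting_allowed) and whose primary is the container:
 *
 *     if (pcmk__colocation_has_influence(colocation, NULL)) {
 *         // connection preferences may pull the container elsewhere
 *     } else {
 *         // container is clean and active on exactly one node; leave it alone
 *     }
 *
 * Likewise, a colocation configured with influence=false (pcmk__coloc_influence
 * unset) has no effect once the primary is already active.
 */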
/*! * \internal * \brief Create a new ordering between two resource actions * * \param[in,out] first_rsc Resource for 'first' action * \param[in,out] first_task Action key for 'first' action * \param[in] then_rsc Resource for 'then' action * \param[in,out] then_task Action key for 'then' action * \param[in] flags Bitmask of enum pe_ordering flags */ #define pcmk__order_resource_actions(first_rsc, first_task, \ then_rsc, then_task, flags) \ pcmk__new_ordering((first_rsc), \ pcmk__op_key((first_rsc)->id, (first_task), 0), \ NULL, \ (then_rsc), \ pcmk__op_key((then_rsc)->id, (then_task), 0), \ NULL, (flags), (first_rsc)->cluster) #define pcmk__order_starts(rsc1, rsc2, flags) \ pcmk__order_resource_actions((rsc1), PCMK_ACTION_START, \ (rsc2), PCMK_ACTION_START, (flags)) #define pcmk__order_stops(rsc1, rsc2, flags) \ pcmk__order_resource_actions((rsc1), PCMK_ACTION_STOP, \ (rsc2), PCMK_ACTION_STOP, (flags)) // Ticket constraints (pcmk_sched_tickets.c) G_GNUC_INTERNAL void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set); // Promotable clone resources (pcmk_sched_promotable.c) G_GNUC_INTERNAL void pcmk__add_promotion_scores(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__require_promotion_tickets(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__set_instance_roles(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_promotable_actions(pe_resource_t *clone); G_GNUC_INTERNAL void pcmk__promotable_restart_ordering(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__order_promotable_instances(pe_resource_t *clone); G_GNUC_INTERNAL void pcmk__update_dependent_with_promotable(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation); // Pacemaker Remote nodes (pcmk_sched_remote.c) G_GNUC_INTERNAL bool pcmk__is_failed_remote_node(const pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_remote_connection_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node); G_GNUC_INTERNAL pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action); G_GNUC_INTERNAL void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params); G_GNUC_INTERNAL void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action); // Primitives (pcmk_sched_primitive.c) G_GNUC_INTERNAL pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail); G_GNUC_INTERNAL void pcmk__primitive_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__primitive_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL uint32_t pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__with_primitive_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__primitive_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional); G_GNUC_INTERNAL void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const
pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__primitive_shutdown_lock(pe_resource_t *rsc); // Groups (pcmk_sched_group.c) G_GNUC_INTERNAL pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail); G_GNUC_INTERNAL void pcmk__group_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__group_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__group_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__with_group_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__group_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__group_add_colocated_node_scores(pe_resource_t *source_rsc, const pe_resource_t *target_rsc, const char *log_id, GHashTable **nodes, const pcmk__colocation_t *colocation, float factor, uint32_t flags); G_GNUC_INTERNAL void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location); G_GNUC_INTERNAL uint32_t pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL uint32_t pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); G_GNUC_INTERNAL GList *pcmk__group_colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); G_GNUC_INTERNAL void pcmk__group_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__group_shutdown_lock(pe_resource_t *rsc); // Clones (pcmk_sched_clone.c) G_GNUC_INTERNAL pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail); G_GNUC_INTERNAL void pcmk__clone_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node); G_GNUC_INTERNAL void pcmk__clone_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__clone_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__with_clone_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__clone_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *constraint); G_GNUC_INTERNAL uint32_t pcmk__clone_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL void pcmk__clone_add_actions_to_graph(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__clone_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__clone_shutdown_lock(pe_resource_t *rsc); // Bundles (pcmk_sched_bundle.c) G_GNUC_INTERNAL pe_node_t *pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail); G_GNUC_INTERNAL void pcmk__bundle_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node); G_GNUC_INTERNAL void pcmk__bundle_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL 
void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__with_bundle_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__bundle_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list); G_GNUC_INTERNAL void pcmk__bundle_apply_location(pe_resource_t *rsc, pe__location_t *constraint); G_GNUC_INTERNAL uint32_t pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL void pcmk__output_bundle_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__bundle_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__bundle_shutdown_lock(pe_resource_t *rsc); // Clone instances or bundle replica containers (pcmk_sched_instances.c) G_GNUC_INTERNAL void pcmk__assign_instances(pe_resource_t *collective, GList *instances, int max_total, int max_per_node); G_GNUC_INTERNAL void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances); G_GNUC_INTERNAL bool pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node, enum rsc_role_e role, bool current); G_GNUC_INTERNAL pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc, const pe_resource_t *rsc, enum rsc_role_e role, bool current); G_GNUC_INTERNAL uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); G_GNUC_INTERNAL uint32_t pcmk__collective_action_flags(pe_action_t *action, const GList *instances, const pe_node_t *node); // Injections (pcmk_injections.c) G_GNUC_INTERNAL xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid); G_GNUC_INTERNAL xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up); G_GNUC_INTERNAL xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *lrm_name, const char *rclass, const char *rtype, const char *rprovider); G_GNUC_INTERNAL void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *task, guint interval_ms, int rc); G_GNUC_INTERNAL xmlNode *pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op, int target_rc); // Nodes (pcmk_sched_nodes.c) G_GNUC_INTERNAL bool pcmk__node_available(const pe_node_t *node, bool consider_score, bool consider_guest); G_GNUC_INTERNAL bool pcmk__any_node_available(GHashTable *nodes); G_GNUC_INTERNAL GHashTable *pcmk__copy_node_table(GHashTable *nodes); G_GNUC_INTERNAL void pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy); G_GNUC_INTERNAL void pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup); G_GNUC_INTERNAL GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node); G_GNUC_INTERNAL void pcmk__apply_node_health(pe_working_set_t *data_set); G_GNUC_INTERNAL pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node); // Functions applying to more than one variant (pcmk_sched_resource.c) G_GNUC_INTERNAL void pcmk__set_assignment_methods(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_entry, bool active_on_node); 
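/* Editorial sketch, not upstream code: the trial-assignment contract noted
 * for the assign() method, using pcmk__assign_resource() and
 * pcmk__unassign_resource() declared just below. With stop_if_fail=false, an
 * assignment can be probed and then completely undone, with no side effects
 * on roles or actions:
 *
 *     if (pcmk__assign_resource(rsc, node, false, false)) {
 *         // ... inspect rsc->allocated_to and evaluate the outcome ...
 *         pcmk__unassign_resource(rsc); // full undo
 *     }
 */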
G_GNUC_INTERNAL GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set); G_GNUC_INTERNAL GList *pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); G_GNUC_INTERNAL void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__output_resource_actions(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force, bool stop_if_fail); G_GNUC_INTERNAL void pcmk__unassign_resource(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node, pe_resource_t **failed); G_GNUC_INTERNAL void pcmk__sort_resources(pe_working_set_t *data_set); G_GNUC_INTERNAL gint pcmk__cmp_instance(gconstpointer a, gconstpointer b); G_GNUC_INTERNAL gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b); // Functions related to probes (pcmk_sched_probes.c) G_GNUC_INTERNAL bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_probes(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node); G_GNUC_INTERNAL void pcmk__schedule_probes(pe_working_set_t *data_set); // Functions related to live migration (pcmk_sched_migration.c) void pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current); void pcmk__abort_dangling_migration(void *data, void *user_data); bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current); void pcmk__order_migration_equivalents(pe__ordering_t *order); // Functions related to node utilization (pcmk_sched_utilization.c) G_GNUC_INTERNAL int pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2); G_GNUC_INTERNAL void pcmk__consume_node_capacity(GHashTable *current_utilization, const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__release_node_capacity(GHashTable *current_utilization, const pe_resource_t *rsc); G_GNUC_INTERNAL const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_utilization_constraints(pe_resource_t *rsc, const GList *allowed_nodes); G_GNUC_INTERNAL void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set); #endif // PCMK__LIBPACEMAKER_PRIVATE__H diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index b91c67831f..1202f72723 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -1,1652 +1,1652 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <stdbool.h> #include <stdint.h> // uint8_t, uint32_t #include <crm/msg_xml.h> #include <pacemaker-internal.h> #include "libpacemaker_private.h" static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional); #define RSC_ROLE_MAX (pcmk_role_promoted + 1) static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the immediate next role when transitioning from one role * to a target role.
For example, when going from Stopped to Promoted, the * next role is Unpromoted, because the resource must be started before it * can be promoted. The current state then becomes Unpromoted, which is fed * into this array again, giving a next role of Promoted. * * Current role Immediate next role Final target role * ------------ ------------------- ----------------- */ /* Unknown */ { pcmk_role_unknown, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_stopped, /* Started */ pcmk_role_stopped, /* Unpromoted */ pcmk_role_stopped, /* Promoted */ }, /* Stopped */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_started, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_unpromoted, /* Promoted */ }, /* Started */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_started, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, /* Unpromoted */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_stopped, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, /* Promoted */ { pcmk_role_stopped, /* Unknown */ pcmk_role_unpromoted, /* Stopped */ pcmk_role_unpromoted, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, }; /*! * \internal * \brief Function to schedule actions needed for a role change * * \param[in,out] rsc Resource whose role is changing * \param[in,out] node Node where resource will be in its next role * \param[in] optional Whether scheduled actions should be optional */ typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node, bool optional); static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the function needed to transition directly from one role * to another. NULL indicates that nothing is needed. * * Current role Transition function Next role * ------------ ------------------- ---------- */ /* Unknown */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ assert_role_error, /* Started */ assert_role_error, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Stopped */ { assert_role_error, /* Unknown */ NULL, /* Stopped */ start_resource, /* Started */ start_resource, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Started */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ NULL, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Unpromoted */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ stop_resource, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Promoted */ { assert_role_error, /* Unknown */ demote_resource, /* Stopped */ demote_resource, /* Started */ demote_resource, /* Unpromoted */ NULL, /* Promoted */ }, }; /*! * \internal * \brief Get a list of a resource's allowed nodes sorted by node score * * \param[in] rsc Resource to check * * \return List of allowed nodes sorted by node score */ static GList * sorted_allowed_nodes(const pe_resource_t *rsc) { if (rsc->allowed_nodes != NULL) { GList *nodes = g_hash_table_get_values(rsc->allowed_nodes); if (nodes != NULL) { return pcmk__sort_nodes(nodes, pe__current_node(rsc)); } } return NULL; }
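/* Editorial illustration, not upstream code: how the two matrices above are
 * consumed for a multi-step role change. A hypothetical walk from Stopped to
 * Promoted (rsc, node, and optional as in the transition functions):
 *
 *     enum rsc_role_e role = pcmk_role_stopped;
 *
 *     while (role != pcmk_role_promoted) {
 *         enum rsc_role_e next = rsc_state_matrix[role][pcmk_role_promoted];
 *         rsc_transition_fn fn = rsc_action_matrix[role][next];
 *
 *         if (fn != NULL) {
 *             fn(rsc, node, optional);
 *         }
 *         role = next;
 *     }
 *
 * This visits Stopped -> Unpromoted -> Promoted, calling start_resource() and
 * then promote_resource().
 */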
* \internal * \brief Assign a resource to its best allowed node, if possible * * \param[in,out] rsc Resource to choose a node for * \param[in] prefer If not \c NULL, prefer this node when all else * equal * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a * node, set next role to stopped and update * existing actions * * \return true if \p rsc could be assigned to a node, otherwise false * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. */ static bool assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) { GList *nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); if (prefer == NULL) { prefer = most_free_node; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { // We've already finished assignment of resources to nodes return rsc->allocated_to != NULL; } // Sort allowed nodes by score nodes = sorted_allowed_nodes(rsc); if (nodes != NULL) { best = (pe_node_t *) nodes->data; // First node has best score } if ((prefer != NULL) && (nodes != NULL)) { // Get the allowed node version of prefer chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", pe__node_name(prefer), rsc->id); /* Favor the preferred node as long as its score is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's score is less than INFINITY. */ } else if (chosen->weight < best->weight) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", pe__node_name(chosen), rsc->id); chosen = NULL; } else if (!pcmk__node_available(chosen, true, false)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", pe__node_name(chosen), rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s " "(ignoring %d candidates)", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } } if ((chosen == NULL) && (best != NULL)) { /* Either there is no preferred node, or the preferred node is not * suitable, but another node is allowed to run the resource. */ chosen = best; if (!pe_rsc_is_unique_clone(rsc->parent) && (chosen->weight > 0) // Zero not acceptable && pcmk__node_available(chosen, false, false)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * pcmk__assign_instances() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unassigned instances to prefer a node that's already * running another instance. 
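 *
 * (Editorial illustration with hypothetical scores: if node1 and node2 are
 * both allowed at score 100 and the resource is currently active on node2,
 * the loop below keeps node2 as the choice; if node2 scored only 99, the
 * score-sorted order alone would decide and node1 would win.)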
*/ pe_node_t *running = pe__current_node(rsc); if (running == NULL) { // Nothing to do } else if (!pcmk__node_available(running, true, false)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, pe__node_name(running)); } else { int nodes_with_best_score = 1; for (GList *iter = nodes->next; iter; iter = iter->next) { pe_node_t *allowed = (pe_node_t *) iter->data; if (allowed->weight != chosen->weight) { // The nodes are sorted by score, so no more are equal break; } if (pe__same_node(allowed, running)) { // Scores are equal, so prefer the current node chosen = allowed; } nodes_with_best_score++; } if (nodes_with_best_score > 1) { uint8_t log_level = LOG_INFO; if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "Chose %s for %s from %d nodes with score %s", pe__node_name(chosen), rsc->id, nodes_with_best_score, pcmk_readable_score(chosen->weight)); } } } pe_rsc_trace(rsc, "Chose %s for %s from %d candidates", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } pcmk__assign_resource(rsc, chosen, false, stop_if_fail); g_list_free(nodes); return rsc->allocated_to != NULL; } /*! * \internal * \brief Apply a "this with" colocation to a node's allowed node scores * * \param[in,out] colocation Colocation to apply * \param[in,out] rsc Resource being assigned */ static void apply_this_with(pcmk__colocation_t *colocation, pe_resource_t *rsc) { GHashTable *archive = NULL; pe_resource_t *other = colocation->primary; // In certain cases, we will need to revert the node scores if ((colocation->dependent_role >= pcmk_role_promoted) || ((colocation->score < 0) && (colocation->score > -INFINITY))) { archive = pcmk__copy_node_table(rsc->allowed_nodes); } if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) { pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first" "(score=%d role=%s)", rsc->id, colocation->id, other->id, colocation->score, role2text(colocation->dependent_role)); other->cmds->assign(other, NULL, true); } // Apply the colocation score to this resource's allowed node scores rsc->cmds->apply_coloc_score(rsc, other, colocation, true); if ((archive != NULL) && !pcmk__any_node_available(rsc->allowed_nodes)) { pe_rsc_info(rsc, "%s: Reverting scores from colocation with %s " "because no nodes allowed", rsc->id, other->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive != NULL) { g_hash_table_destroy(archive); } } /*! * \internal * \brief Update a Pacemaker Remote node once its connection has been assigned * * \param[in] connection Connection resource that has been assigned */ static void remote_connection_assigned(const pe_resource_t *connection) { pe_node_t *remote_node = pe_find_node(connection->cluster->nodes, connection->id); CRM_CHECK(remote_node != NULL, return); if ((connection->allocated_to != NULL) && (connection->next_role != pcmk_role_stopped)) { crm_trace("Pacemaker Remote node %s will be online", remote_node->details->id); remote_node->details->online = TRUE; if (remote_node->details->unseen) { // Avoid unnecessary fence, since we will attempt connection remote_node->details->unclean = FALSE; } } else { crm_trace("Pacemaker Remote node %s will be shut down " "(%sassigned connection's next role is %s)", remote_node->details->id, ((connection->allocated_to == NULL)? "un" : ""), role2text(connection->next_role)); remote_node->details->shutdown = TRUE; } } /*! 
* \internal * \brief Assign a primitive resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a * node, set next role to stopped and update * existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. */ pe_node_t * pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) { GList *this_with_colocations = NULL; GList *with_this_colocations = NULL; GList *iter = NULL; pcmk__colocation_t *colocation = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); // Never assign a child without parent being assigned first if ((rsc->parent != NULL) && !pcmk_is_set(rsc->parent->flags, pcmk_rsc_assigning)) { pe_rsc_debug(rsc, "%s: Assigning parent %s first", rsc->id, rsc->parent->id); rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { // Assignment has already been done const char *node_name = "no node"; if (rsc->allocated_to != NULL) { node_name = pe__node_name(rsc->allocated_to); } pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name); return rsc->allocated_to; } // Ensure we detect assignment loops if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) { pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id); return NULL; } pe__set_resource_flags(rsc, pcmk_rsc_assigning); pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes, rsc->cluster); this_with_colocations = pcmk__this_with_colocations(rsc); with_this_colocations = pcmk__with_this_colocations(rsc); // Apply mandatory colocations first, to satisfy as many as possible for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { apply_this_with(colocation, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(colocation, rsc); } } pe__show_node_scores(true, rsc, "Mandatory-colocations", rsc->allowed_nodes, rsc->cluster); // Then apply optional colocations for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { apply_this_with(colocation, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(colocation, rsc); } } g_list_free(this_with_colocations); g_list_free(with_this_colocations); if (rsc->next_role == pcmk_role_stopped) { pe_rsc_trace(rsc, "Banning %s from all nodes because it will be stopped", rsc->id); resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, rsc->cluster); } else if ((rsc->next_role > rsc->role) && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_quorate) && (rsc->cluster->no_quorum_policy == 
pcmk_no_quorum_freeze)) { crm_notice("Resource %s cannot be elevated from %s to %s due to " "no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); } pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pcmk_sched_output_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Unmanage resource if fencing is enabled but no device is configured if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled) && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_have_fencing)) { pe__clear_resource_flags(rsc, pcmk_rsc_managed); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { // Unmanaged resources stay on their current node const char *reason = NULL; pe_node_t *assign_to = NULL; pe__set_next_role(rsc, rsc->role, "unmanaged"); assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == pcmk_role_promoted) { reason = "promoted"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? assign_to->details->uname : "no node"), reason); pcmk__assign_resource(rsc, assign_to, true, stop_if_fail); } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_stop_all)) { // Must stop at some point, but be consistent with stop_if_fail if (stop_if_fail) { pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); } pcmk__assign_resource(rsc, NULL, true, stop_if_fail); } else if (!assign_best_node(rsc, prefer, stop_if_fail)) { // Assignment failed if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if ((rsc->running_on != NULL) && stop_if_fail) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } pe__clear_resource_flags(rsc, pcmk_rsc_assigning); if (rsc->is_remote_node) { remote_connection_assigned(rsc); } return rsc->allocated_to; } /*! * \internal * \brief Schedule actions to bring resource down and back to current role * * \param[in,out] rsc Resource to restart * \param[in,out] current Node that resource should be brought down on * \param[in] need_stop Whether the resource must be stopped * \param[in] need_promote Whether the resource must be promoted */ static void schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current, bool need_stop, bool need_promote) { enum rsc_role_e role = rsc->role; enum rsc_role_e next_role; rsc_transition_fn fn = NULL; pe__set_resource_flags(rsc, pcmk_rsc_restarting); // Bring resource down to a stop on its current node while (role != pcmk_role_stopped) { next_role = rsc_state_matrix[role][pcmk_role_stopped]; pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s", (need_stop? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, current, !need_stop); role = next_role; } // Bring resource up to its next role on its next node while ((rsc->role <= rsc->next_role) && (role != rsc->role) && !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) { bool required = need_stop; next_role = rsc_state_matrix[role][rsc->role]; if ((next_role == pcmk_role_promoted) && need_promote) { required = true; } pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s", (required?
"required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, !required); role = next_role; } pe__clear_resource_flags(rsc, pcmk_rsc_restarting); } /*! * \internal * \brief If a resource's next role is not explicitly specified, set a default * * \param[in,out] rsc Resource to set next role for * * \return "explicit" if next role was explicitly set, otherwise "implicit" */ static const char * set_default_next_role(pe_resource_t *rsc) { if (rsc->next_role != pcmk_role_unknown) { return "explicit"; } if (rsc->allocated_to == NULL) { pe__set_next_role(rsc, pcmk_role_stopped, "assignment"); } else { pe__set_next_role(rsc, pcmk_role_started, "assignment"); } return "implicit"; } /*! * \internal * \brief Create an action to represent an already pending start * * \param[in,out] rsc Resource to create start action for */ static void create_pending_start(pe_resource_t *rsc) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating action for %s to represent already pending start", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); pe__set_action_flags(start, pe_action_print_always); } /*! * \internal * \brief Schedule actions needed to take a resource to its next role * * \param[in,out] rsc Resource to schedule actions for */ static void schedule_role_transition_actions(pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; while (role != rsc->next_role) { enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role]; rsc_transition_fn fn = NULL; pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)", rsc->id, role2text(role), role2text(next_role), role2text(rsc->next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, false); role = next_role; } } /*! * \internal * \brief Create all actions needed for a given primitive resource * * \param[in,out] rsc Primitive resource to create actions for */ void pcmk__primitive_create_actions(pe_resource_t *rsc) { bool need_stop = false; bool need_promote = false; bool is_moving = false; bool allow_migrate = false; bool multiply_active = false; pe_node_t *current = NULL; unsigned int num_all_active = 0; unsigned int num_clean_active = 0; const char *next_role_source = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); next_role_source = set_default_next_role(rsc); pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s " "(%s) on %s", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next_role_source, pe__node_name(rsc->allocated_to)); current = rsc->fns->active_node(rsc, &num_all_active, &num_clean_active); g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration, rsc); if ((current != NULL) && (rsc->allocated_to != NULL) && !pe__same_node(current, rsc->allocated_to) && (rsc->next_role >= pcmk_role_started)) { pe_rsc_trace(rsc, "Moving %s from %s to %s", rsc->id, pe__node_name(current), pe__node_name(rsc->allocated_to)); is_moving = true; allow_migrate = pcmk__rsc_can_migrate(rsc, current); // This is needed even if migrating (though I'm not sure why ...) 
need_stop = true; } // Check whether resource is partially migrated and/or multiply active if ((rsc->partial_migration_source != NULL) && (rsc->partial_migration_target != NULL) && allow_migrate && (num_all_active == 2) && pe__same_node(current, rsc->partial_migration_source) && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) { /* A partial migration is in progress, and the migration target remains * the same as when the migration began. */ pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else if ((rsc->partial_migration_source != NULL) || (rsc->partial_migration_target != NULL)) { // A partial migration is in progress but can't be continued if (num_all_active > 2) { // The resource is migrating *and* multiply active! crm_notice("Forcing recovery of %s because it is migrating " "from %s to %s and possibly active elsewhere", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else { // The migration source or target isn't available crm_notice("Forcing recovery of %s because it can no longer " "migrate from %s to %s", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } need_stop = true; rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = false; } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { multiply_active = (num_all_active > 1); } else { /* If a resource has "requires" set to nothing or quorum, don't consider * it active on unclean nodes (similar to how all resources behave when * stonith-enabled is false). We can start such resources elsewhere * before fencing completes, and if we considered the resource active on * the failed node, we would attempt recovery for being active on * multiple nodes. 
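 *
 * (For example, a resource with "requires" set to quorum that is active on
 * one clean node and one unclean node awaiting fencing has
 * num_clean_active == 1, so the check below does not treat it as multiply
 * active.)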
*/ multiply_active = (num_clean_active > 1); } if (multiply_active) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Resource was (possibly) incorrectly multiply active pe_proc_err("%s resource %s might be active on %u nodes (%s)", pcmk__s(class, "Untyped"), rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ" "#Resource_is_Too_Active for more information"); switch (rsc->recovery_type) { case pcmk_multiply_active_restart: need_stop = true; break; case pcmk_multiply_active_unexpected: need_stop = true; // stop_resource() will skip expected node pe__set_resource_flags(rsc, pe_rsc_stop_unexpected); break; default: break; } } else { pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected); } if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { create_pending_start(rsc); } if (is_moving) { // Remaining tests are only for resources staying where they are } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) { need_stop = true; pe_rsc_trace(rsc, "Recovering %s", rsc->id); } else { pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); if (rsc->next_role == pcmk_role_promoted) { need_promote = true; } } } else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) { pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id); need_stop = true; } else if ((rsc->role > pcmk_role_started) && (current != NULL) && (rsc->allocated_to != NULL)) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating start action for promoted resource %s", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); if (!pcmk_is_set(start->flags, pe_action_optional)) { // Recovery of a promoted resource pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id); need_stop = true; } } // Create any actions needed to bring resource down and back up to same role schedule_restart_actions(rsc, current, need_stop, need_promote); // Create any actions needed to take resource from this role to the next schedule_role_transition_actions(rsc); pcmk__create_recurring_actions(rsc); if (allow_migrate) { pcmk__create_migration_actions(rsc, current); } } /*! * \internal * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes * * \param[in] rsc Resource to check */ static void rsc_avoids_remote_nodes(const pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (node->details->remote_rsc != NULL) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. */ static GList * allowed_nodes_as_list(const pe_resource_t *rsc) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (!pcmk__is_daemon) { allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name); } return allowed_nodes; } /*! 
* \internal * \brief Create implicit constraints needed for a primitive resource * * \param[in,out] rsc Primitive resource to create implicit constraints for */ void pcmk__primitive_internal_constraints(pe_resource_t *rsc) { GList *allowed_nodes = NULL; bool check_unfencing = false; bool check_utilization = false; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pe_rsc_trace(rsc, "Skipping implicit constraints for unmanaged resource %s", rsc->id); return; } // Whether resource requires unfencing check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pcmk_sched_enable_unfencing) && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && !pcmk__str_eq(rsc->cluster->placement_strategy, "default", pcmk__str_casei); // Order stops before starts (i.e. restart) pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, pe_order_optional|pe_order_implies_then|pe_order_restart, rsc->cluster); // Promotable ordering: demote before stop, start before promote if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk_rsc_promotable) || (rsc->role > pcmk_role_unpromoted)) { pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, pe_order_promoted_implies_first, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0), NULL, pe_order_runnable_left, rsc->cluster); } // Don't clear resource history if probing on same node pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0), NULL, pe_order_same_node|pe_order_then_cancels_first, rsc->cluster); // Certain checks need allowed nodes if (check_unfencing || check_utilization || (rsc->container != NULL)) { allowed_nodes = allowed_nodes_as_list(rsc); } if (check_unfencing) { g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc); } if (check_utilization) { pcmk__create_utilization_constraints(rsc, allowed_nodes); } if (rsc->container != NULL) { pe_resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Guest resources are not allowed to run on Pacemaker Remote nodes, * to avoid nesting remotes. However, bundles are allowed. */ - if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { + if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR, rsc, PCMK_ACTION_STOP, pe_order_optional); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. 
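 * (For instance, a primitive whose container meta-attribute names a remote
 * connection resource, say a hypothetical "remote1", ends up banned from
 * every node except remote1's own node by the loop further below.)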
But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(rsc->cluster, rsc->container); } if (remote_rsc != NULL) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. */ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); pcmk__new_ordering(rsc->container, pcmk__op_key(rsc->container->id, PCMK_ACTION_START, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, pe_order_implies_then|pe_order_runnable_left, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, rsc->container, pcmk__op_key(rsc->container->id, PCMK_ACTION_STOP, 0), NULL, pe_order_implies_first, rsc->cluster); - if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { + if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } pcmk__new_colocation("#resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, pcmk__coloc_influence); } } if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) { /* Remote connections and fencing devices are not allowed to run on * Pacemaker Remote nodes */ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { enum pcmk__coloc_affects filter_results; CRM_ASSERT((dependent != NULL) && (primary != NULL) && (colocation != NULL)); if (for_dependent) { // Always process on behalf of primary resource primary->cmds->apply_coloc_score(dependent, primary, colocation, false); return; } filter_results = pcmk__colocation_affects(dependent, primary, colocation, false); pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)", ((colocation->score > 0)? 
"Colocating" : "Anti-colocating"), dependent->id, primary->id, colocation->id, colocation->score, filter_results); switch (filter_results) { case pcmk__coloc_affects_role: pcmk__apply_coloc_to_priority(dependent, primary, colocation); break; case pcmk__coloc_affects_location: pcmk__apply_coloc_to_scores(dependent, primary, colocation); break; default: // pcmk__coloc_affects_nothing return; } } /* Primitive implementation of * resource_alloc_functions_t:with_this_colocations() */ void pcmk__with_primitive_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (list != NULL)); if (rsc == orig_rsc) { /* For the resource itself, add all of its own colocations and relevant * colocations from its parent (if any). */ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list); } } else { // For an ancestor, add only explicitly configured constraints for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) { pcmk__colocation_t *colocation = iter->data; if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { pcmk__add_with_this(list, colocation, orig_rsc); } } } } /* Primitive implementation of * resource_alloc_functions_t:this_with_colocations() */ void pcmk__primitive_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (list != NULL)); if (rsc == orig_rsc) { /* For the resource itself, add all of its own colocations and relevant * colocations from its parent (if any). */ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list); } } else { // For an ancestor, add only explicitly configured constraints for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) { pcmk__colocation_t *colocation = iter->data; if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { pcmk__add_this_with(list, colocation, orig_rsc); } } } } /*! * \internal * \brief Return action flags for a given primitive resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node (ignored) * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node) { CRM_ASSERT(action != NULL); return (uint32_t) action->flags; } /*! * \internal * \brief Check whether a node is a multiply active resource's expected node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p rsc is multiply active with multiple-active set to * stop_unexpected, and \p node is the node where it will remain active * \note This assumes that the resource's next role cannot be changed to stopped * after this is called, which should be reasonable if status has already * been unpacked and resources have been assigned to nodes. */ static bool is_expected_node(const pe_resource_t *rsc, const pe_node_t *node) { return pcmk_all_flags_set(rsc->flags, pe_rsc_stop_unexpected|pcmk_rsc_restarting) && (rsc->next_role > pcmk_role_stopped) && pe__same_node(rsc->allocated_to, node); } /*! 
* \internal * \brief Schedule actions needed to stop a resource wherever it is active * * \param[in,out] rsc Resource being stopped * \param[in] node Node where resource is being stopped (ignored) * \param[in] optional Whether actions should be optional */ static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; pe_action_t *stop = NULL; if (is_expected_node(rsc, current)) { /* We are scheduling restart actions for a multiply active resource * with multiple-active=stop_unexpected, and this is where it should * not be stopped. */ pe_rsc_trace(rsc, "Skipping stop of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); continue; } if (rsc->partial_migration_target != NULL) { // Continue migration if node originally was and remains target if (pe__same_node(current, rsc->partial_migration_target) && pe__same_node(current, rsc->allocated_to)) { pe_rsc_trace(rsc, "Skipping stop of %s on %s " "because partial migration there will continue", rsc->id, pe__node_name(current)); continue; } else { pe_rsc_trace(rsc, "Forcing stop of %s on %s " "because migration target changed", rsc->id, pe__node_name(current)); optional = false; } } pe_rsc_trace(rsc, "Scheduling stop of %s on %s", rsc->id, pe__node_name(current)); stop = stop_action(rsc, current, optional); if (rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", true); } else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting |pe_rsc_stop_unexpected)) { /* We are stopping a multiply active resource on a node that is * not its expected node, and we are still scheduling restart * actions, so the stop is for being multiply active. */ pe_action_set_reason(stop, "being multiply active", true); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pe__clear_action_flags(stop, pe_action_runnable); } if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_remove_after_stop)) { pcmk__schedule_cleanup(rsc, current, optional); } if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { pe_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true, NULL, false, rsc->cluster); order_actions(stop, unfence, pe_order_implies_first); if (!pcmk__node_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, pe__node_name(current)); } } } } /*! * \internal * \brief Schedule actions needed to start a resource on a node * * \param[in,out] rsc Resource being started * \param[in,out] node Node where resource should be started * \param[in] optional Whether actions should be optional */ static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { pe_action_t *start = NULL; CRM_ASSERT(node != NULL); pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)", (optional? "optional" : "required"), rsc->id, pe__node_name(node), node->weight); start = start_action(rsc, node, TRUE); pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then); if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) { pe__clear_action_flags(start, pe_action_optional); } if (is_expected_node(rsc, node)) { /* This could be a problem if the start becomes necessary for other * reasons later. */ pe_rsc_trace(rsc, "Start of multiply active resource %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(start, pe_action_pseudo); } } /*!
* \internal * \brief Schedule actions needed to promote a resource on a node * * \param[in,out] rsc Resource being promoted * \param[in] node Node where resource should be promoted * \param[in] optional Whether actions should be optional */ static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { GList *iter = NULL; GList *action_list = NULL; bool runnable = true; CRM_ASSERT(node != NULL); // Any start must be runnable for promotion to be runnable action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *start = (pe_action_t *) iter->data; if (!pcmk_is_set(start->flags, pe_action_runnable)) { runnable = false; } } g_list_free(action_list); if (runnable) { pe_action_t *promote = promote_action(rsc, node, optional); pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(node)); if (is_expected_node(rsc, node)) { /* This could be a problem if the promote becomes necessary for * other reasons later. */ pe_rsc_trace(rsc, "Promotion of multiply active resource %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(promote, pe_action_pseudo); } } else { pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable", rsc->id, pe__node_name(node)); action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *promote = (pe_action_t *) iter->data; pe__clear_action_flags(promote, pe_action_runnable); } g_list_free(action_list); } } /*! * \internal * \brief Schedule actions needed to demote a resource wherever it is active * * \param[in,out] rsc Resource being demoted * \param[in] node Node where resource should be demoted (ignored) * \param[in] optional Whether actions should be optional */ static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { /* Since this will only be called for a primitive (possibly as an instance * of a collective resource), the resource is multiply active if it is * running on more than one node, so we want to demote on all of them as * part of recovery, regardless of which one is the desired node. */ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; if (is_expected_node(rsc, current)) { pe_rsc_trace(rsc, "Skipping demote of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); } else { pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(current)); demote_action(rsc, current, optional); } } } static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional) { CRM_ASSERT(false); } /*! * \internal * \brief Schedule cleanup of a resource * * \param[in,out] rsc Resource to clean up * \param[in] node Node to clean up on * \param[in] optional Whether clean-up should be optional */ void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional) { /* If the cleanup is required, its orderings are optional, because they're * relevant only if both actions are required. Conversely, if the cleanup is * optional, the orderings make the then action required if the first action * becomes required. */ uint32_t flag = optional?
pe_order_implies_then : pe_order_optional; CRM_CHECK((rsc != NULL) && (node != NULL), return); if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed", rsc->id, pe__node_name(node)); return; } if (node->details->unclean || !node->details->online) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable", rsc->id, pe__node_name(node)); return; } crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node)); delete_action(rsc, node, optional); // stop -> clean-up -> start pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, rsc, PCMK_ACTION_DELETE, flag); pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE, rsc, PCMK_ACTION_START, flag); } /*! * \internal * \brief Add primitive meta-attributes relevant to graph actions to XML * * \param[in] rsc Primitive resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml) { char *name = NULL; char *value = NULL; const pe_resource_t *parent = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (xml != NULL)); /* Clone instance numbers get set internally as meta-attributes, and are * needed in the transition graph (for example, to tell unique clone * instances apart). */ value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } // Not sure if this one is really needed ... value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } /* The container meta-attribute can be set on the primitive itself or one of * its parents (for example, a group inside a container resource), so check * them all, and keep the highest one found. */ for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container != NULL) { crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id); } } /* Bundle replica children will get their external-ip set internally as a * meta-attribute. The graph action needs it, but under a different naming * convention than other meta-attributes. */ value = g_hash_table_lookup(rsc->meta, "external-ip"); if (value != NULL) { crm_xml_add(xml, "pcmk_external_ip", value); } } // Primitive implementation of resource_alloc_functions_t:add_utilization() void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (orig_rsc != NULL) && (utilization != NULL)); if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { return; } pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization", orig_rsc->id, rsc->id); pcmk__release_node_capacity(utilization, rsc); } /*! * \internal * \brief Get epoch time of node's shutdown attribute (or now if none) * * \param[in,out] node Node to check * * \return Epoch time corresponding to shutdown attribute if set or now if not */ static time_t shutdown_time(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); time_t result = 0; if (shutdown != NULL) { long long result_ll; if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) { result = (time_t) result_ll; } } return (result == 0)? 
get_effective_time(node->details->data_set) : result; } /*! * \internal * \brief Ban a resource from a node if it's not locked to the node * * \param[in] data Node to check * \param[in,out] user_data Resource to check */ static void ban_if_not_locked(gpointer data, gpointer user_data) { const pe_node_t *node = (const pe_node_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) { resource_location(rsc, node, -CRM_SCORE_INFINITY, XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster); } } // Primitive implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__primitive_shutdown_lock(pe_resource_t *rsc) { const char *class = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Fence devices and remote connections can't be locked if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches) || pe__resource_is_remote_conn(rsc)) { return; } if (rsc->lock_node != NULL) { // The lock was obtained from resource history if (rsc->running_on != NULL) { /* The resource was started elsewhere even though it is now * considered locked. This shouldn't be possible, but as a * failsafe, we don't want to disturb the resource now. */ pe_rsc_info(rsc, "Cancelling shutdown lock because %s is already active", rsc->id); pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster); rsc->lock_node = NULL; rsc->lock_time = 0; } // Only a resource active on exactly one node can be locked } else if (pcmk__list_of_1(rsc->running_on)) { pe_node_t *node = rsc->running_on->data; if (node->details->shutdown) { if (node->details->unclean) { pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", rsc->id, pe__node_name(node)); } else { rsc->lock_node = node; rsc->lock_time = shutdown_time(node); } } } if (rsc->lock_node == NULL) { // No lock needed return; } if (rsc->cluster->shutdown_lock > 0) { time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock; pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", rsc->id, pe__node_name(rsc->lock_node), (long long) lock_expiration); pe__update_recheck_time(++lock_expiration, rsc->cluster); } else { pe_rsc_info(rsc, "Locking %s to %s due to shutdown", rsc->id, pe__node_name(rsc->lock_node)); } // If resource is locked to one node, ban it from all other nodes g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc); } diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index 48936916e7..ee8c560c23 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,2216 +1,2217 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include enum pe__bundle_mount_flags { pe__bundle_mount_none = 0x00, // mount instance-specific subdirectory rather than source directly pe__bundle_mount_subdir = 0x01 }; typedef struct { char *source; char *target; char *options; uint32_t flags; // bitmask of pe__bundle_mount_flags } pe__bundle_mount_t; typedef struct { char *source; char *target; } pe__bundle_port_t; enum pe__container_agent { PE__CONTAINER_AGENT_UNKNOWN, PE__CONTAINER_AGENT_DOCKER, PE__CONTAINER_AGENT_RKT, PE__CONTAINER_AGENT_PODMAN, }; #define PE__CONTAINER_AGENT_UNKNOWN_S "unknown" #define PE__CONTAINER_AGENT_DOCKER_S "docker" #define PE__CONTAINER_AGENT_RKT_S "rkt" #define PE__CONTAINER_AGENT_PODMAN_S "podman" typedef struct pe__bundle_variant_data_s { int promoted_max; int nreplicas; int nreplicas_per_host; char *prefix; char *image; const char *ip_last; char *host_network; char *host_netmask; char *control_port; char *container_network; char *ip_range_start; gboolean add_host; gchar *container_host_options; char *container_command; char *launcher_options; const char *attribute_target; pe_resource_t *child; GList *replicas; // pe__bundle_replica_t * GList *ports; // pe__bundle_port_t * GList *mounts; // pe__bundle_mount_t * enum pe__container_agent agent_type; } pe__bundle_variant_data_t; #define get_bundle_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle); \ CRM_ASSERT(rsc->variant_opaque != NULL); \ data = (pe__bundle_variant_data_t *) rsc->variant_opaque; /*! * \internal * \brief Get maximum number of bundle replicas allowed to run * * \param[in] rsc Bundle or bundled resource to check * * \return Maximum replicas for bundle corresponding to \p rsc */ int pe__bundle_max(const pe_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->nreplicas; } /*! * \internal * \brief Get the resource inside a bundle * * \param[in] bundle Bundle to check * * \return Resource inside \p bundle if any, otherwise NULL */ pe_resource_t * pe__bundled_resource(const pe_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->child; } /*! * \internal * \brief Get containerized resource corresponding to a given bundle container * * \param[in] instance Collective instance that might be a bundle container * * \return Bundled resource instance inside \p instance if it is a bundle * container instance, otherwise NULL */ const pe_resource_t * pe__get_rsc_in_container(const pe_resource_t *instance) { const pe__bundle_variant_data_t *data = NULL; const pe_resource_t *top = pe__const_top_resource(instance, true); if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) { return NULL; } get_bundle_variant_data(data, top); for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) { const pe__bundle_replica_t *replica = iter->data; if (instance == replica->container) { return replica->child; } } return NULL; } /*! 
* \internal * \brief Check whether a given node is created by a bundle * * \param[in] bundle Bundle resource to check * \param[in] node Node to check * * \return true if \p node is an instance of \p bundle, otherwise false */ bool pe__node_is_bundle_instance(const pe_resource_t *bundle, const pe_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; if (pe__same_node(node, replica->node)) { return true; } } return false; } /*! * \internal * \brief Get the container of a bundle's first replica * * \param[in] bundle Bundle resource to get container for * * \return Container resource from first replica of \p bundle if any, * otherwise NULL */ pe_resource_t * pe__first_container(const pe_resource_t *bundle) { const pe__bundle_variant_data_t *bundle_data = NULL; const pe__bundle_replica_t *replica = NULL; get_bundle_variant_data(bundle_data, bundle); if (bundle_data->replicas == NULL) { return NULL; } replica = bundle_data->replicas->data; return replica->container; } /*! * \internal * \brief Iterate over bundle replicas * * \param[in,out] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_bundle_replica(pe_resource_t *bundle, bool (*fn)(pe__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((pe__bundle_replica_t *) iter->data, user_data)) { break; } } } /*! 
* \internal * \brief Iterate over const bundle replicas * * \param[in] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_const_bundle_replica(const pe_resource_t *bundle, bool (*fn)(const pe__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (const GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((const pe__bundle_replica_t *) iter->data, user_data)) { break; } } } static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static void allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, GString *buffer) { if(data->ip_range_start == NULL) { return; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { g_string_append_printf(buffer, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } else { g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); break; default: // PE__CONTAINER_AGENT_UNKNOWN break; } } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in,out] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
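 *
 * (Illustration, assuming standard bundle XML: a <network
 * ip-range-start="192.168.122.131"/> element is always manageable, while
 * <network control-port="3121"/> without an IP range is manageable only when
 * replicas-per-host is 1, which the code below enforces.)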
*/ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->nreplicas_per_host = 1; // @TODO to be sure: // pe__clear_resource_flags(rsc, pcmk_rsc_unique); } return TRUE; } return FALSE; } static int create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); crm_create_op_xml(xml_obj, ID(xml_ip), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_ip, &replica->ip, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->children = g_list_append(parent->children, replica->ip); } return pcmk_rc_ok; } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static int create_container_resource(pe_resource_t *parent, const pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; // Agent-specific const char *hostname_opt = NULL; const char *env_opt = NULL; const char *agent_str = NULL; int volid = 0; // rkt-only GString *buffer = NULL; GString *dbuffer = NULL; // Where syntax differences are drop-in replacements, set them now switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: hostname_opt = "-h "; env_opt = "-e "; break; case PE__CONTAINER_AGENT_RKT: hostname_opt = "--hostname="; env_opt = "--environment="; break; default: // PE__CONTAINER_AGENT_UNKNOWN return pcmk_rc_unpack_error; } agent_str = container_agent_str(data->agent_type); buffer = g_string_sized_new(4096); id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", agent_str); free(id); xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE); if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) { g_string_append(buffer, " --restart=no"); } /* Set a container hostname only if we 
have an IP to map it to. The user can * set -h or --uts=host themselves if they want a nicer name for logs, but * this keeps applications happy that need their hostname to match the IP * they bind to. */ if (data->ip_range_start != NULL) { g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix, replica->offset); } pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL); if (data->container_network != NULL) { pcmk__g_strcat(buffer, " --net=", data->container_network, NULL); } if (data->control_port != NULL) { pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=", data->control_port, NULL); } else { g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt, DEFAULT_REMOTE_PORT); } for (GList *iter = data->mounts; iter != NULL; iter = iter->next) { pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data; char *source = NULL; if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) { source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix, replica->offset); pcmk__add_separated_word(&dbuffer, 1024, source, ","); } switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: pcmk__g_strcat(buffer, " -v ", pcmk__s(source, mount->source), ":", mount->target, NULL); if (mount->options != NULL) { pcmk__g_strcat(buffer, ":", mount->options, NULL); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --volume vol%d,kind=host," "source=%s%s%s " "--mount volume=vol%d,target=%s", volid, pcmk__s(source, mount->source), (mount->options != NULL)? "," : "", pcmk__s(mount->options, ""), volid, mount->target); volid++; break; default: break; } free(source); } for (GList *iter = data->ports; iter != NULL; iter = iter->next) { pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " -p ", replica->ipaddr, ":", port->source, ":", port->target, NULL); } else if (!pcmk__str_eq(data->container_network, "host", pcmk__str_none)) { // No need to do port mapping if net == host pcmk__g_strcat(buffer, " -p ", port->source, ":", port->target, NULL); } break; case PE__CONTAINER_AGENT_RKT: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " --port=", port->target, ":", replica->ipaddr, ":", port->source, NULL); } else { pcmk__g_strcat(buffer, " --port=", port->target, ":", port->source, NULL); } break; default: break; } } /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because * it would cause restarts during rolling upgrades. * * In a previous version of the container resource creation logic, if * data->launcher_options is not NULL, we append * (" %s", data->launcher_options) even if data->launcher_options is an * empty string. Likewise for data->container_host_options. Using * * pcmk__add_word(buffer, 0, data->launcher_options) * * removes that extra trailing space, causing a resource definition change. */ if (data->launcher_options != NULL) { pcmk__g_strcat(buffer, " ", data->launcher_options, NULL); } if (data->container_host_options != NULL) { pcmk__g_strcat(buffer, " ", data->container_host_options, NULL); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", (const char *) buffer->str); g_string_free(buffer, TRUE); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", (dbuffer != NULL)?
(const char *) dbuffer->str : ""); if (dbuffer != NULL) { g_string_free(dbuffer, TRUE); } if (replica->child != NULL) { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive; we'll monitor the * child independently. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); #if 0 /* @TODO Consider supporting the use case where we can start and stop * resources, but not proxy local commands (such as setting node * attributes), by running the local executor in stand-alone mode. * However, this would probably be better done via ACLs as with other * Pacemaker Remote nodes. */ } else if ((child != NULL) && data->untrusted) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", CRM_DAEMON_DIR "/pacemaker-execd"); crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke"); #endif } else { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want to know if it * is alive. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_container, "operations"); crm_create_op_xml(xml_obj, ID(xml_container), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_container, &replica->container, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } pe__set_resource_flags(replica->container, pe_rsc_replica_container); parent->children = g_list_append(parent->children, replica->container); return pcmk_rc_ok; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(pe_resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pe_node_t *) match)->weight = -INFINITY; ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never; } if (rsc->children) { g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname); } } static int create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { if (replica->child && valid_network(data)) { GHashTableIter gIter; pe_node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; if (pe_find_resource(parent->cluster->resources, id) != NULL) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); //@TODO return error instead of asserting? CRM_ASSERT(pe_find_resource(parent->cluster->resources, id) == NULL); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? 
replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = pcmk__itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during data set cleanup to use as * the node ID and uname. */ free(id); id = NULL; uname = ID(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(parent->cluster->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, "remote", "-INFINITY", parent->cluster); } else { node->weight = -INFINITY; } node->rsc_discover_mode = pe_discover_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pe_node_t entry. Ideally, it would do the same for bundle nodes. * Unfortunately, a bundle has to be mostly unpacked before it's obvious * what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when pe__unpack_resource() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ g_list_foreach(parent->cluster->resources, (GFunc) disallow_node, (gpointer) uname); replica->node = pe__copy_node(node); replica->node->weight = 500; replica->node->rsc_discover_mode = pe_discover_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->allowed_nodes); } replica->child->allowed_nodes = pcmk__strkey_table(NULL, free); g_hash_table_insert(replica->child->allowed_nodes, (gpointer) replica->node->details->id, pe__copy_node(replica->node)); { pe_node_t *copy = pe__copy_node(replica->node); copy->weight = -INFINITY; g_hash_table_insert(replica->child->parent->allowed_nodes, (gpointer) replica->node->details->id, copy); } if (pe__unpack_resource(xml_remote, &replica->remote, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pe__is_guest_or_remote_node(node)) { /* Remote resources can only run on 'normal' cluster nodes */ node->weight = -INFINITY; } } replica->node->details->remote_rsc = replica->remote; // Ensure pe__is_guest_node() functions correctly immediately replica->remote->container = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). 
*/ g_hash_table_insert(replica->node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); /* One effect of this is that setup_container() will add * replica->remote to replica->container's fillers, which will make * pe__resource_contains_guest_node() true for replica->container. * * replica->child does NOT get added to replica->container's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking replica->container's migration * threshold. */ parent->children = g_list_append(parent->children, replica->remote); } return pcmk_rc_ok; } static int create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { int rc = pcmk_rc_ok; rc = create_container_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_ip_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_remote_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } if ((replica->child != NULL) && (replica->ipaddr != NULL)) { add_hash_param(replica->child->meta, "external-ip", replica->ipaddr); } if (replica->remote != NULL) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with pacemaker-remoted inside in order to start * services inside those containers. */ - pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes); + pe__set_resource_flags(replica->remote, + pcmk_rsc_remote_nesting_allowed); } return rc; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t)); CRM_ASSERT(mount != NULL); mount->source = strdup(source); mount->target = strdup(target); pcmk__str_update(&mount->options, options); mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pe__bundle_replica_t * replica_for_remote(pe_resource_t *remote) { pe_resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->parent != NULL) { top = top->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pe_resource_t *rsc) { const char *value; GHashTable *params = NULL; if (rsc == NULL) { return false; } // Use NULL node since pcmk__bundle_expand() uses that to set value params = pe_rsc_params(rsc, NULL, rsc->cluster); value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR); return pcmk__str_eq(value, "#uname", pcmk__str_casei) && xml_contains_remote_node(rsc->xml); } const char * pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set, xmlNode *xml, const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pe_node_t *node = NULL; pe__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc)) { return NULL; } 
replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->allocated_to; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. */ node = pe__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, pe__node_name(node)); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->details->uname); } return node->details->uname; } #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Bundle mount", ID(mount_xml), flags, \ (flags_to_set), #flags_to_set); \ } while (0) gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t)); rsc->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_RKT; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } else { return FALSE; } } } // Use 0 for default, minimum, and invalid promoted-max value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX); if (value == NULL) { // @COMPAT deprecated since 2.0.0 value = crm_element_value(xml_obj, "masters"); } pcmk__scan_min_int(value, &bundle_data->promoted_max, 0); // Default replicas to promoted-max if it was specified and 1 otherwise value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (bundle_data->promoted_max > 0)) { bundle_data->nreplicas = bundle_data->promoted_max; } else { pcmk__scan_min_int(value, &bundle_data->nreplicas, 1); } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1); if (bundle_data->nreplicas_per_host == 1) { pe__clear_resource_flags(rsc, pcmk_rsc_unique); } bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command"); bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options"); bundle_data->image = crm_element_value_copy(xml_obj, "image"); bundle_data->container_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port"); value = crm_element_value(xml_obj, "add-host"); if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) { bundle_data->add_host = TRUE; } for (xmlNode *xml_child = 
pcmk__xe_first_child(xml_obj); xml_child != NULL; xml_child = pcmk__xe_next(xml_child)) { pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL; xml_child = pcmk__xe_next(xml_child)) { const char *source = crm_element_value(xml_child, "source-dir"); const char *target = crm_element_value(xml_child, "target-dir"); const char *options = crm_element_value(xml_child, "options"); int flags = pe__bundle_mount_none; if (source == NULL) { source = crm_element_value(xml_child, "source-dir-root"); pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pe_err("Invalid mount directive %s", ID(xml_child)); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(bundle_data)) { char *value = NULL; xmlNode *xml_set = NULL; xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); /* @COMPAT We no longer use the <master> tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) */ crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix, (bundle_data->promoted_max? "master" : (const char *)xml_resource->name)); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE); value = pcmk__itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = pcmk__itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE, pcmk__btoa(bundle_data->nreplicas_per_host > 1)); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE); value = pcmk__itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GList *childIter = NULL; pe__bundle_port_t *port = NULL; GString *buffer = NULL; if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc, data_set) != pcmk_rc_ok) { return FALSE; } /* Currently, we always map the default authentication key location * into the same location inside the container. 
* * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = calloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting control-port. */ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = g_string_sized_new(1024); for (childIter = bundle_data->child->children; childIter != NULL; childIter = childIter->next) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->child = childIter->data; replica->child->exclusive_discover = TRUE; replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) { pe__set_resource_flags(bundle_data->child, pcmk_rsc_notify); } allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta, XML_RSC_ATTR_TARGET); } bundle_data->container_host_options = g_string_free(buffer, FALSE); if (bundle_data->attribute_target) { g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); g_hash_table_replace(bundle_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); } } else { // Just a naked container, no pacemaker-remote GString *buffer = g_string_sized_new(1024); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->offset = lpc; allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = g_string_free(buffer, FALSE); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) { pe_err("Failed unpacking resource %s", rsc->id); rsc->fns->free(rsc); return FALSE; } /* Utilization needs special handling for 
bundles. It makes no sense for * the inner primitive to have utilization, because it is tied * one-to-one to the guest node created by the container resource -- and * there's no way to set capacities for that guest node anyway. * * What the user really wants is to configure utilization for the * container. However, the schema only allows utilization for * primitives, and the container resource is implicit anyway, so the * user can *only* configure utilization for the inner primitive. If * they do, move the primitive's utilization values to the container. * * @TODO This means that bundles without an inner primitive can't have * utilization. An alternative might be to allow utilization values in * the top-level bundle XML in the schema, and copy those to each * container. */ if (replica->child != NULL) { GHashTable *empty = replica->container->utilization; replica->container->utilization = replica->child->utilization; replica->child->utilization = empty; } } if (bundle_data->child) { rsc->children = g_list_append(rsc->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pe_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all) { pe__bundle_variant_data_t *bundle_data = NULL; GList *iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! * \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pe_resource_t * pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_ASSERT(bundle && node); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica && replica->node); if (replica->node->details == node->details) { return replica->child; } } return NULL; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
  • "); } rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } /*! * \internal * \deprecated This function will be removed in a future release */ static void bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_strdup_printf("%s ", pre_text); get_bundle_variant_data(bundle_data, rsc); status_print("%sid); status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type)); status_print("image=\"%s\" ", bundle_data->image); status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique)); status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_managed)); status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed)); status_print(">\n"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); status_print("%s \n", pre_text, replica->offset); print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; const char *desc = NULL; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; char *id = NULL; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) { continue; } if (!printed_header) { printed_header = TRUE; desc = pe__resource_description(rsc, show_opts); rc = pe__name_and_nvpairs_xml(out, true, "bundle", 8, "id", rsc->id, "type", container_agent_str(bundle_data->agent_type), "image", bundle_data->image, "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique), "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance), "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed), "failed", pe__rsc_bool_str(rsc, pe_rsc_failed), "description", desc); CRM_ASSERT(rc == pcmk_rc_ok); } id = pcmk__itoa(replica->offset); rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id); free(id); 
CRM_ASSERT(rc == pcmk_rc_ok); if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), show_opts, replica->remote, only_node, only_rsc); } pcmk__output_xml_pop_parent(out); // replica } if (printed_header) { pcmk__output_xml_pop_parent(out); // bundle } return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica, pe_node_t *node, uint32_t show_opts) { pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_html(out, rsc, buffer, node, show_opts); } /*! * \internal * \brief Get a string describing a resource's unmanaged state or lack thereof * * \param[in] rsc Resource to describe * * \return A string indicating that a resource is in maintenance mode or * otherwise unmanaged, or an empty string otherwise */ static const char * get_unmanaged_str(const pe_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { return " (maintenance)"; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { return " (unmanaged)"; } return ""; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); desc = pe__resource_description(rsc, show_opts); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. 
*/ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset); } if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } if (pcmk__list_of_multiple(bundle_data->replicas)) { out->end_list(out); } } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica, pe_node_t *node, uint32_t show_opts) { const pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_text(out, rsc, buffer, node, show_opts); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; CRM_ASSERT(rsc != NULL); desc = pe__resource_description(rsc, show_opts); get_bundle_variant_data(bundle_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = 
!replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } out->end_list(out); } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text, long options, void *print_data) { pe_node_t *node = NULL; pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pe__current_node(replica->container); common_print(rsc, pre_text, buffer, node, options, print_data); } /*! * \internal * \deprecated This function will be removed in a future release */ void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { bundle_print_xml(rsc, pre_text, options, print_data); return; } get_bundle_variant_data(bundle_data, rsc); if (pre_text == NULL) { pre_text = " "; } status_print("%sContainer bundle%s: %s [%s]%s%s\n", pre_text, ((bundle_data->nreplicas > 1)? " set" : ""), rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? 
" (unique)" : "", pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (options & pe_print_html) { status_print("<li>"); } if (pcmk_is_set(options, pe_print_implicit)) { child_text = crm_strdup_printf(" %s", pre_text); if (pcmk__list_of_multiple(bundle_data->replicas)) { status_print(" %sReplica[%d]\n", pre_text, replica->offset); } if (options & pe_print_html) { status_print("<br />\n<ul>\n"); } print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); if (options & pe_print_html) { status_print("</ul>\n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); print_bundle_replica(replica, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("</li>\n"); } } if (options & pe_print_html) { status_print("</ul>\n"); } }
    \n"); } } static void free_bundle_replica(pe__bundle_replica_t *replica) { if (replica == NULL) { return; } if (replica->node) { free(replica->node); replica->node = NULL; } if (replica->ip) { free_xml(replica->ip->xml); replica->ip->xml = NULL; replica->ip->fns->free(replica->ip); replica->ip = NULL; } if (replica->container) { free_xml(replica->container->xml); replica->container->xml = NULL; replica->container->fns->free(replica->container); replica->container = NULL; } if (replica->remote) { free_xml(replica->remote->xml); replica->remote->xml = NULL; replica->remote->fns->free(replica->remote); replica->remote = NULL; } free(replica->ipaddr); free(replica); } void pe__free_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); g_free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); if(bundle_data->child) { free_xml(bundle_data->child->xml); bundle_data->child->xml = NULL; bundle_data->child->fns->free(bundle_data->child); } common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current) { enum rsc_role_e container_role = pcmk_role_unknown; return container_role; } /*! 
* \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const pe_resource_t *rsc) { if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) { return 0; } else { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } } void pe__count_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pe__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->fns->count(replica->ip); } if (replica->child) { replica->child->fns->count(replica->child); } if (replica->container) { replica->container->fns->count(replica->container); } if (replica->remote) { replica->remote->fns->count(replica->remote); } } } gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; pe__bundle_variant_data_t *bundle_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) { passes = TRUE; break; } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } /*! * \internal * \brief Get a list of a bundle's containers * * \param[in] bundle Bundle resource * * \return Newly created list of \p bundle's containers * \note It is the caller's responsibility to free the result with * g_list_free(). */ GList * pe__bundle_containers(const pe_resource_t *bundle) { GList *containers = NULL; const pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, bundle); for (GList *iter = data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; containers = g_list_append(containers, replica->container); } return containers; } // Bundle implementation of resource_object_functions_t:active_node() pe_node_t * pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pe_node_t *active = NULL; pe_node_t *node = NULL; pe_resource_t *container = NULL; GList *containers = NULL; GList *iter = NULL; GHashTable *nodes = NULL; const pe__bundle_variant_data_t *data = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } /* For the purposes of this method, we only care about where the bundle's * containers are active, so build a list of active containers. */ get_bundle_variant_data(data, rsc); for (iter = data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; if (replica->container->running_on != NULL) { containers = g_list_append(containers, replica->container); } } if (containers == NULL) { return NULL; } /* If the bundle has only a single active container, just use that * container's method. 
If live migration is ever supported for bundle * containers, this will allow us to prefer the migration source when there * is only one container and it is migrating. For now, this just lets us * avoid creating the nodes table. */ if (pcmk__list_of_1(containers)) { container = containers->data; node = container->fns->active_node(container, count_all, count_clean); g_list_free(containers); return node; } // Add all containers' active nodes to a hash table (for uniqueness) nodes = g_hash_table_new(NULL, NULL); for (iter = containers; iter != NULL; iter = iter->next) { container = iter->data; for (GList *node_iter = container->running_on; node_iter != NULL; node_iter = node_iter->next) { node = node_iter->data; // If insert returns true, we haven't counted this node yet if (g_hash_table_insert(nodes, (gpointer) node->details, (gpointer) node) && !pe__count_active_node(rsc, node, &active, count_all, count_clean)) { goto done; } } } done: g_list_free(containers); g_hash_table_destroy(nodes); return active; } /*! * \internal * \brief Get maximum bundle resource instances per node * * \param[in] rsc Bundle resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__bundle_max_per_node(const pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); CRM_ASSERT(bundle_data->nreplicas_per_host >= 0); return (unsigned int) bundle_data->nreplicas_per_host; }
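/* Illustrative aside (not part of the patch): counting distinct nodes the
 * way pe__bundle_active_node() does above. g_hash_table_insert() returns
 * TRUE only when the key was not already present, so a node reported by
 * several containers is counted once. A minimal sketch, assuming only GLib;
 * the host names are hypothetical.
 */
#include <glib.h>
#include <stdio.h>

int
main(void)
{
    // Two containers report the same host; it must be counted only once
    const char *hosts[] = { "node-1", "node-2", "node-1" };
    GHashTable *nodes = g_hash_table_new(g_str_hash, g_str_equal);
    unsigned int count = 0;

    for (int i = 0; i < 3; i++) {
        if (g_hash_table_insert(nodes, (gpointer) hosts[i],
                                (gpointer) hosts[i])) {
            count++;  // insert returned TRUE: first time seeing this node
        }
    }
    printf("distinct active nodes: %u\n", count);  // prints 2
    g_hash_table_destroy(nodes);
    return 0;
}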