diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index e09885e128..af6956afc0 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -1,445 +1,444 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PE_INTERNAL__H # define PE_INTERNAL__H # include # include # include # include # include # include # include # include # include # include const char *pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts); bool pe__clone_is_ordered(const pcmk_resource_t *clone); int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag); bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags); bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags); pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group); const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle); int pe__clone_max(const pcmk_resource_t *clone); int pe__clone_node_max(const pcmk_resource_t *clone); int pe__clone_promoted_max(const pcmk_resource_t *clone); int pe__clone_promoted_node_max(const pcmk_resource_t *clone); void pe__create_clone_notifications(pcmk_resource_t *clone); void pe__free_clone_notification_data(pcmk_resource_t *clone); void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone, pcmk_action_t *start, pcmk_action_t *started, pcmk_action_t *stop, pcmk_action_t *stopped); pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional, bool runnable); void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting, bool any_demoting); bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node); char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create, const char *name, pcmk_scheduler_t *scheduler); pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list, int current); void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler, gboolean failed); gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id, const pcmk_node_t *node, int flags); gboolean native_active(pcmk_resource_t *rsc, gboolean all); gboolean group_active(pcmk_resource_t *rsc, gboolean all); gboolean clone_active(pcmk_resource_t *rsc, gboolean all); gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all); //! \deprecated This function will be removed in a future release void native_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! \deprecated This function will be removed in a future release void group_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! \deprecated This function will be removed in a future release void clone_print(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); //! 
\deprecated This function will be removed in a future release void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data); gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes); int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name , size_t pairs_count, ...); char *pe__node_display_name(pcmk_node_t *node, bool print_detail); // Clone notifications (pe_notif.c) void pe__order_notifs_after_fencing(const pcmk_action_t *action, pcmk_resource_t *rsc, pcmk_action_t *stonith_op); // Resource output methods int pe__clone_xml(pcmk__output_t *out, va_list args); int pe__clone_default(pcmk__output_t *out, va_list args); int pe__group_xml(pcmk__output_t *out, va_list args); int pe__group_default(pcmk__output_t *out, va_list args); int pe__bundle_xml(pcmk__output_t *out, va_list args); int pe__bundle_html(pcmk__output_t *out, va_list args); int pe__bundle_text(pcmk__output_t *out, va_list args); int pe__node_html(pcmk__output_t *out, va_list args); int pe__node_text(pcmk__output_t *out, va_list args); int pe__node_xml(pcmk__output_t *out, va_list args); int pe__resource_xml(pcmk__output_t *out, va_list args); int pe__resource_html(pcmk__output_t *out, va_list args); int pe__resource_text(pcmk__output_t *out, va_list args); void native_free(pcmk_resource_t *rsc); void group_free(pcmk_resource_t *rsc); void clone_free(pcmk_resource_t *rsc); void pe__free_bundle(pcmk_resource_t *rsc); enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current); void pe__count_common(pcmk_resource_t *rsc); void pe__count_bundle(pcmk_resource_t *rsc); void common_free(pcmk_resource_t *rsc); pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node); time_t get_effective_time(pcmk_scheduler_t *scheduler); /* Failure handling utilities (from failcounts.c) */ int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc, time_t *last_failure, uint32_t flags, const xmlNode *xml_op); pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *reason, pcmk_scheduler_t *scheduler); /* Functions for finding/counting a resource's active nodes */ bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_node_t **active, unsigned int *count_all, unsigned int *count_clean); pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count); /* Binary like operators for lists of nodes */ GHashTable *pe__node_list2table(const GList *list); pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler); gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action, uint32_t flags); void pe__show_node_scores_as(const char *file, const char *function, int line, bool to_log, const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes, pcmk_scheduler_t *scheduler); #define pe__show_node_scores(level, rsc, text, nodes, scheduler) \ pe__show_node_scores_as(__FILE__, __func__, __LINE__, \ (level), (rsc), (text), (nodes), (scheduler)) GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *action_name, guint interval_ms, const xmlNode 
*action_config); GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml, GHashTable *node_attrs, pcmk_scheduler_t *data_set); xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, bool include_disabled); enum rsc_start_requirement pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name); enum action_fail_response pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, const char *value); enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name, enum action_fail_response on_fail, GHashTable *meta); pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task, const pcmk_node_t *on_node, gboolean optional, pcmk_scheduler_t *scheduler); # define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0) # define delete_action(rsc, node, optional) custom_action( \ rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \ optional, rsc->cluster); # define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0) # define stop_action(rsc, node, optional) custom_action( \ rsc, stop_key(rsc), PCMK_ACTION_STOP, node, \ optional, rsc->cluster); # define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0) # define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0) # define start_action(rsc, node, optional) custom_action( \ rsc, start_key(rsc), PCMK_ACTION_START, node, \ optional, rsc->cluster) # define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0) # define promote_action(rsc, node, optional) custom_action( \ rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node, \ optional, rsc->cluster) # define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0) # define demote_action(rsc, node, optional) custom_action( \ rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \ optional, rsc->cluster) extern int pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action, pcmk_scheduler_t *scheduler); pcmk_action_t *find_first_action(const GList *input, const char *uuid, const char *task, const pcmk_node_t *on_node); enum action_tasks get_complex_task(const pcmk_resource_t *rsc, const char *name); GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node); GList *find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node); GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node, const char *task, bool require_node); extern void pe_free_action(pcmk_action_t *action); void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag, pcmk_scheduler_t *scheduler); extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, bool same_node_default); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role); void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why); pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc, const char *sub_id); extern void destroy_ticket(gpointer data); pcmk_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler); // Resources for manipulating resource names const char *pe_base_name_end(const char *id); char *clone_strip(const char *last_rsc_id); char *clone_zero(const char *last_rsc_id); static inline bool pe_base_name_eq(const pcmk_resource_t *rsc, const char *id) { if (id && rsc && rsc->id) { // Number of characters in rsc->id before any clone 
suffix size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1; return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len); } return false; } int pe__target_rc_from_xml(const xmlNode *xml_op); gint pe__cmp_node_name(gconstpointer a, gconstpointer b); bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any); pcmk__op_digest_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task, guint *interval_ms, const pcmk_node_t *node, const xmlNode *xml_op, GHashTable *overrides, bool calc_secure, pcmk_scheduler_t *scheduler); void pe__free_digests(gpointer ptr); pcmk__op_digest_t *rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op, pcmk_node_t *node, pcmk_scheduler_t *scheduler); pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pcmk_scheduler_t *scheduler); void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason, pcmk_action_t *dependency, pcmk_scheduler_t *scheduler); char *pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag); void pe_action_set_reason(pcmk_action_t *action, const char *reason, bool overwrite); void pe__add_action_expected_result(pcmk_action_t *action, int expected_result); void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag); gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); //! \deprecated This function will be removed in a future release void print_rscs_brief(GList *rsc_list, const char * pre_text, long options, void * print_data, gboolean print_all); int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options); void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay); pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type, const char *score, pcmk_scheduler_t *scheduler); //! 
\deprecated This function will be removed in a future release
void common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
                  const pcmk_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
                           const char *name, const pcmk_node_t *node,
                           unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
                           const char *name, const pcmk_node_t *node,
                           unsigned int options);
GList *pe__bundle_containers(const pcmk_resource_t *bundle);
int pe__bundle_max(const pcmk_resource_t *rsc);
bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
                                 const pcmk_node_t *node);
pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc);
const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance);
pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle);
void pe__foreach_bundle_replica(pcmk_resource_t *bundle,
                                bool (*fn)(pcmk__bundle_replica_t *, void *),
                                void *user_data);
void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
                                      bool (*fn)(const pcmk__bundle_replica_t *,
                                                 void *),
                                      void *user_data);
pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle,
                                         const pcmk_node_t *node);
bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc);
-const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc,
-                                       pcmk_scheduler_t *scheduler,
-                                       xmlNode *xml, const char *field);
+const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml,
+                                       const char *field);
bool pe__is_universal_clone(const pcmk_resource_t *rsc,
                            const pcmk_scheduler_t *scheduler);
void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
                         pcmk_node_t *node, enum pcmk__check_parameters,
                         pcmk_scheduler_t *scheduler);
void pe__foreach_param_check(pcmk_scheduler_t *scheduler,
                             void (*cb)(pcmk_resource_t*, pcmk_node_t*,
                                        const xmlNode*,
                                        enum pcmk__check_parameters));
void pe__free_param_checks(pcmk_scheduler_t *scheduler);
bool pe__shutdown_requested(const pcmk_node_t *node);
void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
                             const char *reason);
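The declaration change above drops the explicit pcmk_scheduler_t argument from pe__add_bundle_remote_name(). The caller-side sketch below is illustrative only (rsc, xml, and addr are hypothetical variables); it assumes, based on the updated call site later in this diff, that the function can now reach the scheduler through the resource itself (for example via rsc->cluster):

    /* Before this change: the caller supplied the scheduler explicitly */
    const char *addr = pe__add_bundle_remote_name(rsc, rsc->cluster, xml,
                                                  PCMK_XA_VALUE);

    /* After this change: the scheduler argument is gone; only the resource,
     * the nvpair XML, and the attribute name are passed.
     */
    const char *addr = pe__add_bundle_remote_name(rsc, xml, PCMK_XA_VALUE);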
/*!
 * \internal
 * \brief Register xml formatting message functions.
 *
 * \param[in,out] out  Output object to register messages with
 */
void pe__register_messages(pcmk__output_t *out);

void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                                const pe_rule_eval_data_t *rule_data,
                                GHashTable *hash, const char *always_first,
                                gboolean overwrite,
                                pcmk_scheduler_t *scheduler);

bool pe__resource_is_disabled(const pcmk_resource_t *rsc);
void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node);

GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc,
                     const char *tag);
bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node,
                       const char *tag);

bool pe__rsc_running_on_only(const pcmk_resource_t *rsc,
                             const pcmk_node_t *node);
bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s);
GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s);

bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node);

gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                               gboolean check_parent);
gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                               gboolean check_parent);
gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);

xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name);

const char *pe__clone_child_id(const pcmk_resource_t *rsc);

int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health);
int pe__node_health(pcmk_node_t *node);

static inline enum pcmk__health_strategy
pe__health_strategy(pcmk_scheduler_t *scheduler)
{
    const char *strategy = pcmk__cluster_option(scheduler->config_hash,
                                                PCMK_OPT_NODE_HEALTH_STRATEGY);

    return pcmk__parse_health_strategy(strategy);
}

static inline int
pe__health_score(const char *option, pcmk_scheduler_t *scheduler)
{
    const char *value = pcmk__cluster_option(scheduler->config_hash, option);

    return char2score(value);
}

#endif
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 1d1466be9f..ac0b553317 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1059 +1,1058 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include

#include "libpacemaker_private.h"

struct assign_data {
    const pcmk_node_t *prefer;
    bool stop_if_fail;
};

/*!
* \internal * \brief Assign a single bundle replica's resources (other than container) * * \param[in,out] replica Replica to assign * \param[in] user_data Preferred node, if any * * \return true (to indicate that any further replicas should be processed) */ static bool assign_replica(pcmk__bundle_replica_t *replica, void *user_data) { pcmk_node_t *container_host = NULL; struct assign_data *assign_data = user_data; const pcmk_node_t *prefer = assign_data->prefer; bool stop_if_fail = assign_data->stop_if_fail; const pcmk_resource_t *bundle = pe__const_top_resource(replica->container, true); if (replica->ip != NULL) { pcmk__rsc_trace(bundle, "Assigning bundle %s IP %s", bundle->id, replica->ip->id); replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail); } container_host = replica->container->allocated_to; if (replica->remote != NULL) { if (pcmk__is_pacemaker_remote_node(container_host)) { /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on * the same host because Pacemaker Remote only supports a single * active connection. */ pcmk__new_colocation("#replica-remote-with-host-remote", NULL, PCMK_SCORE_INFINITY, replica->remote, container_host->details->remote_rsc, NULL, NULL, pcmk__coloc_influence); } pcmk__rsc_trace(bundle, "Assigning bundle %s connection %s", bundle->id, replica->remote->id); replica->remote->cmds->assign(replica->remote, prefer, stop_if_fail); } if (replica->child != NULL) { pcmk_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, replica->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if (!pcmk__same_node(node, replica->node)) { node->weight = -PCMK_SCORE_INFINITY; } else if (!pcmk__threshold_reached(replica->child, node, NULL)) { node->weight = PCMK_SCORE_INFINITY; } } pcmk__set_rsc_flags(replica->child->parent, pcmk_rsc_assigning); pcmk__rsc_trace(bundle, "Assigning bundle %s replica child %s", bundle->id, replica->child->id); replica->child->cmds->assign(replica->child, replica->node, stop_if_fail); pcmk__clear_rsc_flags(replica->child->parent, pcmk_rsc_assigning); } return true; } /*! * \internal * \brief Assign a bundle resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc * can't be assigned to a node, set the * descendant's next role to stopped and update * existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. 
*/ pcmk_node_t * pcmk__bundle_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer, bool stop_if_fail) { GList *containers = NULL; pcmk_resource_t *bundled_resource = NULL; struct assign_data assign_data = { prefer, stop_if_fail }; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); pcmk__rsc_trace(rsc, "Assigning bundle %s", rsc->id); pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning); pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pcmk_sched_output_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Assign all containers first, so we know what nodes the bundle will be on containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance); pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc), rsc->fns->max_per_node(rsc)); g_list_free(containers); // Then assign remaining replica resources pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data); // Finally, assign the bundled resources to each bundle node bundled_resource = pe__bundled_resource(rsc); if (bundled_resource != NULL) { pcmk_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (pe__node_is_bundle_instance(rsc, node)) { node->weight = 0; } else { node->weight = -PCMK_SCORE_INFINITY; } } bundled_resource->cmds->assign(bundled_resource, prefer, stop_if_fail); } pcmk__clear_rsc_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned); return NULL; } /*! * \internal * \brief Create actions for a bundle replica's resources (other than child) * * \param[in,out] replica Replica to create actions for * \param[in] user_data Unused * * \return true (to indicate that any further replicas should be processed) */ static bool create_replica_actions(pcmk__bundle_replica_t *replica, void *user_data) { if (replica->ip != NULL) { replica->ip->cmds->create_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->create_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->create_actions(replica->remote); } return true; } /*! * \internal * \brief Create all actions needed for a given bundle resource * * \param[in,out] rsc Bundle resource to create actions for */ void pcmk__bundle_create_actions(pcmk_resource_t *rsc) { pcmk_action_t *action = NULL; GList *containers = NULL; pcmk_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); pe__foreach_bundle_replica(rsc, create_replica_actions, NULL); containers = pe__bundle_containers(rsc); pcmk__create_instance_actions(rsc, containers); g_list_free(containers); bundled_resource = pe__bundled_resource(rsc); if (bundled_resource != NULL) { bundled_resource->cmds->create_actions(bundled_resource); if (pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) { pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTED, true, true); action->priority = PCMK_SCORE_INFINITY; pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTED, true, true); action->priority = PCMK_SCORE_INFINITY; } } } /*! 
* \internal * \brief Create internal constraints for a bundle replica's resources * * \param[in,out] replica Replica to create internal constraints for * \param[in,out] user_data Replica's parent bundle * * \return true (to indicate that any further replicas should be processed) */ static bool replica_internal_constraints(pcmk__bundle_replica_t *replica, void *user_data) { pcmk_resource_t *bundle = user_data; replica->container->cmds->internal_constraints(replica->container); // Start bundle -> start replica container pcmk__order_starts(bundle, replica->container, pcmk__ar_unrunnable_first_blocks |pcmk__ar_then_implies_first_graphed); // Stop bundle -> stop replica child and container if (replica->child != NULL) { pcmk__order_stops(bundle, replica->child, pcmk__ar_then_implies_first_graphed); } pcmk__order_stops(bundle, replica->container, pcmk__ar_then_implies_first_graphed); // Start replica container -> bundle is started pcmk__order_resource_actions(replica->container, PCMK_ACTION_START, bundle, PCMK_ACTION_RUNNING, pcmk__ar_first_implies_then_graphed); // Stop replica container -> bundle is stopped pcmk__order_resource_actions(replica->container, PCMK_ACTION_STOP, bundle, PCMK_ACTION_STOPPED, pcmk__ar_first_implies_then_graphed); if (replica->ip != NULL) { replica->ip->cmds->internal_constraints(replica->ip); // Replica IP address -> replica container (symmetric) pcmk__order_starts(replica->ip, replica->container, pcmk__ar_unrunnable_first_blocks |pcmk__ar_guest_allowed); pcmk__order_stops(replica->container, replica->ip, pcmk__ar_then_implies_first|pcmk__ar_guest_allowed); pcmk__new_colocation("#ip-with-container", NULL, PCMK_SCORE_INFINITY, replica->ip, replica->container, NULL, NULL, pcmk__coloc_influence); } if (replica->remote != NULL) { /* This handles ordering and colocating remote relative to container * (via "#resource-with-container"). Since IP is also ordered and * colocated relative to the container, we don't need to do anything * explicit here with IP. */ replica->remote->cmds->internal_constraints(replica->remote); } if (replica->child != NULL) { CRM_ASSERT(replica->remote != NULL); // "Start remote then child" is implicit in scheduler's remote logic } return true; } /*! 
* \internal * \brief Create implicit constraints needed for a bundle resource * * \param[in,out] rsc Bundle resource to create implicit constraints for */ void pcmk__bundle_internal_constraints(pcmk_resource_t *rsc) { pcmk_resource_t *bundled_resource = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc); bundled_resource = pe__bundled_resource(rsc); if (bundled_resource == NULL) { return; } // Start bundle -> start bundled clone pcmk__order_resource_actions(rsc, PCMK_ACTION_START, bundled_resource, PCMK_ACTION_START, pcmk__ar_then_implies_first_graphed); // Bundled clone is started -> bundle is started pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_RUNNING, rsc, PCMK_ACTION_RUNNING, pcmk__ar_first_implies_then_graphed); // Stop bundle -> stop bundled clone pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, bundled_resource, PCMK_ACTION_STOP, pcmk__ar_then_implies_first_graphed); // Bundled clone is stopped -> bundle is stopped pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_STOPPED, rsc, PCMK_ACTION_STOPPED, pcmk__ar_first_implies_then_graphed); bundled_resource->cmds->internal_constraints(bundled_resource); if (!pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) { return; } pcmk__promotable_restart_ordering(rsc); // Demote bundle -> demote bundled clone pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE, bundled_resource, PCMK_ACTION_DEMOTE, pcmk__ar_then_implies_first_graphed); // Bundled clone is demoted -> bundle is demoted pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_DEMOTED, rsc, PCMK_ACTION_DEMOTED, pcmk__ar_first_implies_then_graphed); // Promote bundle -> promote bundled clone pcmk__order_resource_actions(rsc, PCMK_ACTION_PROMOTE, bundled_resource, PCMK_ACTION_PROMOTE, pcmk__ar_then_implies_first_graphed); // Bundled clone is promoted -> bundle is promoted pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_PROMOTED, rsc, PCMK_ACTION_PROMOTED, pcmk__ar_first_implies_then_graphed); } struct match_data { const pcmk_node_t *node; // Node to compare against replica pcmk_resource_t *container; // Replica container corresponding to node }; /*! * \internal * \brief Check whether a replica container is assigned to a given node * * \param[in] replica Replica to check * \param[in,out] user_data struct match_data with node to compare against * * \return true if the replica does not match (to indicate further replicas * should be processed), otherwise false */ static bool match_replica_container(const pcmk__bundle_replica_t *replica, void *user_data) { struct match_data *match_data = user_data; if (pcmk__instance_matches(replica->container, match_data->node, pcmk_role_unknown, false)) { match_data->container = replica->container; return false; // Match found, don't bother searching further replicas } return true; // No match, keep searching } /*! * \internal * \brief Get the host to which a bundle node is assigned * * \param[in] node Possible bundle node to check * * \return Node to which the container for \p node is assigned if \p node is a * bundle node, otherwise \p node itself */ static const pcmk_node_t * get_bundle_node_host(const pcmk_node_t *node) { if (pcmk__is_bundle_node(node)) { const pcmk_resource_t *container = node->details->remote_rsc->container; return container->fns->location(container, NULL, 0); } return node; } /*! 
* \internal * \brief Find a bundle container compatible with a dependent resource * * \param[in] dependent Dependent resource in colocation with bundle * \param[in] bundle Bundle that \p dependent is colocated with * * \return A container from \p bundle assigned to the same node as \p dependent * if assigned, otherwise assigned to any of dependent's allowed nodes, * otherwise NULL. */ static pcmk_resource_t * compatible_container(const pcmk_resource_t *dependent, const pcmk_resource_t *bundle) { GList *scratch = NULL; struct match_data match_data = { NULL, NULL }; // If dependent is assigned, only check there match_data.node = dependent->fns->location(dependent, NULL, 0); match_data.node = get_bundle_node_host(match_data.node); if (match_data.node != NULL) { pe__foreach_const_bundle_replica(bundle, match_replica_container, &match_data); return match_data.container; } // Otherwise, check for any of the dependent's allowed nodes scratch = g_hash_table_get_values(dependent->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL); for (const GList *iter = scratch; iter != NULL; iter = iter->next) { match_data.node = iter->data; match_data.node = get_bundle_node_host(match_data.node); if (match_data.node == NULL) { continue; } pe__foreach_const_bundle_replica(bundle, match_replica_container, &match_data); if (match_data.container != NULL) { break; } } g_list_free(scratch); return match_data.container; } struct coloc_data { const pcmk__colocation_t *colocation; pcmk_resource_t *dependent; GList *container_hosts; }; /*! * \internal * \brief Apply a colocation score to replica node scores or resource priority * * \param[in] replica Replica of primary bundle resource in colocation * \param[in,out] user_data struct coloc_data for colocation being applied * * \return true (to indicate that any further replicas should be processed) */ static bool replica_apply_coloc_score(const pcmk__bundle_replica_t *replica, void *user_data) { struct coloc_data *coloc_data = user_data; pcmk_node_t *chosen = NULL; if (coloc_data->colocation->score < PCMK_SCORE_INFINITY) { replica->container->cmds->apply_coloc_score(coloc_data->dependent, replica->container, coloc_data->colocation, false); return true; } chosen = replica->container->fns->location(replica->container, NULL, 0); if ((chosen == NULL) || is_set_recursive(replica->container, pcmk_rsc_blocked, true)) { return true; } if ((coloc_data->colocation->primary_role >= pcmk_role_promoted) && ((replica->child == NULL) || (replica->child->next_role < pcmk_role_promoted))) { return true; } pcmk__rsc_trace(pe__const_top_resource(replica->container, true), "Allowing mandatory colocation %s using %s @%d", coloc_data->colocation->id, pcmk__node_name(chosen), chosen->weight); coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts, chosen); return true; } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). 
* * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent, const pcmk_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { struct coloc_data coloc_data = { colocation, dependent, NULL }; /* This should never be called for the bundle itself as a dependent. * Instead, we add its colocation constraints to its containers and bundled * primitive and call the apply_coloc_score() method for them as dependents. */ CRM_ASSERT((primary != NULL) && (primary->variant == pcmk_rsc_variant_bundle) && (dependent != NULL) && (dependent->variant == pcmk_rsc_variant_primitive) && (colocation != NULL) && !for_dependent); if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) { pcmk__rsc_trace(primary, "Skipping applying colocation %s " "because %s is still provisional", colocation->id, primary->id); return; } pcmk__rsc_trace(primary, "Applying colocation %s (%s with %s at %s)", colocation->id, dependent->id, primary->id, pcmk_readable_score(colocation->score)); /* If the constraint dependent is a clone or bundle, "dependent" here is one * of its instances. Look for a compatible instance of this bundle. */ if (colocation->dependent->variant > pcmk_rsc_variant_group) { const pcmk_resource_t *primary_container = NULL; primary_container = compatible_container(dependent, primary); if (primary_container != NULL) { // Success, we found one pcmk__rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_container->id); dependent->cmds->apply_coloc_score(dependent, primary_container, colocation, true); } else if (colocation->score >= PCMK_SCORE_INFINITY) { // Failure, and it's fatal crm_notice("%s cannot run because there is no compatible " "instance of %s to colocate with", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true, true); } else { // Failure, but we can ignore it pcmk__rsc_debug(primary, "%s cannot be colocated with any instance of %s", dependent->id, primary->id); } return; } pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score, &coloc_data); if (colocation->score >= PCMK_SCORE_INFINITY) { pcmk__colocation_intersect_nodes(dependent, primary, colocation, coloc_data.container_hosts, false); } g_list_free(coloc_data.container_hosts); } // Bundle implementation of pcmk_assignment_methods_t:with_this_colocations() void pcmk__with_bundle_colocations(const pcmk_resource_t *rsc, const pcmk_resource_t *orig_rsc, GList **list) { const pcmk_resource_t *bundled_rsc = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle) && (orig_rsc != NULL) && (list != NULL)); // The bundle itself and its containers always get its colocations if ((orig_rsc == rsc) || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) { pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); return; } /* The bundled resource gets the colocations if it's promotable and we've * begun choosing roles */ bundled_rsc = pe__bundled_resource(rsc); if ((bundled_rsc == NULL) || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable) || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) { return; } if (orig_rsc == bundled_rsc) { if (pe__clone_flag_is_set(orig_rsc, pcmk__clone_promotion_constrained)) { /* orig_rsc is the clone and we're setting roles (or have already * done so) */ pcmk__add_with_this_list(list, 
rsc->rsc_cons_lhs, orig_rsc); } } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) { /* orig_rsc is an instance and is already assigned. If something * requests colocations for orig_rsc now, it's for setting roles. */ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); } } // Bundle implementation of pcmk_assignment_methods_t:this_with_colocations() void pcmk__bundle_with_colocations(const pcmk_resource_t *rsc, const pcmk_resource_t *orig_rsc, GList **list) { const pcmk_resource_t *bundled_rsc = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle) && (orig_rsc != NULL) && (list != NULL)); // The bundle itself and its containers always get its colocations if ((orig_rsc == rsc) || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) { pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); return; } /* The bundled resource gets the colocations if it's promotable and we've * begun choosing roles */ bundled_rsc = pe__bundled_resource(rsc); if ((bundled_rsc == NULL) || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable) || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) { return; } if (orig_rsc == bundled_rsc) { if (pe__clone_flag_is_set(orig_rsc, pcmk__clone_promotion_constrained)) { /* orig_rsc is the clone and we're setting roles (or have already * done so) */ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); } } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) { /* orig_rsc is an instance and is already assigned. If something * requests colocations for orig_rsc now, it's for setting roles. */ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); } } /*! * \internal * \brief Return action flags for a given bundle resource action * * \param[in,out] action Bundle resource action to get flags for * \param[in] node If not NULL, limit effects to this node * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__bundle_action_flags(pcmk_action_t *action, const pcmk_node_t *node) { GList *containers = NULL; uint32_t flags = 0; pcmk_resource_t *bundled_resource = NULL; CRM_ASSERT((action != NULL) && (action->rsc != NULL) && (action->rsc->variant == pcmk_rsc_variant_bundle)); bundled_resource = pe__bundled_resource(action->rsc); if (bundled_resource != NULL) { // Clone actions are done on the bundled clone resource, not container switch (get_complex_task(bundled_resource, action->task)) { case pcmk_action_unspecified: case pcmk_action_notify: case pcmk_action_notified: case pcmk_action_promote: case pcmk_action_promoted: case pcmk_action_demote: case pcmk_action_demoted: return pcmk__collective_action_flags(action, bundled_resource->children, node); default: break; } } containers = pe__bundle_containers(action->rsc); flags = pcmk__collective_action_flags(action, containers, node); g_list_free(containers); return flags; } /*! * \internal * \brief Apply a location constraint to a bundle replica * * \param[in,out] replica Replica to apply constraint to * \param[in,out] user_data Location constraint to apply * * \return true (to indicate that any further replicas should be processed) */ static bool apply_location_to_replica(pcmk__bundle_replica_t *replica, void *user_data) { pcmk__location_t *location = user_data; if (replica->container != NULL) { replica->container->cmds->apply_location(replica->container, location); } if (replica->ip != NULL) { replica->ip->cmds->apply_location(replica->ip, location); } return true; } /*! 
 * \internal
 * \brief Apply a location constraint to a bundle resource's allowed node scores
 *
 * \param[in,out] rsc       Bundle resource to apply constraint to
 * \param[in,out] location  Location constraint to apply
 */
void
pcmk__bundle_apply_location(pcmk_resource_t *rsc, pcmk__location_t *location)
{
    pcmk_resource_t *bundled_resource = NULL;

    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
               && (location != NULL));

    pcmk__apply_location(rsc, location);
    pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);

    bundled_resource = pe__bundled_resource(rsc);
    if ((bundled_resource != NULL)
        && ((location->role_filter == pcmk_role_unpromoted)
            || (location->role_filter == pcmk_role_promoted))) {
        bundled_resource->cmds->apply_location(bundled_resource, location);
        bundled_resource->rsc_location = g_list_prepend(
            bundled_resource->rsc_location, location);
    }
}

#define XPATH_REMOTE "//nvpair[@name='" PCMK_REMOTE_RA_ADDR "']"

/*!
 * \internal
 * \brief Add a bundle replica's actions to transition graph
 *
 * \param[in,out] replica    Replica to add to graph
 * \param[in]     user_data  Bundle that replica belongs to (for logging only)
 *
 * \return true (to indicate that any further replicas should be processed)
 */
static bool
add_replica_actions_to_graph(pcmk__bundle_replica_t *replica, void *user_data)
{
    if ((replica->remote != NULL) && (replica->container != NULL)
        && pe__bundle_needs_remote_name(replica->remote)) {
        /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
         * run pacemaker-remoted inside, without needing a separate IP for
         * the container. This is done by configuring the inner remote's
         * connection host as the magic string "#uname", then
         * replacing it with the underlying host when needed.
         */
        xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
                                           LOG_ERR);
        const char *calculated_addr = NULL;

        // Replace the value in replica->remote->xml (if appropriate)
-        calculated_addr = pe__add_bundle_remote_name(replica->remote,
-                                                     replica->remote->cluster,
-                                                     nvpair, PCMK_XA_VALUE);
+        calculated_addr = pe__add_bundle_remote_name(replica->remote, nvpair,
+                                                     PCMK_XA_VALUE);
        if (calculated_addr != NULL) {
            /* Since this is for the bundle as a resource, and not any
             * particular action, replace the value in the default
             * parameters (not evaluated for node). create_graph_action()
             * will grab it from there to replace it in node-evaluated
             * parameters.
             */
            GHashTable *params = pe_rsc_params(replica->remote, NULL,
                                               replica->remote->cluster);

            pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, calculated_addr);
        } else {
            pcmk_resource_t *bundle = user_data;

            /* The only way to get here is if the remote connection is
             * neither currently running nor scheduled to run. That means we
             * won't be doing any operations that require addr (only start
             * requires it; we additionally use it to compare digests when
             * unpacking status, promote, and migrate_from history, but
             * that's already happened by this point).
             */
            pcmk__rsc_info(bundle,
                           "Unable to determine address for bundle %s "
                           "remote connection", bundle->id);
        }
    }
    if (replica->ip != NULL) {
        replica->ip->cmds->add_actions_to_graph(replica->ip);
    }
    if (replica->container != NULL) {
        replica->container->cmds->add_actions_to_graph(replica->container);
    }
    if (replica->remote != NULL) {
        replica->remote->cmds->add_actions_to_graph(replica->remote);
    }
    return true;
}
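add_replica_actions_to_graph() above follows the per-replica callback convention used throughout this file: the callback receives a pcmk__bundle_replica_t plus a user-data pointer and returns true to continue iterating, and pcmk__bundle_add_actions_to_graph() just below drives it through pe__foreach_bundle_replica(). A minimal sketch of that pattern, using a hypothetical callback that only counts replicas whose container has been assigned to a node:

    // Hypothetical example: count replicas with an assigned container
    static bool
    count_assigned_containers(pcmk__bundle_replica_t *replica, void *user_data)
    {
        unsigned int *count = user_data;

        if ((replica->container != NULL)
            && (replica->container->allocated_to != NULL)) {
            ++(*count);
        }
        return true;    // true means "continue with the next replica"
    }

    // Usage sketch (bundle is assumed to be a bundle resource)
    unsigned int assigned = 0;

    pe__foreach_bundle_replica(bundle, count_assigned_containers, &assigned);

Returning false instead stops the iteration early, which is how match_replica_container() earlier in this file short-circuits once it finds a matching container.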
/*!
 * \internal
 * \brief Add a bundle resource's actions to the transition graph
 *
 * \param[in,out] rsc  Bundle resource whose actions should be added
 */
void
pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc)
{
    pcmk_resource_t *bundled_resource = NULL;

    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));

    bundled_resource = pe__bundled_resource(rsc);
    if (bundled_resource != NULL) {
        bundled_resource->cmds->add_actions_to_graph(bundled_resource);
    }
    pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
}

struct probe_data {
    pcmk_resource_t *bundle;    // Bundle being probed
    pcmk_node_t *node;          // Node to create probes on
    bool any_created;           // Whether any probes have been created
};

/*!
 * \internal
 * \brief Order a bundle replica's start after another replica's probe
 *
 * \param[in,out] replica    Replica to order start for
 * \param[in,out] user_data  Replica with probe to order after
 *
 * \return true (to indicate that any further replicas should be processed)
 */
static bool
order_replica_start_after(pcmk__bundle_replica_t *replica, void *user_data)
{
    pcmk__bundle_replica_t *probed_replica = user_data;

    if ((replica == probed_replica) || (replica->container == NULL)) {
        return true;
    }
    pcmk__new_ordering(probed_replica->container,
                       pcmk__op_key(probed_replica->container->id,
                                    PCMK_ACTION_MONITOR, 0),
                       NULL, replica->container,
                       pcmk__op_key(replica->container->id,
                                    PCMK_ACTION_START, 0),
                       NULL, pcmk__ar_ordered|pcmk__ar_if_on_same_node,
                       replica->container->cluster);
    return true;
}

/*!
 * \internal
 * \brief Create probes for a bundle replica's resources
 *
 * \param[in,out] replica    Replica to create probes for
 * \param[in,out] user_data  struct probe_data
 *
 * \return true (to indicate that any further replicas should be processed)
 */
static bool
create_replica_probes(pcmk__bundle_replica_t *replica, void *user_data)
{
    struct probe_data *probe_data = user_data;

    if ((replica->ip != NULL)
        && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
        probe_data->any_created = true;
    }
    if ((replica->child != NULL)
        && pcmk__same_node(probe_data->node, replica->node)
        && replica->child->cmds->create_probe(replica->child,
                                              probe_data->node)) {
        probe_data->any_created = true;
    }
    if ((replica->container != NULL)
        && replica->container->cmds->create_probe(replica->container,
                                                  probe_data->node)) {
        probe_data->any_created = true;

        /* If we're limited to one replica per host (due to
         * the lack of an IP range probably), then we don't
         * want any of our peer containers starting until
         * we've established that no other copies are already
         * running.
         *
         * Partly this is to ensure that the maximum replicas per host is
         * observed, but also to ensure that the containers
         * don't fail to start because the necessary port
         * mappings (which won't include an IP for uniqueness)
         * are already taken
         */
        if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
            pe__foreach_bundle_replica(probe_data->bundle,
                                       order_replica_start_after, replica);
        }
    }
    if ((replica->container != NULL) && (replica->remote != NULL)
        && replica->remote->cmds->create_probe(replica->remote,
                                               probe_data->node)) {
        /* Do not probe the remote resource until we know where the container is
         * running. This is required for REMOTE_CONTAINER_HACK to correctly
         * probe remote resources.
*/ char *probe_uuid = pcmk__op_key(replica->remote->id, PCMK_ACTION_MONITOR, 0); pcmk_action_t *probe = find_first_action(replica->remote->actions, probe_uuid, NULL, probe_data->node); free(probe_uuid); if (probe != NULL) { probe_data->any_created = true; pcmk__rsc_trace(probe_data->bundle, "Ordering %s probe on %s", replica->remote->id, pcmk__node_name(probe_data->node)); pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, PCMK_ACTION_START, 0), NULL, replica->remote, NULL, probe, pcmk__ar_nested_remote_probe, probe_data->bundle->cluster); } } return true; } /*! * \internal * * \brief Schedule any probes needed for a bundle resource on a node * * \param[in,out] rsc Bundle resource to create probes for * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node) { struct probe_data probe_data = { rsc, node, false }; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data); return probe_data.any_created; } /*! * \internal * \brief Output actions for one bundle replica * * \param[in,out] replica Replica to output actions for * \param[in] user_data Unused * * \return true (to indicate that any further replicas should be processed) */ static bool output_replica_actions(pcmk__bundle_replica_t *replica, void *user_data) { if (replica->ip != NULL) { replica->ip->cmds->output_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->output_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->output_actions(replica->remote); } if (replica->child != NULL) { replica->child->cmds->output_actions(replica->child); } return true; } /*! * \internal * \brief Output a summary of scheduled actions for a bundle resource * * \param[in,out] rsc Bundle resource to output actions for */ void pcmk__output_bundle_actions(pcmk_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); pe__foreach_bundle_replica(rsc, output_replica_actions, NULL); } // Bundle implementation of pcmk_assignment_methods_t:add_utilization() void pcmk__bundle_add_utilization(const pcmk_resource_t *rsc, const pcmk_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { pcmk_resource_t *container = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { return; } /* All bundle replicas are identical, so using the utilization of the first * is sufficient for any. Only the implicit container resource can have * utilization values. */ container = pe__first_container(rsc); if (container != NULL) { container->cmds->add_utilization(container, orig_rsc, all_rscs, utilization); } } // Bundle implementation of pcmk_assignment_methods_t:shutdown_lock() void pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)); // Bundles currently don't support shutdown locks } diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index 475e53853e..8cf8174c63 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,2260 +1,2260 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include enum pe__bundle_mount_flags { pe__bundle_mount_none = 0x00, // mount instance-specific subdirectory rather than source directly pe__bundle_mount_subdir = 0x01 }; typedef struct { char *source; char *target; char *options; uint32_t flags; // bitmask of pe__bundle_mount_flags } pe__bundle_mount_t; typedef struct { char *source; char *target; } pe__bundle_port_t; enum pe__container_agent { PE__CONTAINER_AGENT_UNKNOWN, PE__CONTAINER_AGENT_DOCKER, PE__CONTAINER_AGENT_RKT, PE__CONTAINER_AGENT_PODMAN, }; #define PE__CONTAINER_AGENT_UNKNOWN_S "unknown" #define PE__CONTAINER_AGENT_DOCKER_S "docker" #define PE__CONTAINER_AGENT_RKT_S "rkt" #define PE__CONTAINER_AGENT_PODMAN_S "podman" typedef struct pe__bundle_variant_data_s { int promoted_max; int nreplicas; int nreplicas_per_host; char *prefix; char *image; const char *ip_last; char *host_network; char *host_netmask; char *control_port; char *container_network; char *ip_range_start; gboolean add_host; gchar *container_host_options; char *container_command; char *launcher_options; const char *attribute_target; pcmk_resource_t *child; GList *replicas; // pcmk__bundle_replica_t * GList *ports; // pe__bundle_port_t * GList *mounts; // pe__bundle_mount_t * enum pe__container_agent agent_type; } pe__bundle_variant_data_t; #define get_bundle_variant_data(data, rsc) \ CRM_ASSERT(rsc != NULL); \ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle); \ CRM_ASSERT(rsc->variant_opaque != NULL); \ data = (pe__bundle_variant_data_t *) rsc->variant_opaque; /*! * \internal * \brief Get maximum number of bundle replicas allowed to run * * \param[in] rsc Bundle or bundled resource to check * * \return Maximum replicas for bundle corresponding to \p rsc */ int pe__bundle_max(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->nreplicas; } /*! * \internal * \brief Get the resource inside a bundle * * \param[in] bundle Bundle to check * * \return Resource inside \p bundle if any, otherwise NULL */ pcmk_resource_t * pe__bundled_resource(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->child; } /*! * \internal * \brief Get containerized resource corresponding to a given bundle container * * \param[in] instance Collective instance that might be a bundle container * * \return Bundled resource instance inside \p instance if it is a bundle * container instance, otherwise NULL */ const pcmk_resource_t * pe__get_rsc_in_container(const pcmk_resource_t *instance) { const pe__bundle_variant_data_t *data = NULL; const pcmk_resource_t *top = pe__const_top_resource(instance, true); if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) { return NULL; } get_bundle_variant_data(data, top); for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) { const pcmk__bundle_replica_t *replica = iter->data; if (instance == replica->container) { return replica->child; } } return NULL; } /*! 
* \internal * \brief Check whether a given node is created by a bundle * * \param[in] bundle Bundle resource to check * \param[in] node Node to check * * \return true if \p node is an instance of \p bundle, otherwise false */ bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (pcmk__same_node(node, replica->node)) { return true; } } return false; } /*! * \internal * \brief Get the container of a bundle's first replica * * \param[in] bundle Bundle resource to get container for * * \return Container resource from first replica of \p bundle if any, * otherwise NULL */ pcmk_resource_t * pe__first_container(const pcmk_resource_t *bundle) { const pe__bundle_variant_data_t *bundle_data = NULL; const pcmk__bundle_replica_t *replica = NULL; get_bundle_variant_data(bundle_data, bundle); if (bundle_data->replicas == NULL) { return NULL; } replica = bundle_data->replicas->data; return replica->container; } /*! * \internal * \brief Iterate over bundle replicas * * \param[in,out] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_bundle_replica(pcmk_resource_t *bundle, bool (*fn)(pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } /*! 
* \internal * \brief Iterate over const bundle replicas * * \param[in] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle, bool (*fn)(const pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (const GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((const pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static void allocate_ip(pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica, GString *buffer) { if(data->ip_range_start == NULL) { return; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { g_string_append_printf(buffer, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } else { g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); break; default: // PE__CONTAINER_AGENT_UNKNOWN break; } } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE); crm_xml_add(rsc, PCMK_XA_ID, name); crm_xml_add(rsc, PCMK_XA_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, PCMK_XA_PROVIDER, provider); crm_xml_add(rsc, PCMK_XA_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in,out] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
*/ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pcmk__config_err("Specifying the '" PCMK_XA_CONTROL_PORT "' for %s " "requires '" PCMK_XA_REPLICAS_PER_HOST "=1'", data->prefix); data->nreplicas_per_host = 1; // @TODO to be sure: // pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique); } return TRUE; } return FALSE; } static int create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_INSTANCE_ATTRIBUTES); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_ip), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_ip, &replica->ip, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->children = g_list_append(parent->children, replica->ip); } return pcmk_rc_ok; } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static int create_container_resource(pcmk_resource_t *parent, const pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; // Agent-specific const char *hostname_opt = NULL; const char *env_opt = NULL; const char *agent_str = NULL; int volid = 0; // rkt-only GString *buffer = NULL; GString *dbuffer = NULL; // Where syntax differences are drop-in replacements, set them now switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: hostname_opt = "-h "; env_opt = "-e "; break; case PE__CONTAINER_AGENT_RKT: hostname_opt = "--hostname="; env_opt = "--environment="; break; default: // PE__CONTAINER_AGENT_UNKNOWN return pcmk_rc_unpack_error; } agent_str = container_agent_str(data->agent_type); buffer = g_string_sized_new(4096); id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", agent_str); free(id); xml_obj = pcmk__xe_create(xml_container, PCMK_XE_INSTANCE_ATTRIBUTES); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", PCMK_VALUE_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", PCMK_VALUE_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", PCMK_VALUE_FALSE); if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) { 
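/* Assumption: Docker's own restart policy is disabled here so that the cluster, rather than the container runtime, controls when a failed replica is restarted. */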
g_string_append(buffer, " --restart=no"); } /* Set a container hostname only if we have an IP to map it to. The user can * set -h or --uts=host themselves if they want a nicer name for logs, but * this makes applications happy who need their hostname to match the IP * they bind to. */ if (data->ip_range_start != NULL) { g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix, replica->offset); } pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL); if (data->container_network != NULL) { pcmk__g_strcat(buffer, " --net=", data->container_network, NULL); } if (data->control_port != NULL) { pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=", data->control_port, NULL); } else { g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d", env_opt, DEFAULT_REMOTE_PORT); } for (GList *iter = data->mounts; iter != NULL; iter = iter->next) { pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data; char *source = NULL; if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) { source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix, replica->offset); pcmk__add_separated_word(&dbuffer, 1024, source, ","); } switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: pcmk__g_strcat(buffer, " -v ", pcmk__s(source, mount->source), ":", mount->target, NULL); if (mount->options != NULL) { pcmk__g_strcat(buffer, ":", mount->options, NULL); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --volume vol%d,kind=host," "source=%s%s%s " "--mount volume=vol%d,target=%s", volid, pcmk__s(source, mount->source), (mount->options != NULL)? "," : "", pcmk__s(mount->options, ""), volid, mount->target); volid++; break; default: break; } free(source); } for (GList *iter = data->ports; iter != NULL; iter = iter->next) { pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " -p ", replica->ipaddr, ":", port->source, ":", port->target, NULL); } else if (!pcmk__str_eq(data->container_network, PCMK_VALUE_HOST, pcmk__str_none)) { // No need to do port mapping if net == host pcmk__g_strcat(buffer, " -p ", port->source, ":", port->target, NULL); } break; case PE__CONTAINER_AGENT_RKT: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " --port=", port->target, ":", replica->ipaddr, ":", port->source, NULL); } else { pcmk__g_strcat(buffer, " --port=", port->target, ":", port->source, NULL); } break; default: break; } } /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because * it would cause restarts during rolling upgrades. * * In a previous version of the container resource creation logic, if * data->launcher_options is not NULL, we append * (" %s", data->launcher_options) even if data->launcher_options is an * empty string. Likewise for data->container_host_options. Using * * pcmk__add_word(buffer, 0, data->launcher_options) * * removes that extra trailing space, causing a resource definition change. */ if (data->launcher_options != NULL) { pcmk__g_strcat(buffer, " ", data->launcher_options, NULL); } if (data->container_host_options != NULL) { pcmk__g_strcat(buffer, " ", data->container_host_options, NULL); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", (const char *) buffer->str); g_string_free(buffer, TRUE); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", (dbuffer != NULL)? 
(const char *) dbuffer->str : ""); if (dbuffer != NULL) { g_string_free(dbuffer, TRUE); } if (replica->child != NULL) { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive; we'll monitor the * child independently. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); #if 0 /* @TODO Consider supporting the use case where we can start and stop * resources, but not proxy local commands (such as setting node * attributes), by running the local executor in stand-alone mode. * However, this would probably be better done via ACLs as with other * Pacemaker Remote nodes. */ } else if ((child != NULL) && data->untrusted) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", CRM_DAEMON_DIR "/pacemaker-execd"); crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke"); #endif } else { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want to know if it * is alive. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = pcmk__xe_create(xml_container, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_container), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_container, &replica->container, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } pcmk__set_rsc_flags(replica->container, pcmk_rsc_replica_container); parent->children = g_list_append(parent->children, replica->container); return pcmk_rc_ok; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(pcmk_resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pcmk_node_t *) match)->weight = -PCMK_SCORE_INFINITY; ((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never; } if (rsc->children) { g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname); } } static int create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if (replica->child && valid_network(data)) { GHashTableIter gIter; pcmk_node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; if (pe_find_resource(parent->cluster->resources, id) != NULL) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); //@TODO return error instead of asserting? CRM_ASSERT(pe_find_resource(parent->cluster->resources, id) == NULL); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? 
replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = pcmk__itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during scheduler data cleanup to * use as the node ID and uname. */ free(id); id = NULL; uname = pcmk__xe_id(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(parent->cluster->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, PCMK_VALUE_REMOTE, PCMK_VALUE_MINUS_INFINITY, parent->cluster); } else { node->weight = -PCMK_SCORE_INFINITY; } node->rsc_discover_mode = pcmk_probe_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pcmk_node_t entry. Ideally, it would do the same for bundle * nodes. Unfortunately, a bundle has to be mostly unpacked before it's * obvious what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when pe__unpack_resource() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ g_list_foreach(parent->cluster->resources, (GFunc) disallow_node, (gpointer) uname); replica->node = pe__copy_node(node); replica->node->weight = 500; replica->node->rsc_discover_mode = pcmk_probe_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->allowed_nodes); } replica->child->allowed_nodes = pcmk__strkey_table(NULL, free); g_hash_table_insert(replica->child->allowed_nodes, (gpointer) replica->node->details->id, pe__copy_node(replica->node)); { pcmk_node_t *copy = pe__copy_node(replica->node); copy->weight = -PCMK_SCORE_INFINITY; g_hash_table_insert(replica->child->parent->allowed_nodes, (gpointer) replica->node->details->id, copy); } if (pe__unpack_resource(xml_remote, &replica->remote, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pcmk__is_pacemaker_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -PCMK_SCORE_INFINITY; } } replica->node->details->remote_rsc = replica->remote; // Ensure pcmk__is_guest_or_bundle_node() functions correctly replica->remote->container = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). 
*/ pcmk__insert_dup(replica->node->details->attrs, CRM_ATTR_KIND, "container"); /* One effect of this is that setup_container() will add * replica->remote to replica->container's fillers, which will make * pe__resource_contains_guest_node() true for replica->container. * * replica->child does NOT get added to replica->container's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking replica->container's migration * threshold. */ parent->children = g_list_append(parent->children, replica->remote); } return pcmk_rc_ok; } static int create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { int rc = pcmk_rc_ok; rc = create_container_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_ip_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_remote_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } if ((replica->child != NULL) && (replica->ipaddr != NULL)) { pcmk__insert_meta(replica->child, "external-ip", replica->ipaddr); } if (replica->remote != NULL) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with pacemaker-remoted inside in order to start * services inside those containers. */ pcmk__set_rsc_flags(replica->remote, pcmk_rsc_remote_nesting_allowed); } return rc; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = pcmk__assert_alloc(1, sizeof(pe__bundle_mount_t)); mount->source = pcmk__str_copy(source); mount->target = pcmk__str_copy(target); mount->options = pcmk__str_copy(options); mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pcmk__bundle_replica_t * replica_for_remote(pcmk_resource_t *remote) { pcmk_resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->parent != NULL) { top = top->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc) { const char *value; GHashTable *params = NULL; if (rsc == NULL) { return false; } // Use NULL node since pcmk__bundle_expand() uses that to set value params = pe_rsc_params(rsc, NULL, rsc->cluster); value = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR); return pcmk__str_eq(value, "#uname", pcmk__str_casei) && xml_contains_remote_node(rsc->xml); } const char * -pe__add_bundle_remote_name(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, - xmlNode *xml, const char *field) +pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml, + const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pcmk_node_t *node = NULL; pcmk__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc)) { return NULL; } 
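/* Look up the bundle replica that owns this remote connection, then use the node hosting its container (the assigned node if allocated, otherwise the node it is currently running on) as the connection address. */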
replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->allocated_to; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. */ node = pcmk__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, pcmk__node_name(node)); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->details->uname); } return node->details->uname; } #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Bundle mount", pcmk__xe_id(mount_xml), \ flags, (flags_to_set), #flags_to_set); \ } while (0) gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *value = NULL; xmlNode *xml_obj = NULL; const xmlNode *xml_child = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; CRM_ASSERT(rsc != NULL); pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = pcmk__assert_alloc(1, sizeof(pe__bundle_variant_data_t)); rsc->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = pcmk__xe_first_child(rsc->xml, PCMK_XE_DOCKER, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } else { xml_obj = pcmk__xe_first_child(rsc->xml, PCMK__XE_RKT, NULL, NULL); if (xml_obj != NULL) { pcmk__warn_once(pcmk__wo_rkt, "Support for " PCMK__XE_RKT " in bundles " "(such as %s) is deprecated and will be " "removed in a future release", rsc->id); bundle_data->agent_type = PE__CONTAINER_AGENT_RKT; } else { xml_obj = pcmk__xe_first_child(rsc->xml, PCMK_XE_PODMAN, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } else { return FALSE; } } } // Use 0 for default, minimum, and invalid PCMK_XA_PROMOTED_MAX value = crm_element_value(xml_obj, PCMK_XA_PROMOTED_MAX); if (value == NULL) { // @COMPAT deprecated since 2.0.0 value = crm_element_value(xml_obj, PCMK__XA_PROMOTED_MAX_LEGACY); } pcmk__scan_min_int(value, &bundle_data->promoted_max, 0); /* Default replicas to PCMK_XA_PROMOTED_MAX if it was specified and 1 * otherwise */ value = crm_element_value(xml_obj, PCMK_XA_REPLICAS); if ((value == NULL) && (bundle_data->promoted_max > 0)) { bundle_data->nreplicas = bundle_data->promoted_max; } else { pcmk__scan_min_int(value, &bundle_data->nreplicas, 1); } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, PCMK_XA_REPLICAS_PER_HOST); pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1); if (bundle_data->nreplicas_per_host == 1) { pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique); } bundle_data->container_command = crm_element_value_copy(xml_obj, PCMK_XA_RUN_COMMAND); bundle_data->launcher_options = crm_element_value_copy(xml_obj, PCMK_XA_OPTIONS); bundle_data->image = crm_element_value_copy(xml_obj, PCMK_XA_IMAGE); bundle_data->container_network = crm_element_value_copy(xml_obj, PCMK_XA_NETWORK); xml_obj = pcmk__xe_first_child(rsc->xml, PCMK_XE_NETWORK, NULL, NULL); if(xml_obj) { bundle_data->ip_range_start = crm_element_value_copy(xml_obj, PCMK_XA_IP_RANGE_START); bundle_data->host_netmask = crm_element_value_copy(xml_obj, PCMK_XA_HOST_NETMASK); 
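/* Remaining <network> settings: host interface, control port, whether to add hosts entries for replicas, and any port mappings */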
bundle_data->host_network = crm_element_value_copy(xml_obj, PCMK_XA_HOST_INTERFACE); bundle_data->control_port = crm_element_value_copy(xml_obj, PCMK_XA_CONTROL_PORT); value = crm_element_value(xml_obj, PCMK_XA_ADD_HOST); if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) { bundle_data->add_host = TRUE; } for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_PORT_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next_same(xml_child)) { pe__bundle_port_t *port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); port->source = crm_element_value_copy(xml_child, PCMK_XA_PORT); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, PCMK_XA_RANGE); } else { port->target = crm_element_value_copy(xml_child, PCMK_XA_INTERNAL_PORT); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pcmk__config_err("Invalid " PCMK_XA_PORT " directive %s", pcmk__xe_id(xml_child)); port_free(port); } } } xml_obj = pcmk__xe_first_child(rsc->xml, PCMK_XE_STORAGE, NULL, NULL); for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_STORAGE_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next_same(xml_child)) { const char *source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR); const char *target = crm_element_value(xml_child, PCMK_XA_TARGET_DIR); const char *options = crm_element_value(xml_child, PCMK_XA_OPTIONS); int flags = pe__bundle_mount_none; if (source == NULL) { source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR_ROOT); pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pcmk__config_err("Invalid mount directive %s", pcmk__xe_id(xml_child)); } } xml_obj = pcmk__xe_first_child(rsc->xml, PCMK_XE_PRIMITIVE, NULL, NULL); if (xml_obj && valid_network(bundle_data)) { char *value = NULL; xmlNode *xml_set = NULL; xml_resource = pcmk__xe_create(NULL, PCMK_XE_CLONE); /* @COMPAT We no longer use the tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) */ crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix, (bundle_data->promoted_max? 
"master" : (const char *)xml_resource->name)); xml_set = pcmk__xe_create(xml_resource, PCMK_XE_META_ATTRIBUTES); crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_ORDERED, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value); free(value); value = pcmk__itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_GLOBALLY_UNIQUE, pcmk__btoa(bundle_data->nreplicas_per_host > 1)); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTABLE, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, PCMK_XA_ID, bundle_data->prefix); pcmk__xml_copy(xml_resource, xml_obj); } else if(xml_obj) { pcmk__config_err("Cannot control %s inside %s without either " PCMK_XA_IP_RANGE_START " or " PCMK_XA_CONTROL_PORT, rsc->id, pcmk__xe_id(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GList *childIter = NULL; pe__bundle_port_t *port = NULL; GString *buffer = NULL; if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc, scheduler) != pcmk_rc_ok) { return FALSE; } /* Currently, we always map the default authentication key location * into the same location inside the container. * * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting * PCMK_XA_CONTROL_PORT. 
*/ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = g_string_sized_new(1024); for (childIter = bundle_data->child->children; childIter != NULL; childIter = childIter->next) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->child = childIter->data; replica->child->exclusive_discover = TRUE; replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) { pcmk__set_rsc_flags(bundle_data->child, pcmk_rsc_notify); } allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET); } bundle_data->container_host_options = g_string_free(buffer, FALSE); if (bundle_data->attribute_target) { pcmk__insert_dup(rsc->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); pcmk__insert_dup(bundle_data->child->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); } } else { // Just a naked container, no pacemaker-remote GString *buffer = g_string_sized_new(1024); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->offset = lpc; allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = g_string_free(buffer, FALSE); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) { pcmk__config_err("Failed unpacking resource %s", rsc->id); rsc->fns->free(rsc); return FALSE; } /* Utilization needs special handling for bundles. It makes no sense for * the inner primitive to have utilization, because it is tied * one-to-one to the guest node created by the container resource -- and * there's no way to set capacities for that guest node anyway. * * What the user really wants is to configure utilization for the * container. However, the schema only allows utilization for * primitives, and the container resource is implicit anyway, so the * user can *only* configure utilization for the inner primitive. If * they do, move the primitive's utilization values to the container. * * @TODO This means that bundles without an inner primitive can't have * utilization. An alternative might be to allow utilization values in * the top-level bundle XML in the schema, and copy those to each * container. 
*/ if (replica->child != NULL) { GHashTable *empty = replica->container->utilization; replica->container->utilization = replica->child->utilization; replica->child->utilization = empty; } } if (bundle_data->child) { rsc->children = g_list_append(rsc->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pcmk_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all) { pe__bundle_variant_data_t *bundle_data = NULL; GList *iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! * \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pcmk_resource_t * pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_ASSERT(bundle && node); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica && replica->node); if (pcmk__same_node(replica->node, node)) { return replica->child; } } return NULL; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_rsc_in_list(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
  • "); } rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } /*! * \internal * \deprecated This function will be removed in a future release */ static void bundle_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_strdup_printf("%s ", pre_text); get_bundle_variant_data(bundle_data, rsc); status_print("%sid); status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type)); status_print("image=\"%s\" ", bundle_data->image); status_print("unique=\"%s\" ", pcmk__flag_text(rsc->flags, pcmk_rsc_unique)); status_print("managed=\"%s\" ", pcmk__flag_text(rsc->flags, pcmk_rsc_managed)); status_print("failed=\"%s\" ", pcmk__flag_text(rsc->flags, pcmk_rsc_failed)); status_print(">\n"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); status_print("%s \n", pre_text, replica->offset); print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; const char *desc = NULL; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; char *id = NULL; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) { continue; } if (!printed_header) { const char *type = container_agent_str(bundle_data->agent_type); const char *unique = pcmk__flag_text(rsc->flags, pcmk_rsc_unique); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk_rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk_rsc_managed); const char *failed = pcmk__flag_text(rsc->flags, pcmk_rsc_failed); printed_header = TRUE; desc = pe__resource_description(rsc, show_opts); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_BUNDLE, 8, PCMK_XA_ID, rsc->id, PCMK_XA_TYPE, type, PCMK_XA_IMAGE, bundle_data->image, PCMK_XA_UNIQUE, 
unique, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_FAILED, failed, PCMK_XA_DESCRIPTION, desc); CRM_ASSERT(rc == pcmk_rc_ok); } id = pcmk__itoa(replica->offset); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_REPLICA, 1, PCMK_XA_ID, id); free(id); CRM_ASSERT(rc == pcmk_rc_ok); if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), show_opts, replica->remote, only_node, only_rsc); } pcmk__output_xml_pop_parent(out); // replica } if (printed_header) { pcmk__output_xml_pop_parent(out); // bundle } return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_html(out, rsc, buffer, node, show_opts); } /*! * \internal * \brief Get a string describing a resource's unmanaged state or lack thereof * * \param[in] rsc Resource to describe * * \return A string indicating that a resource is in maintenance mode or * otherwise unmanaged, or an empty string otherwise */ static const char * get_unmanaged_str(const pcmk_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) { return " (maintenance)"; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { return " (unmanaged)"; } return ""; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); desc = pe__resource_description(rsc, show_opts); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, 
print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset); } if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } if (pcmk__list_of_multiple(bundle_data->replicas)) { out->end_list(out); } } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_html(out, replica, pcmk__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { const pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_text(out, rsc, buffer, node, show_opts); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; desc = pe__resource_description(rsc, show_opts); get_bundle_variant_data(bundle_data, rsc); CRM_ASSERT(rsc != NULL); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, 
only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } out->end_list(out); } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_text(out, replica, pcmk__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_bundle_replica(pcmk__bundle_replica_t *replica, const char *pre_text, long options, void *print_data) { pcmk_node_t *node = NULL; pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pcmk__current_node(replica->container); common_print(rsc, pre_text, buffer, node, options, print_data); } /*! 
* \internal * \deprecated This function will be removed in a future release */ void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { bundle_print_xml(rsc, pre_text, options, print_data); return; } get_bundle_variant_data(bundle_data, rsc); if (pre_text == NULL) { pre_text = " "; } status_print("%sContainer bundle%s: %s [%s]%s%s\n", pre_text, ((bundle_data->nreplicas > 1)? " set" : ""), rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "", pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (options & pe_print_html) { status_print("<li>"); } if (pcmk_is_set(options, pe_print_implicit)) { child_text = crm_strdup_printf(" %s", pre_text); if (pcmk__list_of_multiple(bundle_data->replicas)) { status_print(" %sReplica[%d]\n", pre_text, replica->offset); } if (options & pe_print_html) { status_print("<br />\n<ul>\n"); } print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); if (options & pe_print_html) { status_print("</ul>\n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); print_bundle_replica(replica, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("</li>\n"); } } if (options & pe_print_html) { status_print("</ul>
    \n"); } } static void free_bundle_replica(pcmk__bundle_replica_t *replica) { if (replica == NULL) { return; } if (replica->node) { free(replica->node); replica->node = NULL; } if (replica->ip) { free_xml(replica->ip->xml); replica->ip->xml = NULL; replica->ip->fns->free(replica->ip); replica->ip = NULL; } if (replica->container) { free_xml(replica->container->xml); replica->container->xml = NULL; replica->container->fns->free(replica->container); replica->container = NULL; } if (replica->remote) { free_xml(replica->remote->xml); replica->remote->xml = NULL; replica->remote->fns->free(replica->remote); replica->remote = NULL; } free(replica->ipaddr); free(replica); } void pe__free_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); g_free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); if(bundle_data->child) { free_xml(bundle_data->child->xml); bundle_data->child->xml = NULL; bundle_data->child->fns->free(bundle_data->child); } common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current) { enum rsc_role_e container_role = pcmk_role_unknown; return container_role; } /*! 
* \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const pcmk_resource_t *rsc) { if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) { return 0; } else { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } } void pe__count_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pcmk__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->fns->count(replica->ip); } if (replica->child) { replica->child->fns->count(replica->child); } if (replica->container) { replica->container->fns->count(replica->container); } if (replica->remote) { replica->remote->fns->count(replica->remote); } } } gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; pe__bundle_variant_data_t *bundle_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) { passes = TRUE; break; } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } /*! * \internal * \brief Get a list of a bundle's containers * * \param[in] bundle Bundle resource * * \return Newly created list of \p bundle's containers * \note It is the caller's responsibility to free the result with * g_list_free(). */ GList * pe__bundle_containers(const pcmk_resource_t *bundle) { GList *containers = NULL; const pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, bundle); for (GList *iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; containers = g_list_append(containers, replica->container); } return containers; } // Bundle implementation of pcmk_rsc_methods_t:active_node() pcmk_node_t * pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pcmk_node_t *active = NULL; pcmk_node_t *node = NULL; pcmk_resource_t *container = NULL; GList *containers = NULL; GList *iter = NULL; GHashTable *nodes = NULL; const pe__bundle_variant_data_t *data = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } /* For the purposes of this method, we only care about where the bundle's * containers are active, so build a list of active containers. 
*/ get_bundle_variant_data(data, rsc); for (iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (replica->container->running_on != NULL) { containers = g_list_append(containers, replica->container); } } if (containers == NULL) { return NULL; } /* If the bundle has only a single active container, just use that * container's method. If live migration is ever supported for bundle * containers, this will allow us to prefer the migration source when there * is only one container and it is migrating. For now, this just lets us * avoid creating the nodes table. */ if (pcmk__list_of_1(containers)) { container = containers->data; node = container->fns->active_node(container, count_all, count_clean); g_list_free(containers); return node; } // Add all containers' active nodes to a hash table (for uniqueness) nodes = g_hash_table_new(NULL, NULL); for (iter = containers; iter != NULL; iter = iter->next) { container = iter->data; for (GList *node_iter = container->running_on; node_iter != NULL; node_iter = node_iter->next) { node = node_iter->data; // If insert returns true, we haven't counted this node yet if (g_hash_table_insert(nodes, (gpointer) node->details, (gpointer) node) && !pe__count_active_node(rsc, node, &active, count_all, count_clean)) { goto done; } } } done: g_list_free(containers); g_hash_table_destroy(nodes); return active; } /*! * \internal * \brief Get maximum bundle resource instances per node * * \param[in] rsc Bundle resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); CRM_ASSERT(bundle_data->nreplicas_per_host >= 0); return (unsigned int) bundle_data->nreplicas_per_host; } diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c index 10d472484c..1d75f22a56 100644 --- a/lib/pengine/pe_digest.c +++ b/lib/pengine/pe_digest.c @@ -1,611 +1,611 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include "pe_status_private.h" extern bool pcmk__is_daemon; /*! * \internal * \brief Free an operation digest cache entry * * \param[in,out] ptr Pointer to cache entry to free * * \note The argument is a gpointer so this can be used as a hash table * free function. 
*/ void pe__free_digests(gpointer ptr) { pcmk__op_digest_t *data = ptr; if (data != NULL) { free_xml(data->params_all); free_xml(data->params_secure); free_xml(data->params_restart); free(data->digest_all_calc); free(data->digest_restart_calc); free(data->digest_secure_calc); free(data); } } // Return true if XML attribute name is not substring of a given string static bool attr_not_in_string(xmlAttrPtr a, void *user_data) { bool filter = false; char *name = crm_strdup_printf(" %s ", (const char *) a->name); if (strstr((const char *) user_data, name) == NULL) { crm_trace("Filtering %s (not found in '%s')", (const char *) a->name, (const char *) user_data); filter = true; } free(name); return filter; } // Return true if XML attribute name is substring of a given string static bool attr_in_string(xmlAttrPtr a, void *user_data) { bool filter = false; char *name = crm_strdup_printf(" %s ", (const char *) a->name); if (strstr((const char *) user_data, name) != NULL) { crm_trace("Filtering %s (found in '%s')", (const char *) a->name, (const char *) user_data); filter = true; } free(name); return filter; } /*! * \internal * \brief Add digest of all parameters to a digest cache entry * * \param[out] data Digest cache entry to modify * \param[in,out] rsc Resource that action was for * \param[in] node Node action was performed on * \param[in] params Resource parameters evaluated for node * \param[in] task Name of action performed * \param[in,out] interval_ms Action's interval (will be reset if in overrides) * \param[in] xml_op Unused * \param[in] op_version CRM feature set to use for digest calculation * \param[in] overrides Key/value table to override resource parameters * \param[in,out] scheduler Scheduler data */ static void calculate_main_digest(pcmk__op_digest_t *data, pcmk_resource_t *rsc, const pcmk_node_t *node, GHashTable *params, const char *task, guint *interval_ms, const xmlNode *xml_op, const char *op_version, GHashTable *overrides, pcmk_scheduler_t *scheduler) { xmlNode *action_config = NULL; data->params_all = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS); /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers * that themselves are Pacemaker Remote nodes */ - (void) pe__add_bundle_remote_name(rsc, scheduler, data->params_all, + (void) pe__add_bundle_remote_name(rsc, data->params_all, PCMK_REMOTE_RA_ADDR); if (overrides != NULL) { // If interval was overridden, reset it const char *meta_name = CRM_META "_" PCMK_META_INTERVAL; const char *interval_s = g_hash_table_lookup(overrides, meta_name); if (interval_s != NULL) { long long value_ll; if ((pcmk__scan_ll(interval_s, &value_ll, 0LL) == pcmk_rc_ok) && (value_ll >= 0) && (value_ll <= G_MAXUINT)) { *interval_ms = (guint) value_ll; } } // Add overrides to list of all parameters g_hash_table_foreach(overrides, hash2field, data->params_all); } // Add provided instance parameters g_hash_table_foreach(params, hash2field, data->params_all); // Find action configuration XML in CIB action_config = pcmk__find_action_config(rsc, task, *interval_ms, true); /* Add action-specific resource instance attributes to the digest list. * * If this is a one-time action with action-specific instance attributes, * enforce a restart instead of reload-agent in case the main digest doesn't * match, even if the restart digest does. This ensures any changes of the * action-specific parameters get applied for this specific action, and * digests calculated for the resulting history will be correct. 
Default the * result to RSC_DIGEST_RESTART for the case where the main digest doesn't * match. */ params = pcmk__unpack_action_rsc_params(action_config, node->details->attrs, scheduler); if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) { data->rc = pcmk__digest_restart; } g_hash_table_foreach(params, hash2field, data->params_all); g_hash_table_destroy(params); // Add action meta-attributes params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms, action_config); g_hash_table_foreach(params, hash2metafield, data->params_all); g_hash_table_destroy(params); pcmk__filter_op_for_digest(data->params_all); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); } // Return true if XML attribute name is a Pacemaker-defined fencing parameter static bool is_fence_param(xmlAttrPtr attr, void *user_data) { return pcmk_stonith_param((const char *) attr->name); } /*! * \internal * \brief Add secure digest to a digest cache entry * * \param[out] data Digest cache entry to modify * \param[in] rsc Resource that action was for * \param[in] params Resource parameters evaluated for node * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] op_version CRM feature set to use for digest calculation * \param[in] overrides Key/value hash table to override resource parameters */ static void calculate_secure_digest(pcmk__op_digest_t *data, const pcmk_resource_t *rsc, GHashTable *params, const xmlNode *xml_op, const char *op_version, GHashTable *overrides) { const char *class = crm_element_value(rsc->xml, PCMK_XA_CLASS); const char *secure_list = NULL; bool old_version = (compare_version(op_version, "3.16.0") < 0); if (xml_op == NULL) { secure_list = " passwd password user "; } else { secure_list = crm_element_value(xml_op, PCMK__XA_OP_SECURE_PARAMS); } if (old_version) { data->params_secure = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS); if (overrides != NULL) { g_hash_table_foreach(overrides, hash2field, data->params_secure); } g_hash_table_foreach(params, hash2field, data->params_secure); } else { // Start with a copy of all parameters data->params_secure = pcmk__xml_copy(NULL, data->params_all); } if (secure_list != NULL) { pcmk__xe_remove_matching_attrs(data->params_secure, attr_in_string, (void *) secure_list); } if (old_version && pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params)) { /* For stonith resources, Pacemaker adds special parameters, * but these are not listed in fence agent meta-data, so with older * versions of DC, the controller will not hash them. That means we have * to filter them out before calculating our hash for comparison. */ pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param, NULL); } pcmk__filter_op_for_digest(data->params_secure); /* CRM_meta_timeout *should* be part of a digest for recurring operations. * However, with older versions of DC, the controller does not add timeout * to secure digests, because it only includes parameters declared by the * resource agent. * Remove any timeout that made it this far, to match. */ if (old_version) { pcmk__xe_remove_attr(data->params_secure, CRM_META "_" PCMK_META_TIMEOUT); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } /*! 
* \internal * \brief Add restart digest to a digest cache entry * * \param[out] data Digest cache entry to modify * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] op_version CRM feature set to use for digest calculation * * \note This function doesn't need to handle overrides because it starts with * data->params_all, which already has overrides applied. */ static void calculate_restart_digest(pcmk__op_digest_t *data, const xmlNode *xml_op, const char *op_version) { const char *value = NULL; // We must have XML of resource operation history if (xml_op == NULL) { return; } // And the history must have a restart digest to compare against if (crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST) == NULL) { return; } // Start with a copy of all parameters data->params_restart = pcmk__xml_copy(NULL, data->params_all); // Then filter out reloadable parameters, if any value = crm_element_value(xml_op, PCMK__XA_OP_FORCE_RESTART); if (value != NULL) { pcmk__xe_remove_matching_attrs(data->params_restart, attr_not_in_string, (void *) value); } value = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET); data->digest_restart_calc = calculate_operation_digest(data->params_restart, value); } /*! * \internal * \brief Create a new digest cache entry with calculated digests * * \param[in,out] rsc Resource that action was for * \param[in] task Name of action performed * \param[in,out] interval_ms Action's interval (will be reset if in overrides) * \param[in] node Node action was performed on * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] overrides Key/value table to override resource parameters * \param[in] calc_secure Whether to calculate secure digest * \param[in,out] scheduler Scheduler data * * \return Pointer to new digest cache entry (or NULL on memory error) * \note It is the caller's responsibility to free the result using * pe__free_digests(). */ pcmk__op_digest_t * pe__calculate_digests(pcmk_resource_t *rsc, const char *task, guint *interval_ms, const pcmk_node_t *node, const xmlNode *xml_op, GHashTable *overrides, bool calc_secure, pcmk_scheduler_t *scheduler) { pcmk__op_digest_t *data = calloc(1, sizeof(pcmk__op_digest_t)); const char *op_version = NULL; GHashTable *params = NULL; if (data == NULL) { pcmk__sched_err("Could not allocate memory for operation digest"); return NULL; } data->rc = pcmk__digest_match; if (xml_op != NULL) { op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET); } if (op_version == NULL && scheduler != NULL && scheduler->input != NULL) { op_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET); } if (op_version == NULL) { op_version = CRM_FEATURE_SET; } params = pe_rsc_params(rsc, node, scheduler); calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op, op_version, overrides, scheduler); if (calc_secure) { calculate_secure_digest(data, rsc, params, xml_op, op_version, overrides); } calculate_restart_digest(data, xml_op, op_version); return data; } /*! 
* \internal * \brief Calculate action digests and store in node's digest cache * * \param[in,out] rsc Resource that action was for * \param[in] task Name of action performed * \param[in] interval_ms Action's interval * \param[in,out] node Node action was performed on * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] calc_secure Whether to calculate secure digest * \param[in,out] scheduler Scheduler data * * \return Pointer to node's digest cache entry */ static pcmk__op_digest_t * rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms, pcmk_node_t *node, const xmlNode *xml_op, bool calc_secure, pcmk_scheduler_t *scheduler) { pcmk__op_digest_t *data = NULL; char *key = pcmk__op_key(rsc->id, task, interval_ms); data = g_hash_table_lookup(node->details->digest_cache, key); if (data == NULL) { data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op, NULL, calc_secure, scheduler); CRM_ASSERT(data != NULL); g_hash_table_insert(node->details->digest_cache, strdup(key), data); } free(key); return data; } /*! * \internal * \brief Calculate operation digests and compare against an XML history entry * * \param[in,out] rsc Resource to check * \param[in] xml_op Resource history XML * \param[in,out] node Node to use for digest calculation * \param[in,out] scheduler Scheduler data * * \return Pointer to node's digest cache entry, with comparison result set */ pcmk__op_digest_t * rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk__op_digest_t *data = NULL; guint interval_ms = 0; const char *op_version; const char *task = crm_element_value(xml_op, PCMK_XA_OPERATION); const char *digest_all; const char *digest_restart; CRM_ASSERT(node != NULL); op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET); digest_all = crm_element_value(xml_op, PCMK__XA_OP_DIGEST); digest_restart = crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST); crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms); data = rsc_action_digest(rsc, task, interval_ms, node, xml_op, pcmk_is_set(scheduler->flags, pcmk_sched_sanitized), scheduler); if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { pcmk__rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s " "changed: hash was %s vs. now %s (restart:%s) %s", interval_ms, task, rsc->id, pcmk__node_name(node), pcmk__s(digest_restart, "missing"), data->digest_restart_calc, op_version, crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC)); data->rc = pcmk__digest_restart; } else if (digest_all == NULL) { /* It is unknown what the previous op digest was */ data->rc = pcmk__digest_unknown; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { /* Given a non-recurring operation with extra parameters configured, if the * main digest doesn't match, enforce a restart rather than a reload-agent * even when the restart digest matches. This ensures that any changes to * the extra parameters get applied for this specific operation, and that * the digests calculated for the resulting PCMK__XE_LRM_RSC_OP will be * correct. Preserve the implied rc pcmk__digest_restart for the case where * the main digest doesn't match. */ if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) { pcmk__rsc_info(rsc, "Parameters containing extra ones to %ums-interval" " %s action for %s on %s " "changed: hash was %s vs."
now %s (restart:%s) %s", interval_ms, task, rsc->id, pcmk__node_name(node), pcmk__s(digest_all, "missing"), data->digest_all_calc, op_version, crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC)); } else { pcmk__rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s " "changed: hash was %s vs. now %s (%s:%s) %s", interval_ms, task, rsc->id, pcmk__node_name(node), pcmk__s(digest_all, "missing"), data->digest_all_calc, (interval_ms > 0)? "reschedule" : "reload", op_version, crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC)); data->rc = pcmk__digest_mismatch; } } else { data->rc = pcmk__digest_match; } return data; } /*! * \internal * \brief Create an unfencing summary for use in special node attribute * * Create a string combining a fence device's resource ID, agent type, and * parameter digest (whether for all parameters or just non-private parameters). * This can be stored in a special node attribute, allowing us to detect changes * in either the agent type or parameters, to know whether unfencing must be * redone or can be safely skipped when the device's history is cleaned. * * \param[in] rsc_id Fence device resource ID * \param[in] agent_type Fence device agent * \param[in] param_digest Fence device parameter digest * * \return Newly allocated string with unfencing digest * \note The caller is responsible for freeing the result. */ static inline char * create_unfencing_summary(const char *rsc_id, const char *agent_type, const char *param_digest) { return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest); } /*! * \internal * \brief Check whether a node can skip unfencing * * Check whether a fence device's current definition matches a node's * stored summary of when it was last unfenced by the device. * * \param[in] rsc_id Fence device's resource ID * \param[in] agent Fence device's agent type * \param[in] digest_calc Fence device's current parameter digest * \param[in] node_summary Value of node's special unfencing node attribute * (a comma-separated list of unfencing summaries for * all devices that have unfenced this node) * * \return TRUE if digest matches, FALSE otherwise */ static bool unfencing_digest_matches(const char *rsc_id, const char *agent, const char *digest_calc, const char *node_summary) { bool matches = FALSE; if (rsc_id && agent && digest_calc && node_summary) { char *search_secure = create_unfencing_summary(rsc_id, agent, digest_calc); /* The digest was calculated including the device ID and agent, * so there is no risk of collision using strstr(). */ matches = (strstr(node_summary, search_secure) != NULL); crm_trace("Calculated unfencing digest '%s' %sfound in '%s'", search_secure, matches? "" : "not ", node_summary); free(search_secure); } return matches; } /* Magic string to use as action name for digest cache entries used for * unfencing checks. This is not a real action name (i.e. "on"), so * pcmk__check_action_config() won't confuse these entries with real actions. */ #define STONITH_DIGEST_TASK "stonith-on" /*! 
* \internal * \brief Calculate fence device digests and digest comparison result * * \param[in,out] rsc Fence device resource * \param[in] agent Fence device's agent type * \param[in,out] node Node with digest cache to use * \param[in,out] scheduler Scheduler data * * \return Node's digest cache entry */ pcmk__op_digest_t * pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const char *node_summary = NULL; // Calculate device's current parameter digests pcmk__op_digest_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U, node, NULL, TRUE, scheduler); // Check whether node has special unfencing summary node attribute node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_ALL, NULL, pcmk__rsc_node_current); if (node_summary == NULL) { data->rc = pcmk__digest_unknown; return data; } // Check whether full parameter digest matches if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc, node_summary)) { data->rc = pcmk__digest_match; return data; } // Check whether secure parameter digest matches node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_SECURE, NULL, pcmk__rsc_node_current); if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc, node_summary)) { data->rc = pcmk__digest_match; if (!pcmk__is_daemon && scheduler->priv != NULL) { pcmk__output_t *out = scheduler->priv; out->info(out, "Only 'private' parameters to %s " "for unfencing %s changed", rsc->id, pcmk__node_name(node)); } return data; } // Parameters don't match data->rc = pcmk__digest_mismatch; if (pcmk_is_set(scheduler->flags, pcmk_sched_sanitized) && (data->digest_secure_calc != NULL)) { if (scheduler->priv != NULL) { pcmk__output_t *out = scheduler->priv; char *digest = create_unfencing_summary(rsc->id, agent, data->digest_secure_calc); out->info(out, "Parameters to %s for unfencing " "%s changed, try '%s'", rsc->id, pcmk__node_name(node), digest); free(digest); } else if (!pcmk__is_daemon) { char *digest = create_unfencing_summary(rsc->id, agent, data->digest_secure_calc); printf("Parameters to %s for unfencing %s changed, try '%s'\n", rsc->id, pcmk__node_name(node), digest); free(digest); } } return data; }
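For reference, a minimal sketch of how an internal caller might use the digest API touched by this change. Only pe__calculate_digests(), pe__free_digests(), the pcmk__op_digest_t fields, and the call signature are taken from the code above; the helper name monitor_params_changed(), the 10-second interval, the stored_digest argument, and the exact header paths are illustrative assumptions, not part of Pacemaker.

#include <stdbool.h>
#include <string.h>
#include <glib.h>

#include <crm/pengine/internal.h>   /* pe__calculate_digests(), pe__free_digests();
                                     * assumed to be the internal header declaring them */

/* Hypothetical caller (not part of Pacemaker): compute the digests for a
 * resource's recurring monitor on a node and report whether the calculated
 * "all parameters" digest differs from a previously stored digest. The
 * stored_digest argument stands in for the PCMK__XA_OP_DIGEST value that
 * rsc_action_digest_cmp() reads from resource history.
 */
static bool
monitor_params_changed(pcmk_resource_t *rsc, const pcmk_node_t *node,
                       pcmk_scheduler_t *scheduler, const char *stored_digest)
{
    guint interval_ms = 10000;  // 10s monitor; may be reset if overridden
    bool changed = true;

    // No history XML, no overrides, and skip the secure digest
    pcmk__op_digest_t *digests = pe__calculate_digests(rsc, "monitor",
                                                       &interval_ms, node,
                                                       NULL, NULL, false,
                                                       scheduler);

    if (digests == NULL) {      // Allocation failure (already logged by the API)
        return true;            // Treat as changed to stay on the safe side
    }
    if ((stored_digest != NULL) && (digests->digest_all_calc != NULL)) {
        changed = (strcmp(stored_digest, digests->digest_all_calc) != 0);
    }
    pe__free_digests(digests);
    return changed;
}

A real caller would normally go through rsc_action_digest_cmp() instead, so that the result is cached in the node's digest cache and the comparison against the restart and secure digests is handled consistently; the sketch only isolates the calculate/free lifecycle.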
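The unfencing check above relies on a simple "rsc-id:agent:digest" summary stored in a comma-separated node attribute, matched with strstr(). The standalone sketch below illustrates just that matching logic; the attribute value, digest strings, and the match_summary() helper are made up for illustration and are not Pacemaker code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for unfencing_digest_matches(): the node attribute
 * holds a comma-separated list of "rsc-id:agent:digest" summaries, and a
 * device's current summary only needs to appear somewhere in that list.
 */
static bool
match_summary(const char *summary, const char *node_attr_value)
{
    return (summary != NULL) && (node_attr_value != NULL)
           && (strstr(node_attr_value, summary) != NULL);
}

int
main(void)
{
    // Hypothetical values; real digests are hashes of the parameter XML
    const char *node_attr = "fence-ipmi-1:fence_ipmilan:0123456789abcdef,"
                            "fence-pdu:fence_apc:fedcba9876543210";
    const char *current = "fence-ipmi-1:fence_ipmilan:0123456789abcdef";

    printf("unfencing can be skipped: %s\n",
           match_summary(current, node_attr) ? "yes" : "no");
    return 0;
}

Because each summary embeds the device ID and agent alongside the digest, a substring match is collision-safe enough in practice, which is why the source compares with strstr() rather than tokenizing the comma-separated list.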