diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h index 90afaca6ad..16843660ba 100644 --- a/include/crm/pengine/pe_types.h +++ b/include/crm/pengine/pe_types.h @@ -1,447 +1,446 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_PE_TYPES__H # define PCMK__CRM_PENGINE_PE_TYPES__H # include // bool # include // time_t # include // xmlNode # include // gboolean, guint, GList, GHashTable # include # include # include #ifdef __cplusplus extern "C" { #endif /*! * \file * \brief Data types for cluster status * \ingroup pengine */ typedef struct pe_node_s pe_node_t; typedef struct pe_action_s pe_action_t; typedef struct pe_resource_s pe_resource_t; typedef struct pe_working_set_s pe_working_set_t; typedef struct resource_object_functions_s { gboolean (*unpack) (pe_resource_t*, pe_working_set_t*); pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search, const pe_node_t *node, int flags); /* parameter result must be free'd */ char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*, pe_working_set_t*); //! \deprecated will be removed in a future release void (*print) (pe_resource_t*, const char*, long, void*); gboolean (*active) (pe_resource_t*, gboolean); enum rsc_role_e (*state) (const pe_resource_t*, gboolean); pe_node_t *(*location) (const pe_resource_t*, GList**, int); void (*free) (pe_resource_t*); void (*count) (pe_resource_t*); gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean); /*! * \brief Find a node (and optionally count all) where resource is active * * \param[in] rsc Resource to check * \param[out] count_all If not NULL, set this to count of active nodes * \param[out] count_clean If not NULL, set this to count of clean nodes * * \return A node where the resource is active, preferring the source node * if the resource is involved in a partial migration or a clean, * online node if the resource's "requires" is "quorum" or * "nothing", or NULL if the resource is inactive. */ pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); /*! * \brief Get maximum resource instances per node * * \param[in] rsc Resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int (*max_per_node)(const pe_resource_t *rsc); } resource_object_functions_t; typedef struct resource_alloc_functions_s resource_alloc_functions_t; struct pe_working_set_s { xmlNode *input; crm_time_t *now; /* options extracted from the input */ char *dc_uuid; pe_node_t *dc_node; const char *stonith_action; const char *placement_strategy; unsigned long long flags; int stonith_timeout; enum pe_quorum_policy no_quorum_policy; GHashTable *config_hash; GHashTable *tickets; // Actions for which there can be only one (e.g. fence nodeX) GHashTable *singletons; GList *nodes; GList *resources; GList *placement_constraints; GList *ordering_constraints; GList *colocation_constraints; GList *ticket_constraints; GList *actions; xmlNode *failed; xmlNode *op_defaults; xmlNode *rsc_defaults; /* stats */ int num_synapse; int max_valid_nodes; //! 
Deprecated (will be removed in a future release)
    int order_id;
    int action_id;

    /* final output */
    xmlNode *graph;

    GHashTable *template_rsc_sets;
    const char *localhost;
    GHashTable *tags;

    int blocked_resources;
    int disabled_resources;

    GList *param_check;     // History entries that need to be checked
    GList *stop_needed;     // Containers that need stop actions
    time_t recheck_by;      // Hint to controller to re-run scheduler by this time
    int ninstances;         // Total number of resource instances
    guint shutdown_lock;    // How long (seconds) to lock resources to shutdown node
    int priority_fencing_delay;     // Priority fencing delay

    void *priv;

    guint node_pending_timeout;     // Node pending timeout
};

struct pe_node_shared_s {
    const char *id;
    const char *uname;
    enum node_type type;

    /* @TODO convert these flags into a bitfield */
    gboolean online;
    gboolean standby;
    gboolean standby_onfail;
    gboolean pending;
    gboolean unclean;
    gboolean unseen;
    gboolean shutdown;
    gboolean expected_up;
    gboolean is_dc;
    gboolean maintenance;

    gboolean rsc_discovery_enabled;
    gboolean remote_requires_reset;
    gboolean remote_was_fenced;
    gboolean remote_maintenance;    /* what the remote-rsc is thinking */

    gboolean unpacked;

    int num_resources;
    pe_resource_t *remote_rsc;
    GList *running_rsc;         /* pe_resource_t* */
    GList *allocated_rsc;       /* pe_resource_t* */

    GHashTable *attrs;          /* char* => char* */
    GHashTable *utilization;
    GHashTable *digest_cache;   //!< cache of calculated resource digests

    int priority;   // calculated based on the priority of resources running on the node
    pe_working_set_t *data_set;     //!< Cluster that this node is part of
};

struct pe_node_s {
    int weight;
    gboolean fixed;     //!< \deprecated Will be removed in a future release
    int count;
    struct pe_node_shared_s *details;
    int rsc_discover_mode;
};

-# define pe_rsc_stop_unexpected     0x00400000ULL
# define pe_rsc_allow_migrate       0x00800000ULL
# define pe_rsc_failure_ignored     0x01000000ULL
# define pe_rsc_replica_container   0x02000000ULL
# define pe_rsc_maintenance         0x04000000ULL
# define pe_rsc_is_container        0x08000000ULL
# define pe_rsc_needs_quorum        0x10000000ULL
# define pe_rsc_needs_fencing       0x20000000ULL
# define pe_rsc_needs_unfencing     0x40000000ULL

/* *INDENT-OFF* */
enum pe_action_flags {
    pe_action_pseudo            = 0x00001,
    pe_action_runnable          = 0x00002,
    pe_action_optional          = 0x00004,
    pe_action_print_always      = 0x00008,

    pe_action_have_node_attrs   = 0x00010,
    pe_action_implied_by_stonith = 0x00040,
    pe_action_migrate_runnable  = 0x00080,

    pe_action_dumped            = 0x00100,
    pe_action_processed         = 0x00200,
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
    pe_action_clear             = 0x00400, //! \deprecated Unused
#endif
    pe_action_dangle            = 0x00800,

    /* This action requires one or more of its dependencies to be runnable.
     * We use this to clear the runnable flag before checking dependencies.
     */
    pe_action_requires_any      = 0x01000,

    pe_action_reschedule        = 0x02000,
    pe_action_tracking          = 0x04000,
    pe_action_dedup             = 0x08000, //! Internal state tracking when creating graph

    pe_action_dc                = 0x10000, //!
Action may run on DC instead of target
};
/* *INDENT-ON* */

struct pe_resource_s {
    char *id;
    char *clone_name;
    xmlNode *xml;
    xmlNode *orig_xml;
    xmlNode *ops_xml;

    pe_working_set_t *cluster;
    pe_resource_t *parent;

    enum pe_obj_types variant;
    void *variant_opaque;
    resource_object_functions_t *fns;
    resource_alloc_functions_t *cmds;

    enum rsc_recovery_type recovery_type;
    enum pe_restart restart_type;   //!< \deprecated will be removed in future release

    int priority;
    int stickiness;
    int sort_index;
    int failure_timeout;
    int migration_threshold;
    guint remote_reconnect_ms;
    char *pending_task;

    unsigned long long flags;

    // @TODO merge these into flags
    gboolean is_remote_node;
    gboolean exclusive_discover;

    /* Pay special attention to whether you want to use rsc_cons_lhs and
     * rsc_cons directly, which include only colocations explicitly involving
     * this resource, or call libpacemaker's pcmk__with_this_colocations() and
     * pcmk__this_with_colocations() functions, which may return relevant
     * colocations involving the resource's ancestors as well.
     */

    //!@{
    //! This field should be treated as internal to Pacemaker
    GList *rsc_cons_lhs;        // List of pcmk__colocation_t*
    GList *rsc_cons;            // List of pcmk__colocation_t*
    GList *rsc_location;        // List of pe__location_t*
    GList *actions;             // List of pe_action_t*
    GList *rsc_tickets;         // List of rsc_ticket*
    //!@}

    pe_node_t *allocated_to;
    pe_node_t *partial_migration_target;
    pe_node_t *partial_migration_source;
    GList *running_on;          /* pe_node_t* */
    GHashTable *known_on;       /* pe_node_t* */
    GHashTable *allowed_nodes;  /* pe_node_t* */

    enum rsc_role_e role;
    enum rsc_role_e next_role;

    GHashTable *meta;
    GHashTable *parameters;     //! \deprecated Use pe_rsc_params() instead
    GHashTable *utilization;

    GList *children;            /* pe_resource_t* */
    GList *dangling_migrations; /* pe_node_t* */

    pe_resource_t *container;
    GList *fillers;

    // @COMPAT These should be made const at next API compatibility break
    pe_node_t *pending_node;    // Node on which pending_task is happening
    pe_node_t *lock_node;       // Resource is shutdown-locked to this node

    time_t lock_time;           // When shutdown lock started

    /* Resource parameters may have node-attribute-based rules, which means the
     * values can vary by node. This table is a cache of parameter name/value
     * tables for each node (as needed). Use pe_rsc_params() to get the table
     * for a given node.
     */
    GHashTable *parameter_cache;    // Key = node name, value = parameters table
};

struct pe_action_s {
    int id;
    int priority;

    pe_resource_t *rsc;
    pe_node_t *node;
    xmlNode *op_entry;

    char *task;
    char *uuid;
    char *cancel_task;
    char *reason;

    enum pe_action_flags flags;
    enum rsc_start_requirement needs;
    enum action_fail_response on_fail;
    enum rsc_role_e fail_role;

    GHashTable *meta;
    GHashTable *extra;

    /*
     * These two variables are associated with the constraint logic
     * that involves first having one or more actions runnable before
     * then allowing this action to execute.
     *
     * These variables are used with features such as 'clone-min' which
     * requires at minimum X number of cloned instances to be running
     * before an order dependency can run. Another option that uses
     * this is 'require-all=false' in ordering constraints. This option
     * says "only require one instance of a resource to start before
     * allowing dependencies to start" -- basically, require-all=false is
     * the same as clone-min=1.
     */

    /* current number of known runnable actions in the before list.
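// Illustrative sketch (not part of this patch): how the two counters below are
// meant to interact when 'require-all=false' or 'clone-min' is configured. The
// helper name check_requires_any() is hypothetical; the real update logic
// lives in libpacemaker's ordering code.
//
//     static bool
//     check_requires_any(const pe_action_t *action)
//     {
//         if (!pcmk_is_set(action->flags, pe_action_requires_any)) {
//             return true;    // ordinary actions do not use these counters
//         }
//         // runnable_before counts runnable "before" actions seen so far;
//         // the action is runnable once the configured minimum is reached.
//         return action->runnable_before >= action->required_runnable_before;
//     }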
*/ int runnable_before; /* the number of "before" runnable actions required for this action * to be considered runnable */ int required_runnable_before; GList *actions_before; /* pe_action_wrapper_t* */ GList *actions_after; /* pe_action_wrapper_t* */ /* Some of the above fields could be moved to the details, * except for API backward compatibility. */ void *action_details; // varies by type of action }; typedef struct pe_ticket_s { char *id; gboolean granted; time_t last_granted; gboolean standby; GHashTable *state; } pe_ticket_t; typedef struct pe_tag_s { char *id; GList *refs; } pe_tag_t; //! Internal tracking for transition graph creation enum pe_link_state { pe_link_not_dumped, //! Internal tracking for transition graph creation pe_link_dumped, //! Internal tracking for transition graph creation pe_link_dup, //! \deprecated No longer used by Pacemaker }; enum pe_discover_e { pe_discover_always = 0, pe_discover_never, pe_discover_exclusive, }; /* *INDENT-OFF* */ enum pe_ordering { pe_order_none = 0x0, /* deleted */ pe_order_optional = 0x1, /* pure ordering, nothing implied */ pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */ pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */ pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */ pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */ /* first requires then to be both runnable and migrate runnable. */ pe_order_implies_first_migratable = 0x80, pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */ pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX', * ensure instances of 'then' on 'nodeX' are too. * Only really useful if 'then' is a clone and 'first' is not */ pe_order_probe = 0x800, /* If 'first->rsc' is * - running but about to stop, ignore the constraint * - otherwise, behave as runnable_left */ pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ pe_order_stonith_stop = 0x2000, // #endif #ifdef __cplusplus } #endif #endif // PCMK__CRM_PENGINE_PE_TYPES__H diff --git a/include/crm/pengine/pe_types_compat.h b/include/crm/pengine/pe_types_compat.h index 3ecd1ad87d..1a042dcdf2 100644 --- a/include/crm/pengine/pe_types_compat.h +++ b/include/crm/pengine/pe_types_compat.h @@ -1,212 +1,215 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H # define PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H #include #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Deprecated Pacemaker scheduler API * \ingroup pengine * \deprecated Do not include this header directly. The scheduler APIs in this * header, and the header itself, will be removed in a future * release. */ //! \deprecated Use pcmk_rsc_removed instead #define pe_rsc_orphan pcmk_rsc_removed //! \deprecated Use pcmk_rsc_managed instead #define pe_rsc_managed pcmk_rsc_managed //! \deprecated Use pcmk_rsc_blocked instead #define pe_rsc_block pcmk_rsc_blocked //! \deprecated Use pcmk_rsc_removed_filler instead #define pe_rsc_orphan_container_filler pcmk_rsc_removed_filler //! 
\deprecated Use pcmk_rsc_notify instead
#define pe_rsc_notify pcmk_rsc_notify

//! \deprecated Use pcmk_rsc_unique instead
#define pe_rsc_unique pcmk_rsc_unique

//! \deprecated Use pcmk_rsc_fence_device instead
#define pe_rsc_fence_device pcmk_rsc_fence_device

//! \deprecated Use pcmk_rsc_promotable instead
#define pe_rsc_promotable pcmk_rsc_promotable

//! \deprecated Use pcmk_rsc_unassigned instead
#define pe_rsc_provisional pcmk_rsc_unassigned

//! \deprecated Use pcmk_rsc_assigning instead
#define pe_rsc_allocating pcmk_rsc_assigning

//! \deprecated Use pcmk_rsc_updating_nodes instead
#define pe_rsc_merging pcmk_rsc_updating_nodes

//! \deprecated Use pcmk_rsc_restarting instead
#define pe_rsc_restarting pcmk_rsc_restarting

//! \deprecated Use pcmk_rsc_stop_if_failed instead
#define pe_rsc_stop pcmk_rsc_stop_if_failed

//! \deprecated Use pcmk_rsc_reload instead
#define pe_rsc_reload pcmk_rsc_reload

//! \deprecated Use pcmk_rsc_remote_nesting_allowed instead
#define pe_rsc_allow_remote_remotes pcmk_rsc_remote_nesting_allowed

//! \deprecated Use pcmk_rsc_critical instead
#define pe_rsc_critical pcmk_rsc_critical

//! \deprecated Use pcmk_rsc_failed instead
#define pe_rsc_failed pcmk_rsc_failed

//! \deprecated Use pcmk_rsc_detect_loop instead
#define pe_rsc_detect_loop pcmk_rsc_detect_loop

//! \deprecated Do not use
#define pe_rsc_runnable pcmk_rsc_runnable

//! \deprecated Use pcmk_rsc_start_pending instead
#define pe_rsc_start_pending pcmk_rsc_start_pending

//!< \deprecated Do not use
#define pe_rsc_starting pcmk_rsc_starting

//!< \deprecated Do not use
#define pe_rsc_stopping pcmk_rsc_stopping

+//! \deprecated Use pcmk_rsc_stop_unexpected instead
+#define pe_rsc_stop_unexpected pcmk_rsc_stop_unexpected
+
//! \deprecated Use pcmk_sched_quorate instead
#define pe_flag_have_quorum pcmk_sched_quorate

//! \deprecated Use pcmk_sched_symmetric_cluster instead
#define pe_flag_symmetric_cluster pcmk_sched_symmetric_cluster

//! \deprecated Use pcmk_sched_in_maintenance instead
#define pe_flag_maintenance_mode pcmk_sched_in_maintenance

//! \deprecated Use pcmk_sched_fencing_enabled instead
#define pe_flag_stonith_enabled pcmk_sched_fencing_enabled

//! \deprecated Use pcmk_sched_have_fencing instead
#define pe_flag_have_stonith_resource pcmk_sched_have_fencing

//! \deprecated Use pcmk_sched_enable_unfencing instead
#define pe_flag_enable_unfencing pcmk_sched_enable_unfencing

//! \deprecated Use pcmk_sched_concurrent_fencing instead
#define pe_flag_concurrent_fencing pcmk_sched_concurrent_fencing

//! \deprecated Use pcmk_sched_stop_removed_resources instead
#define pe_flag_stop_rsc_orphans pcmk_sched_stop_removed_resources

//! \deprecated Use pcmk_sched_cancel_removed_actions instead
#define pe_flag_stop_action_orphans pcmk_sched_cancel_removed_actions

//! \deprecated Use pcmk_sched_stop_all instead
#define pe_flag_stop_everything pcmk_sched_stop_all

//! \deprecated Use pcmk_sched_start_failure_fatal instead
#define pe_flag_start_failure_fatal pcmk_sched_start_failure_fatal

//! \deprecated Do not use
#define pe_flag_remove_after_stop pcmk_sched_remove_after_stop

//! \deprecated Use pcmk_sched_startup_fencing instead
#define pe_flag_startup_fencing pcmk_sched_startup_fencing

//! \deprecated Use pcmk_sched_shutdown_lock instead
#define pe_flag_shutdown_lock pcmk_sched_shutdown_lock

//! \deprecated Use pcmk_sched_probe_resources instead
#define pe_flag_startup_probes pcmk_sched_probe_resources

//! \deprecated Use pcmk_sched_have_status instead
#define pe_flag_have_status pcmk_sched_have_status

//!
\deprecated Use pcmk_sched_have_remote_nodes instead #define pe_flag_have_remote_nodes pcmk_sched_have_remote_nodes //! \deprecated Use pcmk_sched_location_only instead #define pe_flag_quick_location pcmk_sched_location_only //! \deprecated Use pcmk_sched_sanitized instead #define pe_flag_sanitized pcmk_sched_sanitized //! \deprecated Do not use #define pe_flag_stdout (1ULL << 22) //! \deprecated Use pcmk_sched_no_counts instead #define pe_flag_no_counts pcmk_sched_no_counts //! \deprecated Use pcmk_sched_no_compat instead #define pe_flag_no_compat pcmk_sched_no_compat //! \deprecated Use pcmk_sched_output_scores instead #define pe_flag_show_scores pcmk_sched_output_scores //! \deprecated Use pcmk_sched_show_utilization instead #define pe_flag_show_utilization pcmk_sched_show_utilization //! \deprecated Use pcmk_sched_validate_only instead #define pe_flag_check_config pcmk_sched_validate_only //!@{ //! \deprecated Do not use (unused by Pacemaker) enum pe_graph_flags { pe_graph_none = 0x00000, pe_graph_updated_first = 0x00001, pe_graph_updated_then = 0x00002, pe_graph_disable = 0x00004, }; //!@} //!@{ //! \deprecated Do not use enum pe_check_parameters { pe_check_last_failure, pe_check_active, }; //!@} //!< \deprecated Use pe_action_t instead typedef struct pe_action_s action_t; //!< \deprecated Use pe_action_wrapper_t instead typedef struct pe_action_wrapper_s action_wrapper_t; //!< \deprecated Use pe_node_t instead typedef struct pe_node_s node_t; //!< \deprecated Use enum pe_quorum_policy instead typedef enum pe_quorum_policy no_quorum_policy_t; //!< \deprecated use pe_resource_t instead typedef struct pe_resource_s resource_t; //!< \deprecated Use pe_tag_t instead typedef struct pe_tag_s tag_t; //!< \deprecated Use pe_ticket_t instead typedef struct pe_ticket_s ticket_t; #ifdef __cplusplus } #endif #endif // PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c index d564f15709..0b054313a1 100644 --- a/lib/pacemaker/pcmk_output.c +++ b/lib/pacemaker/pcmk_output.c @@ -1,2397 +1,2397 @@ /* * Copyright 2019-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include static char * colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons, bool dependents) { char *retval = NULL; if (cons->primary_role > pcmk_role_started) { retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), (dependents? 
"needs" : "with"), role2text(cons->primary_role), cons->id); } else { retval = crm_strdup_printf("%s (score=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), cons->id); } return retval; } static void colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, pcmk__colocation_t *cons) { xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_DEPEND, "id", cons->id, "rsc", cons->dependent->id, "with-rsc", cons->primary->id, "score", pcmk_readable_score(cons->score), NULL); if (cons->node_attribute) { xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute); } if (cons->dependent_role != pcmk_role_unknown) { xmlSetProp(node, (pcmkXmlStr) "rsc-role", (pcmkXmlStr) role2text(cons->dependent_role)); } if (cons->primary_role != pcmk_role_unknown) { xmlSetProp(node, (pcmkXmlStr) "with-rsc-role", (pcmkXmlStr) role2text(cons->primary_role)); } } static int do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header) { GList *lpc = NULL; GList *list = rsc->rsc_location; int rc = pcmk_rc_no_output; for (lpc = list; lpc != NULL; lpc = lpc->next) { pe__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { pe_node_t *node = (pe_node_t *) lpc2->data; if (add_header) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "locations"); } pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION, "node", node->details->uname, "rsc", rsc->id, "id", cons->id, "score", pcmk_readable_score(node->weight), NULL); } } if (add_header) { PCMK__OUTPUT_LIST_FOOTER(out, rc); } return rc; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *", "pe_node_t *", "pe_node_t *", "pe_action_t *", "pe_action_t *") static int rsc_action_item(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *origin = va_arg(args, pe_node_t *); pe_node_t *destination = va_arg(args, pe_node_t *); pe_action_t *action = va_arg(args, pe_action_t *); pe_action_t *source = va_arg(args, pe_action_t *); int len = 0; char *reason = NULL; char *details = NULL; bool same_host = false; bool same_role = false; bool need_role = false; static int rsc_width = 5; static int detail_width = 5; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if (source == NULL) { source = action; } len = strlen(rsc->id); if (len > rsc_width) { rsc_width = len + 2; } if ((rsc->role > pcmk_role_started) || (rsc->next_role > pcmk_role_unpromoted)) { need_role = true; } if (pe__same_node(origin, destination)) { same_host = true; } if (rsc->role == rsc->next_role) { same_role = true; } if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), pe__node_name(destination)); } else if (origin == NULL) { /* Starting a resource */ details = crm_strdup_printf("%s", pe__node_name(destination)); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), pe__node_name(origin)); } else if (destination == NULL) { /* Stopping a resource */ details = crm_strdup_printf("%s", pe__node_name(origin)); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), pe__node_name(origin)); } else if (same_role 
&& same_host) { /* Recovering or Restarting a normal resource */ details = crm_strdup_printf("%s", pe__node_name(origin)); } else if (need_role && same_role) { /* Moving a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", pe__node_name(origin), pe__node_name(destination), role2text(rsc->role)); } else if (same_role) { /* Moving a normal resource */ details = crm_strdup_printf("%s -> %s", pe__node_name(origin), pe__node_name(destination)); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), pe__node_name(origin)); } else { /* Moving and promoting/demoting */ details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), pe__node_name(origin), role2text(rsc->next_role), pe__node_name(destination)); } len = strlen(details); if (len > detail_width) { detail_width = len; } if ((source->reason != NULL) && !pcmk_is_set(action->flags, pe_action_runnable)) { reason = crm_strdup_printf("due to %s (blocked)", source->reason); } else if (source->reason) { reason = crm_strdup_printf("due to %s", source->reason); } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { reason = strdup("blocked"); } out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s", change, rsc_width, rsc->id, detail_width, details, ((reason == NULL)? "" : " "), pcmk__s(reason, "")); free(details); free(reason); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *", "pe_node_t *", "pe_node_t *", "pe_action_t *", "pe_action_t *") static int rsc_action_item_xml(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *origin = va_arg(args, pe_node_t *); pe_node_t *destination = va_arg(args, pe_node_t *); pe_action_t *action = va_arg(args, pe_action_t *); pe_action_t *source = va_arg(args, pe_action_t *); char *change_str = NULL; bool same_host = false; bool same_role = false; bool need_role = false; xmlNode *xml = NULL; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if (source == NULL) { source = action; } if ((rsc->role > pcmk_role_started) || (rsc->next_role > pcmk_role_unpromoted)) { need_role = true; } if (pe__same_node(origin, destination)) { same_host = true; } if (rsc->role == rsc->next_role) { same_role = true; } change_str = g_ascii_strdown(change, -1); xml = pcmk__output_create_xml_node(out, "rsc_action", "action", change_str, "resource", rsc->id, NULL); g_free(change_str); if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "next-role", role2text(rsc->next_role), "dest", destination->details->uname, NULL); } else if (origin == NULL) { /* Starting a resource */ crm_xml_add(xml, "node", destination->details->uname); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "node", origin->details->uname, NULL); } else if (destination == NULL) { /* Stopping a resource */ crm_xml_add(xml, "node", origin->details->uname); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "source", origin->details->uname, NULL); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ crm_xml_add(xml, "source", 
origin->details->uname); } else if (need_role && same_role) { /* Moving a promotable clone instance */ pcmk__xe_set_props(xml, "source", origin->details->uname, "dest", destination->details->uname, "role", role2text(rsc->role), NULL); } else if (same_role) { /* Moving a normal resource */ pcmk__xe_set_props(xml, "source", origin->details->uname, "dest", destination->details->uname, NULL); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "next-role", role2text(rsc->next_role), "source", origin->details->uname, NULL); } else { /* Moving and promoting/demoting */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "source", origin->details->uname, "next-role", role2text(rsc->next_role), "dest", destination->details->uname, NULL); } if (source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) { pcmk__xe_set_props(xml, "reason", source->reason, "blocked", "true", NULL); } else if (source->reason != NULL) { crm_xml_add(xml, "reason", source->reason); } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { pcmk__xe_set_bool_attr(xml, "blocked", true); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool") static int rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons * directly rather than rsc->cmds->this_with_colocations(). */ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources %s is colocated with", rsc->id); if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->primary->id, cons->id); continue; } hdr = colocations_header(cons->primary, cons, false); out->list_item(out, NULL, "%s", hdr); free(hdr); // Empty list header for indentation of information about this resource out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->primary); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool") static int rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons * directly rather than rsc->cmds->this_with_colocations(). 
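// Illustrative sketch (not part of this patch): the traversal pattern used by
// these "colocated with" messages. Each resource is marked with
// pcmk_rsc_detect_loop before recursing so that colocation cycles terminate,
// and the caller clears the flag on all resources afterwards. The function and
// variable names below are hypothetical.
//
//     static void
//     walk_colocations(pe_resource_t *rsc, bool recursive)
//     {
//         if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
//             return;                                         // already visited
//         }
//         pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);  // mark visited
//         for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
//             pcmk__colocation_t *cons = iter->data;
//             if (recursive) {
//                 walk_colocations(cons->primary, recursive);
//             }
//         }
//         // Caller: pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
//     }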
*/ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) { colocations_xml_node(out, cons->primary, cons); continue; } colocations_xml_node(out, cons->primary, cons); do_locations_list_xml(out, cons->primary, false); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } } return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool") static int rscs_colocated_with_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->rsc_cons_lhs directly rather than * rsc->cmds->with_this_colocations(). */ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s", rsc->id); if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->dependent->id, cons->id); continue; } hdr = colocations_header(cons->dependent, cons, true); out->list_item(out, NULL, "%s", hdr); free(hdr); // Empty list header for indentation of information about this resource out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->dependent); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool") static int rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->rsc_cons_lhs directly rather than * rsc->cmds->with_this_colocations(). 
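// Illustrative note (not part of this patch): the two colocation lists point in
// opposite directions. rsc->rsc_cons holds constraints where rsc is the
// dependent (cons->primary is the "other" resource, as in "Resources rsc is
// colocated with"), while rsc->rsc_cons_lhs holds constraints where rsc is the
// primary (cons->dependent is the "other" resource, as in "Resources colocated
// with rsc"). A minimal sketch, with a hypothetical log message:
//
//     for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
//         pcmk__colocation_t *cons = iter->data;
//         crm_info("%s depends on %s (score %s, id %s)",
//                  cons->dependent->id, rsc->id,
//                  pcmk_readable_score(cons->score), cons->id);
//     }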
*/ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) { colocations_xml_node(out, cons->dependent, cons); continue; } colocations_xml_node(out, cons->dependent, cons); do_locations_list_xml(out, cons->dependent, false); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } } return rc; } PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") static int locations_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *lpc = NULL; GList *list = rsc->rsc_location; int rc = pcmk_rc_no_output; for (lpc = list; lpc != NULL; lpc = lpc->next) { pe__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { pe_node_t *node = (pe_node_t *) lpc2->data; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Locations"); out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)", pe__node_name(node), pcmk_readable_score(node->weight), cons->id, rsc->id); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") static int locations_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); return do_locations_list_xml(out, rsc, true); } PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *", "bool", "bool") static int locations_and_colocations(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(rsc->cluster); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } out->message(out, "locations-list", rsc); pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop); out->message(out, "rscs-colocated-with-list", rsc, recursive); pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *", "bool", "bool") static int locations_and_colocations_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(rsc->cluster); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } pcmk__output_xml_create_parent(out, "constraints", NULL); do_locations_list_xml(out, rsc, false); pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop); out->message(out, "rscs-colocated-with-list", rsc, recursive); pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health(pcmk__output_t *out, va_list args) { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); return out->info(out, "Controller on %s in state %s: %s", pcmk__s(host_from, "unknown node"), pcmk__s(fsa_state, "unknown"), pcmk__s(result, "unknown 
result")); } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from G_GNUC_UNUSED = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result G_GNUC_UNUSED = va_arg(args, const char *); if (fsa_state != NULL) { pcmk__formatted_printf(out, "%s\n", fsa_state); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); pcmk__output_create_xml_node(out, pcmk__s(sys_from, ""), "node_name", pcmk__s(host_from, ""), "state", pcmk__s(fsa_state, ""), "result", pcmk__s(result, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; int rc = pcmk_rc_ok; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } rc = out->info(out, "Status of %s: '%s' (last updated %s)", sys_from, state_s, pcmk__s(last_updated_s, "at unknown time")); free(last_updated_s); return rc; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_html(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; char *msg = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } msg = crm_strdup_printf("Status of %s: '%s' (last updated %s)", sys_from, state_s, pcmk__s(last_updated_s, "at unknown time")); pcmk__output_create_html_node(out, "li", NULL, NULL, msg); free(msg); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return pacemakerd_health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum 
pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated G_GNUC_UNUSED = va_arg(args, time_t); if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } pcmk__formatted_printf(out, "%s\n", state_s); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } pcmk__output_create_xml_node(out, "pacemakerd", "sys_from", sys_from, "state", state_s, "last_updated", last_updated_s, NULL); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_default(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); out->list_item(out, NULL, "Testing %s ... %.2f secs", xml_file, (end - start) / (float) CLOCKS_PER_SEC); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_xml(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); char *duration = pcmk__ftoa((end - start) / (float) CLOCKS_PER_SEC); pcmk__output_create_xml_node(out, "timing", "file", xml_file, "duration", duration, NULL); free(duration); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); return out->info(out, "Designated Controller is: %s", pcmk__s(dc, "not yet elected")); } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return dc(out, args); } else { const char *dc = va_arg(args, const char *); if (dc != NULL) { pcmk__formatted_printf(out, "%s\n", pcmk__s(dc, "")); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_xml(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); pcmk__output_create_xml_node(out, "dc", "node_name", pcmk__s(dc, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export = va_arg(args, int); if (bash_export) { return out->info(out, "export %s=%s", pcmk__s(name, ""), pcmk__s(id, "")); } else { return out->info(out, "%s node: %s (%s)", type ? 
type : "cluster", pcmk__s(name, ""), pcmk__s(id, "")); } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return crmadmin_node(out, args); } else { const char *type G_GNUC_UNUSED = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id G_GNUC_UNUSED = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__formatted_printf(out, "%s\n", pcmk__s(name, "")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_xml(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__output_create_xml_node(out, "node", "type", type ? type : "cluster", "name", pcmk__s(name, ""), "id", pcmk__s(id, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *", "const char *", "guint", "const op_digest_cache_t *") static int digests_text(pcmk__output_t *out, va_list args) { const pe_resource_t *rsc = va_arg(args, const pe_resource_t *); const pe_node_t *node = va_arg(args, const pe_node_t *); const char *task = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *); char *action_desc = NULL; const char *rsc_desc = "unknown resource"; const char *node_desc = "unknown node"; if (interval_ms != 0) { action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms, ((task == NULL)? "unknown" : task)); } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none)) { action_desc = strdup("probe action"); } else { action_desc = crm_strdup_printf("%s action", ((task == NULL)? "unknown" : task)); } if ((rsc != NULL) && (rsc->id != NULL)) { rsc_desc = rsc->id; } if ((node != NULL) && (node->details->uname != NULL)) { node_desc = node->details->uname; } out->begin_list(out, NULL, NULL, "Digests for %s %s on %s", rsc_desc, action_desc, node_desc); free(action_desc); if (digests == NULL) { out->list_item(out, NULL, "none"); out->end_list(out); return pcmk_rc_ok; } if (digests->digest_all_calc != NULL) { out->list_item(out, NULL, "%s (all parameters)", digests->digest_all_calc); } if (digests->digest_secure_calc != NULL) { out->list_item(out, NULL, "%s (non-private parameters)", digests->digest_secure_calc); } if (digests->digest_restart_calc != NULL) { out->list_item(out, NULL, "%s (non-reloadable parameters)", digests->digest_restart_calc); } out->end_list(out); return pcmk_rc_ok; } static void add_digest_xml(xmlNode *parent, const char *type, const char *digest, xmlNode *digest_source) { if (digest != NULL) { xmlNodePtr digest_xml = create_xml_node(parent, "digest"); crm_xml_add(digest_xml, "type", ((type == NULL)? 
"unspecified" : type)); crm_xml_add(digest_xml, "hash", digest); if (digest_source != NULL) { add_node_copy(digest_xml, digest_source); } } } PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *", "const char *", "guint", "const op_digest_cache_t *") static int digests_xml(pcmk__output_t *out, va_list args) { const pe_resource_t *rsc = va_arg(args, const pe_resource_t *); const pe_node_t *node = va_arg(args, const pe_node_t *); const char *task = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *); char *interval_s = crm_strdup_printf("%ums", interval_ms); xmlNode *xml = NULL; xml = pcmk__output_create_xml_node(out, "digests", "resource", pcmk__s(rsc->id, ""), "node", pcmk__s(node->details->uname, ""), "task", pcmk__s(task, ""), "interval", interval_s, NULL); free(interval_s); if (digests != NULL) { add_digest_xml(xml, "all", digests->digest_all_calc, digests->params_all); add_digest_xml(xml, "nonprivate", digests->digest_secure_calc, digests->params_secure); add_digest_xml(xml, "nonreloadable", digests->digest_restart_calc, digests->params_restart); } return pcmk_rc_ok; } #define STOP_SANITY_ASSERT(lineno) do { \ if ((current != NULL) && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if (stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", \ __func__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", \ __func__, lineno, stop->uuid); \ CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \ } \ } while (0) PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *") static int rsc_action_default(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *current = va_arg(args, pe_node_t *); pe_node_t *next = va_arg(args, pe_node_t *); GList *possible_matches = NULL; char *key = NULL; int rc = pcmk_rc_no_output; bool moving = false; pe_node_t *start_node = NULL; pe_action_t *start = NULL; pe_action_t *stop = NULL; pe_action_t *promote = NULL; pe_action_t *demote = NULL; pe_action_t *reason_op = NULL; if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed) || (current == NULL && next == NULL)) { const bool managed = pcmk_is_set(rsc->flags, pcmk_rsc_managed); pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), (managed? "" : " unmanaged")); return rc; } moving = (current != NULL) && (next != NULL) && !pe__same_node(current, next); possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_START, false); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) { start_node = NULL; } else { start_node = current; } possible_matches = pe__resource_actions(rsc, start_node, PCMK_ACTION_STOP, false); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); - } else if (pcmk_is_set(rsc->flags, pe_rsc_stop_unexpected)) { + } else if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_unexpected)) { /* The resource is multiply active with multiple-active set to * stop_unexpected, and not stopping on its current node, but it should * be stopping elsewhere. 
*/ possible_matches = pe__resource_actions(rsc, NULL, PCMK_ACTION_STOP, false); if (possible_matches != NULL) { stop = possible_matches->data; g_list_free(possible_matches); } } possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_PROMOTE, false); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_DEMOTE, false); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { pe_action_t *migrate_op = NULL; CRM_CHECK(next != NULL, return rc); possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_MIGRATE_FROM, false); if (possible_matches) { migrate_op = possible_matches->data; } if ((migrate_op != NULL) && (current != NULL) && pcmk_is_set(migrate_op->flags, pe_action_runnable)) { rc = out->message(out, "rsc-action-item", "Migrate", rsc, current, next, start, NULL); } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if ((start == NULL) || pcmk_is_set(start->flags, pe_action_optional)) { if ((demote != NULL) && (promote != NULL) && !pcmk_is_set(demote->flags, pe_action_optional) && !pcmk_is_set(promote->flags, pe_action_optional)) { rc = out->message(out, "rsc-action-item", "Re-promote", rsc, current, next, promote, demote); } else { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), pe__node_name(next)); } } else if (!pcmk_is_set(start->flags, pe_action_runnable)) { if ((stop == NULL) || (stop->reason == NULL)) { reason_op = start; } else { reason_op = stop; } rc = out->message(out, "rsc-action-item", "Stop", rsc, current, NULL, stop, reason_op); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { const bool failed = pcmk_is_set(rsc->flags, pcmk_rsc_failed); rc = out->message(out, "rsc-action-item", (failed? 
"Recover" : "Move"), rsc, current, next, stop, NULL); } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { rc = out->message(out, "rsc-action-item", "Recover", rsc, current, NULL, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); #if 0 /* @TODO This can be reached in situations that should really be * "Start" (see for example the migrate-fail-7 regression test) */ STOP_SANITY_ASSERT(__LINE__); #endif } g_list_free(possible_matches); return rc; } if ((stop != NULL) && ((rsc->next_role == pcmk_role_stopped) || ((start != NULL) && !pcmk_is_set(start->flags, pe_action_runnable)))) { key = stop_key(rsc); for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *node = iter->data; pe_action_t *stop_op = NULL; reason_op = start; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op != NULL) { if (pcmk_is_set(stop_op->flags, pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); } if (stop_op->reason != NULL) { reason_op = stop_op; } } if (out->message(out, "rsc-action-item", "Stop", rsc, node, NULL, stop_op, reason_op) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } free(key); } else if ((stop != NULL) && pcmk_all_flags_set(rsc->flags, pcmk_rsc_failed|pcmk_rsc_stop_if_failed)) { /* 'stop' may be NULL if the failure was ignored */ rc = out->message(out, "rsc-action-item", "Recover", rsc, current, next, stop, start); STOP_SANITY_ASSERT(__LINE__); } else if (moving) { rc = out->message(out, "rsc-action-item", "Move", rsc, current, next, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (rsc->role == pcmk_role_promoted) { CRM_LOG_ASSERT(current != NULL); rc = out->message(out, "rsc-action-item", "Demote", rsc, current, next, demote, NULL); } else if (rsc->next_role == pcmk_role_promoted) { CRM_LOG_ASSERT(next); rc = out->message(out, "rsc-action-item", "Promote", rsc, current, next, promote, NULL); } else if ((rsc->role == pcmk_role_stopped) && (rsc->next_role > pcmk_role_stopped)) { rc = out->message(out, "rsc-action-item", "Start", rsc, current, next, start, NULL); } return rc; } PCMK__OUTPUT_ARGS("node-action", "const char *", "const char *", "const char *") static int node_action(pcmk__output_t *out, va_list args) { const char *task = va_arg(args, const char *); const char *node_name = va_arg(args, const char *); const char *reason = va_arg(args, const char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { out->list_item(out, NULL, "%s %s '%s'", task, node_name, reason); } else { crm_notice(" * %s %s", task, node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-action", "const char *", "const char *", "const char *") static int node_action_xml(pcmk__output_t *out, va_list args) { const char *task = va_arg(args, const char *); const char *node_name = va_arg(args, const char *); const char *reason = va_arg(args, const char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { pcmk__output_create_xml_node(out, "node_action", "task", task, "node", node_name, "reason", reason, NULL); } else { crm_notice(" * %s %s", task, 
node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *", "const char *", "bool", "bool") static int node_info_default(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); return out->info(out, "Node %" PRIu32 ": %s " "(uuid=%s, state=%s, have_quorum=%s, is_remote=%s)", node_id, pcmk__s(node_name, "unknown"), pcmk__s(uuid, "unknown"), pcmk__s(state, "unknown"), pcmk__btoa(have_quorum), pcmk__btoa(is_remote)); } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *", "const char *", "bool", "bool") static int node_info_xml(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); char *id_s = crm_strdup_printf("%" PRIu32, node_id); pcmk__output_create_xml_node(out, "node-info", "nodeid", id_s, XML_ATTR_UNAME, node_name, XML_ATTR_ID, uuid, XML_NODE_IS_PEER, state, XML_ATTR_HAVE_QUORUM, pcmk__btoa(have_quorum), XML_NODE_IS_REMOTE, pcmk__btoa(is_remote), NULL); free(id_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr") static int inject_cluster_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (rsc != NULL) { out->list_item(out, NULL, "Cluster action: %s for %s on %s", task, ID(rsc), node); } else { out->list_item(out, NULL, "Cluster action: %s on %s", task, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr") static int inject_cluster_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "cluster_action", "task", task, "node", node, NULL); if (rsc) { crm_xml_add(xml_node, "id", ID(rsc)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "const char *", "const char *") static int inject_fencing_action(pcmk__output_t *out, va_list args) { const char *target = va_arg(args, const char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Fencing %s (%s)", target, op); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "const char *", "const char *") static int inject_fencing_action_xml(pcmk__output_t *out, va_list args) { const char *target = va_arg(args, const char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "fencing_action", "target", target, "op", op, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNodePtr") static int inject_attr(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char 
*); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); out->list_item(out, NULL, "Injecting attribute %s=%s into %s '%s'", name, value, node_path, ID(cib_node)); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNodePtr") static int inject_attr_xml(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); pcmk__output_create_xml_node(out, "inject_attr", "name", name, "value", value, "node_path", node_path, "cib_node", ID(cib_node), NULL); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Injecting %s into the configuration", spec); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec_xml(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "inject_spec", "spec", spec, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "const char *", "const char *") static int inject_modify_config(pcmk__output_t *out, va_list args) { const char *quorum = va_arg(args, const char *); const char *watchdog = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Performing Requested Modifications"); if (quorum) { out->list_item(out, NULL, "Setting quorum: %s", quorum); } if (watchdog) { out->list_item(out, NULL, "Setting watchdog: %s", watchdog); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "const char *", "const char *") static int inject_modify_config_xml(pcmk__output_t *out, va_list args) { const char *quorum = va_arg(args, const char *); const char *watchdog = va_arg(args, const char *); xmlNodePtr node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node = pcmk__output_xml_create_parent(out, "modifications", NULL); if (quorum) { crm_xml_add(node, "quorum", quorum); } if (watchdog) { crm_xml_add(node, "watchdog", watchdog); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "const char *") static int inject_modify_node(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *node = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Online", pcmk__str_none)) { out->list_item(out, NULL, "Bringing node %s online", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Offline", pcmk__str_none)) { out->list_item(out, NULL, "Taking node %s offline", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Failing", pcmk__str_none)) { out->list_item(out, NULL, "Failing node %s", node); return pcmk_rc_ok; } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "const char *") static int inject_modify_node_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *node = 
va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "modify_node", "action", action, "node", node, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "const char *") static int inject_modify_ticket(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *ticket = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Standby", pcmk__str_none)) { out->list_item(out, NULL, "Making ticket %s standby", ticket); } else { out->list_item(out, NULL, "%s ticket %s", action, ticket); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "const char *") static int inject_modify_ticket_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *ticket = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "modify_ticket", "action", action, "ticket", ticket, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Pseudo action: %s%s%s", task, ((node == NULL)? "" : " on "), pcmk__s(node, "")); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "pseudo_action", "task", task, NULL); if (node) { crm_xml_add(xml_node, "node", node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "const char *", "guint") static int inject_rsc_action(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); const char *node = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (interval_ms) { out->list_item(out, NULL, "Resource action: %-15s %s=%u on %s", rsc, operation, interval_ms, node); } else { out->list_item(out, NULL, "Resource action: %-15s %s on %s", rsc, operation, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "const char *", "guint") static int inject_rsc_action_xml(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); const char *node = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "rsc_action", "resource", rsc, "op", operation, "node", node, NULL); if (interval_ms) { char *interval_s = pcmk__itoa(interval_ms); crm_xml_add(xml_node, "interval", interval_s); free(interval_s); } return pcmk_rc_ok; } #define CHECK_RC(retcode, retval) \ if (retval == pcmk_rc_ok) { \ retcode = pcmk_rc_ok; \ } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", 
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") int pcmk__cluster_status_text(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); int rc = pcmk_rc_no_output; bool already_printed_failure = false; CHECK_RC(rc, out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, show_opts)); if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) { CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { CHECK_RC(rc, out->message(out, "resource-list", data_set, show_opts, true, unames, resources, rc == pcmk_rc_ok)); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { CHECK_RC(rc, out->message(out, "node-attribute-list", data_set, show_opts, (rc == pcmk_rc_ok), unames, resources)); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { CHECK_RC(rc, out->message(out, "node-summary", data_set, unames, resources, section_opts, show_opts, (rc == pcmk_rc_ok))); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && (data_set->failed != NULL) && (data_set->failed->children != NULL)) { CHECK_RC(rc, out->message(out, "failed-action-list", data_set, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print failed stonith actions */ if (pcmk_is_set(section_opts, pcmk_section_fence_failed) && fence_history != pcmk__fence_history_none) { if (history_rc == 0) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); already_printed_failure = true; } } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { CHECK_RC(rc, out->message(out, "ticket-list", data_set, (rc == pcmk_rc_ok))); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { CHECK_RC(rc, out->message(out, "ban-list", data_set, prefix, resources, show_opts, rc == pcmk_rc_ok)); } /* Print stonith history */ if (pcmk_any_flags_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { if (history_rc != 0) { if (!already_printed_failure) { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: 
%s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } } return rc; } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_xml(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, show_opts); /*** NODES ***/ if (pcmk_is_set(section_opts, pcmk_section_nodes)) { out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { /* XML output always displays full details. 
*/ uint32_t full_show_opts = show_opts & ~pcmk_show_brief; out->message(out, "resource-list", data_set, full_show_opts, false, unames, resources, false); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", data_set, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { out->message(out, "node-summary", data_set, unames, resources, section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && (data_set->failed != NULL) && (data_set->failed->children != NULL)) { out->message(out, "failed-action-list", data_set, unames, resources, show_opts, false); } /* Print stonith history */ if (pcmk_is_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { out->message(out, "full-fencing-list", history_rc, stonith_history, unames, section_opts, show_opts, false); } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", data_set, false); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", data_set, prefix, resources, show_opts, false); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_html(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); bool already_printed_failure = false; out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, show_opts); /*** NODE LIST ***/ if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) { out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { out->message(out, "resource-list", data_set, show_opts, true, unames, resources, false); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", data_set, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { out->message(out, "node-summary", data_set, unames, resources, section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && (data_set->failed != NULL) && (data_set->failed->children != NULL)) { out->message(out, "failed-action-list", data_set, unames, resources, 
show_opts, false); } /* Print failed stonith actions */ if (pcmk_is_set(section_opts, pcmk_section_fence_failed) && fence_history != pcmk__fence_history_none) { if (history_rc == 0) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, false); } } else { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } /* Print stonith history */ if (pcmk_any_flags_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { if (history_rc != 0) { if (!already_printed_failure) { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "fencing-list", hp, unames, section_opts, show_opts, false); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, false); } } } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", data_set, false); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", data_set, prefix, resources, show_opts, false); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *") static int attribute_default(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); GString *s = g_string_sized_new(50); if (!pcmk__str_empty(scope)) { pcmk__g_strcat(s, "scope=\"", scope, "\" ", NULL); } if (!pcmk__str_empty(instance)) { pcmk__g_strcat(s, "id=\"", instance, "\" ", NULL); } pcmk__g_strcat(s, "name=\"", pcmk__s(name, ""), "\" ", NULL); if (!pcmk__str_empty(host)) { pcmk__g_strcat(s, "host=\"", host, "\" ", NULL); } pcmk__g_strcat(s, "value=\"", pcmk__s(value, ""), "\"", NULL); out->info(out, "%s", s->str); g_string_free(s, TRUE); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *") static int attribute_xml(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, "attribute", "name", name, "value", value ? 
value : "", NULL); if (!pcmk__str_empty(scope)) { crm_xml_add(node, "scope", scope); } if (!pcmk__str_empty(instance)) { crm_xml_add(node, "id", instance); } if (!pcmk__str_empty(host)) { crm_xml_add(node, "host", host); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_default(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); switch (result) { case pcmk_rc_within_range: return out->info(out, "Rule %s is still in effect", rule_id); case pcmk_rc_ok: return out->info(out, "Rule %s satisfies conditions", rule_id); case pcmk_rc_after_range: return out->info(out, "Rule %s is expired", rule_id); case pcmk_rc_before_range: return out->info(out, "Rule %s has not yet taken effect", rule_id); case pcmk_rc_op_unsatisfied: return out->info(out, "Rule %s does not satisfy conditions", rule_id); default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_xml(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); char *rc_str = pcmk__itoa(pcmk_rc2exitc(result)); pcmk__output_create_xml_node(out, "rule-check", "rule-id", rule_id, "rc", rc_str, NULL); free(rc_str); switch (result) { case pcmk_rc_within_range: case pcmk_rc_ok: case pcmk_rc_after_range: case pcmk_rc_before_range: case pcmk_rc_op_unsatisfied: return pcmk_rc_ok; default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_none(pcmk__output_t *out, va_list args) { return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_text(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); static int code_width = 0; if (out->is_quiet(out)) { /* If out->is_quiet(), don't print the code. Print name and/or desc in a * compact format for text output, or print nothing at all for none-type * output. */ if ((name != NULL) && (desc != NULL)) { pcmk__formatted_printf(out, "%s - %s\n", name, desc); } else if ((name != NULL) || (desc != NULL)) { pcmk__formatted_printf(out, "%s\n", ((name != NULL)? name : desc)); } return pcmk_rc_ok; } /* Get length of longest (most negative) standard Pacemaker return code * This should be longer than all the values of any other type of return * code. */ if (code_width == 0) { long long most_negative = pcmk_rc_error - (long long) pcmk__n_rc + 1; code_width = (int) snprintf(NULL, 0, "%lld", most_negative); } if ((name != NULL) && (desc != NULL)) { static int name_width = 0; if (name_width == 0) { // Get length of longest standard Pacemaker return code name for (int lpc = 0; lpc < pcmk__n_rc; lpc++) { int len = (int) strlen(pcmk_rc_name(pcmk_rc_error - lpc)); name_width = QB_MAX(name_width, len); } } return out->info(out, "% *d: %-*s %s", code_width, code, name_width, name, desc); } if ((name != NULL) || (desc != NULL)) { return out->info(out, "% *d: %s", code_width, code, ((name != NULL)? 
name : desc)); } return out->info(out, "% *d", code_width, code); } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_xml(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); char *code_str = pcmk__itoa(code); pcmk__output_create_xml_node(out, "result-code", "code", code_str, XML_ATTR_NAME, name, XML_ATTR_DESC, desc, NULL); free(code_str); return pcmk_rc_ok; } static pcmk__message_entry_t fmt_functions[] = { { "attribute", "default", attribute_default }, { "attribute", "xml", attribute_xml }, { "cluster-status", "default", pcmk__cluster_status_text }, { "cluster-status", "html", cluster_status_html }, { "cluster-status", "xml", cluster_status_xml }, { "crmadmin-node", "default", crmadmin_node }, { "crmadmin-node", "text", crmadmin_node_text }, { "crmadmin-node", "xml", crmadmin_node_xml }, { "dc", "default", dc }, { "dc", "text", dc_text }, { "dc", "xml", dc_xml }, { "digests", "default", digests_text }, { "digests", "xml", digests_xml }, { "health", "default", health }, { "health", "text", health_text }, { "health", "xml", health_xml }, { "inject-attr", "default", inject_attr }, { "inject-attr", "xml", inject_attr_xml }, { "inject-cluster-action", "default", inject_cluster_action }, { "inject-cluster-action", "xml", inject_cluster_action_xml }, { "inject-fencing-action", "default", inject_fencing_action }, { "inject-fencing-action", "xml", inject_fencing_action_xml }, { "inject-modify-config", "default", inject_modify_config }, { "inject-modify-config", "xml", inject_modify_config_xml }, { "inject-modify-node", "default", inject_modify_node }, { "inject-modify-node", "xml", inject_modify_node_xml }, { "inject-modify-ticket", "default", inject_modify_ticket }, { "inject-modify-ticket", "xml", inject_modify_ticket_xml }, { "inject-pseudo-action", "default", inject_pseudo_action }, { "inject-pseudo-action", "xml", inject_pseudo_action_xml }, { "inject-rsc-action", "default", inject_rsc_action }, { "inject-rsc-action", "xml", inject_rsc_action_xml }, { "inject-spec", "default", inject_spec }, { "inject-spec", "xml", inject_spec_xml }, { "locations-list", "default", locations_list }, { "locations-list", "xml", locations_list_xml }, { "node-action", "default", node_action }, { "node-action", "xml", node_action_xml }, { "node-info", "default", node_info_default }, { "node-info", "xml", node_info_xml }, { "pacemakerd-health", "default", pacemakerd_health }, { "pacemakerd-health", "html", pacemakerd_health_html }, { "pacemakerd-health", "text", pacemakerd_health_text }, { "pacemakerd-health", "xml", pacemakerd_health_xml }, { "profile", "default", profile_default, }, { "profile", "xml", profile_xml }, { "result-code", "none", result_code_none }, { "result-code", "text", result_code_text }, { "result-code", "xml", result_code_xml }, { "rsc-action", "default", rsc_action_default }, { "rsc-action-item", "default", rsc_action_item }, { "rsc-action-item", "xml", rsc_action_item_xml }, { "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list }, { "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml }, { "rscs-colocated-with-list", "default", rscs_colocated_with_list }, { "rscs-colocated-with-list", "xml", rscs_colocated_with_list_xml }, { "rule-check", "default", rule_check_default }, { "rule-check", "xml", rule_check_xml }, { "locations-and-colocations", "default", locations_and_colocations }, { "locations-and-colocations", 
"xml", locations_and_colocations_xml }, { NULL, NULL, NULL } }; void pcmk__register_lib_messages(pcmk__output_t *out) { pcmk__register_messages(out, fmt_functions); } diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index 7f238fbdad..26821e49cd 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -1,1652 +1,1652 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include // uint8_t, uint32_t #include #include #include "libpacemaker_private.h" static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional); #define RSC_ROLE_MAX (pcmk_role_promoted + 1) static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the immediate next role when transitioning from one role * to a target role. For example, when going from Stopped to Promoted, the * next role is Unpromoted, because the resource must be started before it * can be promoted. The current state then becomes Started, which is fed * into this array again, giving a next role of Promoted. * * Current role Immediate next role Final target role * ------------ ------------------- ----------------- */ /* Unknown */ { pcmk_role_unknown, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_stopped, /* Started */ pcmk_role_stopped, /* Unpromoted */ pcmk_role_stopped, /* Promoted */ }, /* Stopped */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_started, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_unpromoted, /* Promoted */ }, /* Started */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_started, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, /* Unpromoted */ { pcmk_role_stopped, /* Unknown */ pcmk_role_stopped, /* Stopped */ pcmk_role_stopped, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, /* Promoted */ { pcmk_role_stopped, /* Unknown */ pcmk_role_unpromoted, /* Stopped */ pcmk_role_unpromoted, /* Started */ pcmk_role_unpromoted, /* Unpromoted */ pcmk_role_promoted, /* Promoted */ }, }; /*! * \internal * \brief Function to schedule actions needed for a role change * * \param[in,out] rsc Resource whose role is changing * \param[in,out] node Node where resource will be in its next role * \param[in] optional Whether scheduled actions should be optional */ typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node, bool optional); static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the function needed to transition directly from one role * to another. NULL indicates that nothing is needed. 
* * Current role Transition function Next role * ------------ ------------------- ---------- */ /* Unknown */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ assert_role_error, /* Started */ assert_role_error, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Stopped */ { assert_role_error, /* Unknown */ NULL, /* Stopped */ start_resource, /* Started */ start_resource, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Started */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ NULL, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Unpromoted */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ stop_resource, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Promoted */ { assert_role_error, /* Unknown */ demote_resource, /* Stopped */ demote_resource, /* Started */ demote_resource, /* Unpromoted */ NULL, /* Promoted */ }, }; /*! * \internal * \brief Get a list of a resource's allowed nodes sorted by node score * * \param[in] rsc Resource to check * * \return List of allowed nodes sorted by node score */ static GList * sorted_allowed_nodes(const pe_resource_t *rsc) { if (rsc->allowed_nodes != NULL) { GList *nodes = g_hash_table_get_values(rsc->allowed_nodes); if (nodes != NULL) { return pcmk__sort_nodes(nodes, pe__current_node(rsc)); } } return NULL; } /*! * \internal * \brief Assign a resource to its best allowed node, if possible * * \param[in,out] rsc Resource to choose a node for * \param[in] prefer If not \c NULL, prefer this node when all else * equal * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a * node, set next role to stopped and update * existing actions * * \return true if \p rsc could be assigned to a node, otherwise false * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. */ static bool assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) { GList *nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); if (prefer == NULL) { prefer = most_free_node; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { // We've already finished assignment of resources to nodes return rsc->allocated_to != NULL; } // Sort allowed nodes by score nodes = sorted_allowed_nodes(rsc); if (nodes != NULL) { best = (pe_node_t *) nodes->data; // First node has best score } if ((prefer != NULL) && (nodes != NULL)) { // Get the allowed node version of prefer chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", pe__node_name(prefer), rsc->id); /* Favor the preferred node as long as its score is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's score is less than INFINITY. 
*/ } else if (chosen->weight < best->weight) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", pe__node_name(chosen), rsc->id); chosen = NULL; } else if (!pcmk__node_available(chosen, true, false)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", pe__node_name(chosen), rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s " "(ignoring %d candidates)", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } } if ((chosen == NULL) && (best != NULL)) { /* Either there is no preferred node, or the preferred node is not * suitable, but another node is allowed to run the resource. */ chosen = best; if (!pe_rsc_is_unique_clone(rsc->parent) && (chosen->weight > 0) // Zero not acceptable && pcmk__node_available(chosen, false, false)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * pcmk__assign_instances() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unassigned instances to prefer a node that's already * running another instance. */ pe_node_t *running = pe__current_node(rsc); if (running == NULL) { // Nothing to do } else if (!pcmk__node_available(running, true, false)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, pe__node_name(running)); } else { int nodes_with_best_score = 1; for (GList *iter = nodes->next; iter; iter = iter->next) { pe_node_t *allowed = (pe_node_t *) iter->data; if (allowed->weight != chosen->weight) { // The nodes are sorted by score, so no more are equal break; } if (pe__same_node(allowed, running)) { // Scores are equal, so prefer the current node chosen = allowed; } nodes_with_best_score++; } if (nodes_with_best_score > 1) { uint8_t log_level = LOG_INFO; if (chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "Chose %s for %s from %d nodes with score %s", pe__node_name(chosen), rsc->id, nodes_with_best_score, pcmk_readable_score(chosen->weight)); } } } pe_rsc_trace(rsc, "Chose %s for %s from %d candidates", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } pcmk__assign_resource(rsc, chosen, false, stop_if_fail); g_list_free(nodes); return rsc->allocated_to != NULL; } /*! 
* \internal * \brief Apply a "this with" colocation to a node's allowed node scores * * \param[in,out] colocation Colocation to apply * \param[in,out] rsc Resource being assigned */ static void apply_this_with(pcmk__colocation_t *colocation, pe_resource_t *rsc) { GHashTable *archive = NULL; pe_resource_t *other = colocation->primary; // In certain cases, we will need to revert the node scores if ((colocation->dependent_role >= pcmk_role_promoted) || ((colocation->score < 0) && (colocation->score > -INFINITY))) { archive = pcmk__copy_node_table(rsc->allowed_nodes); } if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) { pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first " "(score=%d role=%s)", rsc->id, colocation->id, other->id, colocation->score, role2text(colocation->dependent_role)); other->cmds->assign(other, NULL, true); } // Apply the colocation score to this resource's allowed node scores rsc->cmds->apply_coloc_score(rsc, other, colocation, true); if ((archive != NULL) && !pcmk__any_node_available(rsc->allowed_nodes)) { pe_rsc_info(rsc, "%s: Reverting scores from colocation with %s " "because no nodes allowed", rsc->id, other->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive != NULL) { g_hash_table_destroy(archive); } } /*! * \internal * \brief Update a Pacemaker Remote node once its connection has been assigned * * \param[in] connection Connection resource that has been assigned */ static void remote_connection_assigned(const pe_resource_t *connection) { pe_node_t *remote_node = pe_find_node(connection->cluster->nodes, connection->id); CRM_CHECK(remote_node != NULL, return); if ((connection->allocated_to != NULL) && (connection->next_role != pcmk_role_stopped)) { crm_trace("Pacemaker Remote node %s will be online", remote_node->details->id); remote_node->details->online = TRUE; if (remote_node->details->unseen) { // Avoid unnecessary fence, since we will attempt connection remote_node->details->unclean = FALSE; } } else { crm_trace("Pacemaker Remote node %s will be shut down " "(%sassigned connection's next role is %s)", remote_node->details->id, ((connection->allocated_to == NULL)? "un" : ""), role2text(connection->next_role)); remote_node->details->shutdown = TRUE; } } /*! * \internal * \brief Assign a primitive resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a * node, set next role to stopped and update * existing actions * * \return Node that \p rsc is assigned to, if assigned entirely to one node * * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. 
*/ pe_node_t * pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail) { GList *this_with_colocations = NULL; GList *with_this_colocations = NULL; GList *iter = NULL; pcmk__colocation_t *colocation = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); // Never assign a child without parent being assigned first if ((rsc->parent != NULL) && !pcmk_is_set(rsc->parent->flags, pcmk_rsc_assigning)) { pe_rsc_debug(rsc, "%s: Assigning parent %s first", rsc->id, rsc->parent->id); rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { // Assignment has already been done const char *node_name = "no node"; if (rsc->allocated_to != NULL) { node_name = pe__node_name(rsc->allocated_to); } pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name); return rsc->allocated_to; } // Ensure we detect assignment loops if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) { pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id); return NULL; } pe__set_resource_flags(rsc, pcmk_rsc_assigning); pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes, rsc->cluster); this_with_colocations = pcmk__this_with_colocations(rsc); with_this_colocations = pcmk__with_this_colocations(rsc); // Apply mandatory colocations first, to satisfy as many as possible for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { apply_this_with(colocation, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score <= -CRM_SCORE_INFINITY) || (colocation->score >= CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(colocation, rsc); } } pe__show_node_scores(true, rsc, "Mandatory-colocations", rsc->allowed_nodes, rsc->cluster); // Then apply optional colocations for (iter = this_with_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { apply_this_with(colocation, rsc); } } for (iter = with_this_colocations; iter != NULL; iter = iter->next) { colocation = iter->data; if ((colocation->score > -CRM_SCORE_INFINITY) && (colocation->score < CRM_SCORE_INFINITY)) { pcmk__add_dependent_scores(colocation, rsc); } } g_list_free(this_with_colocations); g_list_free(with_this_colocations); if (rsc->next_role == pcmk_role_stopped) { pe_rsc_trace(rsc, "Banning %s from all nodes because it will be stopped", rsc->id); resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, rsc->cluster); } else if ((rsc->next_role > rsc->role) && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_quorate) && (rsc->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) { crm_notice("Resource %s cannot be elevated from %s to %s due to " "no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); } pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pcmk_sched_output_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Unmanage resource if fencing is enabled but no device is configured if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled) && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_have_fencing)) { pe__clear_resource_flags(rsc, pcmk_rsc_managed); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { // Unmanaged 
resources stay on their current node const char *reason = NULL; pe_node_t *assign_to = NULL; pe__set_next_role(rsc, rsc->role, "unmanaged"); assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == pcmk_role_promoted) { reason = "promoted"; } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? assign_to->details->uname : "no node"), reason); pcmk__assign_resource(rsc, assign_to, true, stop_if_fail); } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_stop_all)) { // Must stop at some point, but be consistent with stop_if_fail if (stop_if_fail) { pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); } pcmk__assign_resource(rsc, NULL, true, stop_if_fail); } else if (!assign_best_node(rsc, prefer, stop_if_fail)) { // Assignment failed if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if ((rsc->running_on != NULL) && stop_if_fail) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } pe__clear_resource_flags(rsc, pcmk_rsc_assigning); if (rsc->is_remote_node) { remote_connection_assigned(rsc); } return rsc->allocated_to; } /*! * \internal * \brief Schedule actions to bring resource down and back to current role * * \param[in,out] rsc Resource to restart * \param[in,out] current Node that resource should be brought down on * \param[in] need_stop Whether the resource must be stopped * \param[in] need_promote Whether the resource must be promoted */ static void schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current, bool need_stop, bool need_promote) { enum rsc_role_e role = rsc->role; enum rsc_role_e next_role; rsc_transition_fn fn = NULL; pe__set_resource_flags(rsc, pcmk_rsc_restarting); // Bring resource down to a stop on its current node while (role != pcmk_role_stopped) { next_role = rsc_state_matrix[role][pcmk_role_stopped]; pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s", (need_stop? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, current, !need_stop); role = next_role; } // Bring resource up to its next role on its next node while ((rsc->role <= rsc->next_role) && (role != rsc->role) && !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) { bool required = need_stop; next_role = rsc_state_matrix[role][rsc->role]; if ((next_role == pcmk_role_promoted) && need_promote) { required = true; } pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s", (required? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, !required); role = next_role; } pe__clear_resource_flags(rsc, pcmk_rsc_restarting); } /*!
* \internal * \brief If a resource's next role is not explicitly specified, set a default * * \param[in,out] rsc Resource to set next role for * * \return "explicit" if next role was explicitly set, otherwise "implicit" */ static const char * set_default_next_role(pe_resource_t *rsc) { if (rsc->next_role != pcmk_role_unknown) { return "explicit"; } if (rsc->allocated_to == NULL) { pe__set_next_role(rsc, pcmk_role_stopped, "assignment"); } else { pe__set_next_role(rsc, pcmk_role_started, "assignment"); } return "implicit"; } /*! * \internal * \brief Create an action to represent an already pending start * * \param[in,out] rsc Resource to create start action for */ static void create_pending_start(pe_resource_t *rsc) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating action for %s to represent already pending start", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); pe__set_action_flags(start, pe_action_print_always); } /*! * \internal * \brief Schedule actions needed to take a resource to its next role * * \param[in,out] rsc Resource to schedule actions for */ static void schedule_role_transition_actions(pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; while (role != rsc->next_role) { enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role]; rsc_transition_fn fn = NULL; pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)", rsc->id, role2text(role), role2text(next_role), role2text(rsc->next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, false); role = next_role; } } /*! * \internal * \brief Create all actions needed for a given primitive resource * * \param[in,out] rsc Primitive resource to create actions for */ void pcmk__primitive_create_actions(pe_resource_t *rsc) { bool need_stop = false; bool need_promote = false; bool is_moving = false; bool allow_migrate = false; bool multiply_active = false; pe_node_t *current = NULL; unsigned int num_all_active = 0; unsigned int num_clean_active = 0; const char *next_role_source = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); next_role_source = set_default_next_role(rsc); pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s " "(%s) on %s", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next_role_source, pe__node_name(rsc->allocated_to)); current = rsc->fns->active_node(rsc, &num_all_active, &num_clean_active); g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration, rsc); if ((current != NULL) && (rsc->allocated_to != NULL) && !pe__same_node(current, rsc->allocated_to) && (rsc->next_role >= pcmk_role_started)) { pe_rsc_trace(rsc, "Moving %s from %s to %s", rsc->id, pe__node_name(current), pe__node_name(rsc->allocated_to)); is_moving = true; allow_migrate = pcmk__rsc_can_migrate(rsc, current); // This is needed even if migrating (though I'm not sure why ...) need_stop = true; } // Check whether resource is partially migrated and/or multiply active if ((rsc->partial_migration_source != NULL) && (rsc->partial_migration_target != NULL) && allow_migrate && (num_all_active == 2) && pe__same_node(current, rsc->partial_migration_source) && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) { /* A partial migration is in progress, and the migration target remains * the same as when the migration began. 
*/ pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else if ((rsc->partial_migration_source != NULL) || (rsc->partial_migration_target != NULL)) { // A partial migration is in progress but can't be continued if (num_all_active > 2) { // The resource is migrating *and* multiply active! crm_notice("Forcing recovery of %s because it is migrating " "from %s to %s and possibly active elsewhere", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else { // The migration source or target isn't available crm_notice("Forcing recovery of %s because it can no longer " "migrate from %s to %s", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } need_stop = true; rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = false; } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { multiply_active = (num_all_active > 1); } else { /* If a resource has "requires" set to nothing or quorum, don't consider * it active on unclean nodes (similar to how all resources behave when * stonith-enabled is false). We can start such resources elsewhere * before fencing completes, and if we considered the resource active on * the failed node, we would attempt recovery for being active on * multiple nodes. */ multiply_active = (num_clean_active > 1); } if (multiply_active) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Resource was (possibly) incorrectly multiply active pe_proc_err("%s resource %s might be active on %u nodes (%s)", pcmk__s(class, "Untyped"), rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ" "#Resource_is_Too_Active for more information"); switch (rsc->recovery_type) { case pcmk_multiply_active_restart: need_stop = true; break; case pcmk_multiply_active_unexpected: need_stop = true; // stop_resource() will skip expected node - pe__set_resource_flags(rsc, pe_rsc_stop_unexpected); + pe__set_resource_flags(rsc, pcmk_rsc_stop_unexpected); break; default: break; } } else { - pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected); + pe__clear_resource_flags(rsc, pcmk_rsc_stop_unexpected); } if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) { create_pending_start(rsc); } if (is_moving) { // Remaining tests are only for resources staying where they are } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) { need_stop = true; pe_rsc_trace(rsc, "Recovering %s", rsc->id); } else { pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); if (rsc->next_role == pcmk_role_promoted) { need_promote = true; } } } else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) { pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id); need_stop = true; } else if ((rsc->role > pcmk_role_started) && (current != NULL) && (rsc->allocated_to != NULL)) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating start action for promoted resource %s", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); if (!pcmk_is_set(start->flags, pe_action_optional)) { // Recovery of a promoted resource pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id); need_stop = true; } } // Create any actions needed to bring resource down and back up to same role schedule_restart_actions(rsc, current, need_stop, need_promote); // 
Create any actions needed to take resource from this role to the next schedule_role_transition_actions(rsc); pcmk__create_recurring_actions(rsc); if (allow_migrate) { pcmk__create_migration_actions(rsc, current); } } /*! * \internal * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes * * \param[in] rsc Resource to check */ static void rsc_avoids_remote_nodes(const pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (node->details->remote_rsc != NULL) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. */ static GList * allowed_nodes_as_list(const pe_resource_t *rsc) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (!pcmk__is_daemon) { allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name); } return allowed_nodes; } /*! * \internal * \brief Create implicit constraints needed for a primitive resource * * \param[in,out] rsc Primitive resource to create implicit constraints for */ void pcmk__primitive_internal_constraints(pe_resource_t *rsc) { GList *allowed_nodes = NULL; bool check_unfencing = false; bool check_utilization = false; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pe_rsc_trace(rsc, "Skipping implicit constraints for unmanaged resource %s", rsc->id); return; } // Whether resource requires unfencing check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pcmk_sched_enable_unfencing) && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && !pcmk__str_eq(rsc->cluster->placement_strategy, "default", pcmk__str_casei); // Order stops before starts (i.e. 
restart) pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, pe_order_optional|pe_order_implies_then|pe_order_restart, rsc->cluster); // Promotable ordering: demote before stop, start before promote if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pcmk_rsc_promotable) || (rsc->role > pcmk_role_unpromoted)) { pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, pe_order_promoted_implies_first, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0), NULL, pe_order_runnable_left, rsc->cluster); } // Don't clear resource history if probing on same node pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0), NULL, pe_order_same_node|pe_order_then_cancels_first, rsc->cluster); // Certain checks need allowed nodes if (check_unfencing || check_utilization || (rsc->container != NULL)) { allowed_nodes = allowed_nodes_as_list(rsc); } if (check_unfencing) { g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc); } if (check_utilization) { pcmk__create_utilization_constraints(rsc, allowed_nodes); } if (rsc->container != NULL) { pe_resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Guest resources are not allowed to run on Pacemaker Remote nodes, * to avoid nesting remotes. However, bundles are allowed. */ if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR, rsc, PCMK_ACTION_STOP, pe_order_optional); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(rsc->cluster, rsc->container); } if (remote_rsc != NULL) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. 
*/ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); pcmk__new_ordering(rsc->container, pcmk__op_key(rsc->container->id, PCMK_ACTION_START, 0), NULL, rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL, pe_order_implies_then|pe_order_runnable_left, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL, rsc->container, pcmk__op_key(rsc->container->id, PCMK_ACTION_STOP, 0), NULL, pe_order_implies_first, rsc->cluster); if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } pcmk__new_colocation("#resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, pcmk__coloc_influence); } } if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) { /* Remote connections and fencing devices are not allowed to run on * Pacemaker Remote nodes */ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } /*! * \internal * \brief Apply a colocation's score to node scores or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node scores (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { enum pcmk__coloc_affects filter_results; CRM_ASSERT((dependent != NULL) && (primary != NULL) && (colocation != NULL)); if (for_dependent) { // Always process on behalf of primary resource primary->cmds->apply_coloc_score(dependent, primary, colocation, false); return; } filter_results = pcmk__colocation_affects(dependent, primary, colocation, false); pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)", ((colocation->score > 0)? "Colocating" : "Anti-colocating"), dependent->id, primary->id, colocation->id, colocation->score, filter_results); switch (filter_results) { case pcmk__coloc_affects_role: pcmk__apply_coloc_to_priority(dependent, primary, colocation); break; case pcmk__coloc_affects_location: pcmk__apply_coloc_to_scores(dependent, primary, colocation); break; default: // pcmk__coloc_affects_nothing return; } } /* Primitive implementation of * resource_alloc_functions_t:with_this_colocations() */ void pcmk__with_primitive_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (list != NULL)); if (rsc == orig_rsc) { /* For the resource itself, add all of its own colocations and relevant * colocations from its parent (if any). 
*/ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list); } } else { // For an ancestor, add only explicitly configured constraints for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) { pcmk__colocation_t *colocation = iter->data; if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { pcmk__add_with_this(list, colocation, orig_rsc); } } } } /* Primitive implementation of * resource_alloc_functions_t:this_with_colocations() */ void pcmk__primitive_with_colocations(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList **list) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (list != NULL)); if (rsc == orig_rsc) { /* For the resource itself, add all of its own colocations and relevant * colocations from its parent (if any). */ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc); if (rsc->parent != NULL) { rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list); } } else { // For an ancestor, add only explicitly configured constraints for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) { pcmk__colocation_t *colocation = iter->data; if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) { pcmk__add_this_with(list, colocation, orig_rsc); } } } } /*! * \internal * \brief Return action flags for a given primitive resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node (ignored) * * \return Flags appropriate to \p action on \p node */ uint32_t pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node) { CRM_ASSERT(action != NULL); return (uint32_t) action->flags; } /*! * \internal * \brief Check whether a node is a multiply active resource's expected node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p rsc is multiply active with multiple-active set to * stop_unexpected, and \p node is the node where it will remain active * \note This assumes that the resource's next role cannot be changed to stopped * after this is called, which should be reasonable if status has already * been unpacked and resources have been assigned to nodes. */ static bool is_expected_node(const pe_resource_t *rsc, const pe_node_t *node) { return pcmk_all_flags_set(rsc->flags, - pe_rsc_stop_unexpected|pcmk_rsc_restarting) + pcmk_rsc_stop_unexpected|pcmk_rsc_restarting) && (rsc->next_role > pcmk_role_stopped) && pe__same_node(rsc->allocated_to, node); } /*! * \internal * \brief Schedule actions needed to stop a resource wherever it is active * * \param[in,out] rsc Resource being stopped * \param[in] node Node where resource is being stopped (ignored) * \param[in] optional Whether actions should be optional */ static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; pe_action_t *stop = NULL; if (is_expected_node(rsc, current)) { /* We are scheduling restart actions for a multiply active resource * with multiple-active=stop_unexpected, and this is where it should * not be stopped. 
*/ pe_rsc_trace(rsc, "Skipping stop of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); continue; } if (rsc->partial_migration_target != NULL) { // Continue migration if node originally was and remains target if (pe__same_node(current, rsc->partial_migration_target) && pe__same_node(current, rsc->allocated_to)) { pe_rsc_trace(rsc, "Skipping stop of %s on %s " "because partial migration there will continue", rsc->id, pe__node_name(current)); continue; } else { pe_rsc_trace(rsc, "Forcing stop of %s on %s " "because migration target changed", rsc->id, pe__node_name(current)); optional = false; } } pe_rsc_trace(rsc, "Scheduling stop of %s on %s", rsc->id, pe__node_name(current)); stop = stop_action(rsc, current, optional); if (rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", true); } else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting - |pe_rsc_stop_unexpected)) { + |pcmk_rsc_stop_unexpected)) { /* We are stopping a multiply active resource on a node that is * not its expected node, and we are still scheduling restart * actions, so the stop is for being multiply active. */ pe_action_set_reason(stop, "being multiply active", true); } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pe__clear_action_flags(stop, pe_action_runnable); } if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_remove_after_stop)) { pcmk__schedule_cleanup(rsc, current, optional); } if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { pe_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true, NULL, false, rsc->cluster); order_actions(stop, unfence, pe_order_implies_first); if (!pcmk__node_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, pe__node_name(current)); } } } } /*! * \internal * \brief Schedule actions needed to start a resource on a node * * \param[in,out] rsc Resource being started * \param[in,out] node Node where resource should be started * \param[in] optional Whether actions should be optional */ static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { pe_action_t *start = NULL; CRM_ASSERT(node != NULL); pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)", (optional? "optional" : "required"), rsc->id, pe__node_name(node), node->weight); start = start_action(rsc, node, TRUE); pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then); if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) { pe__clear_action_flags(start, pe_action_optional); } if (is_expected_node(rsc, node)) { /* This could be a problem if the start becomes necessary for other * reasons later. */ pe_rsc_trace(rsc, "Start of multiply active resource %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(start, pe_action_pseudo); } } /*! 
* \internal * \brief Schedule actions needed to promote a resource on a node * * \param[in,out] rsc Resource being promoted * \param[in] node Node where resource should be promoted * \param[in] optional Whether actions should be optional */ static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { GList *iter = NULL; GList *action_list = NULL; bool runnable = true; CRM_ASSERT(node != NULL); // Any start must be runnable for promotion to be runnable action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *start = (pe_action_t *) iter->data; if (!pcmk_is_set(start->flags, pe_action_runnable)) { runnable = false; } } g_list_free(action_list); if (runnable) { pe_action_t *promote = promote_action(rsc, node, optional); pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(node)); if (is_expected_node(rsc, node)) { /* This could be a problem if the promote becomes necessary for * other reasons later. */ pe_rsc_trace(rsc, "Promotion of multiply active resource %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(promote, pe_action_pseudo); } } else { pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable", rsc->id, pe__node_name(node)); action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *promote = (pe_action_t *) iter->data; pe__clear_action_flags(promote, pe_action_runnable); } g_list_free(action_list); } } /*! * \internal * \brief Schedule actions needed to demote a resource wherever it is active * * \param[in,out] rsc Resource being demoted * \param[in] node Node where resource should be demoted (ignored) * \param[in] optional Whether actions should be optional */ static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { /* Since this will only be called for a primitive (possibly as an instance * of a collective resource), the resource is multiply active if it is * running on more than one node, so we want to demote on all of them as * part of recovery, regardless of which one is the desired node. */ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; if (is_expected_node(rsc, current)) { pe_rsc_trace(rsc, "Skipping demote of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); } else { pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(current)); demote_action(rsc, current, optional); } } } static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional) { CRM_ASSERT(false); } /*! * \internal * \brief Schedule cleanup of a resource * * \param[in,out] rsc Resource to clean up * \param[in] node Node to clean up on * \param[in] optional Whether clean-up should be optional */ void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional) { /* If the cleanup is required, its orderings are optional, because they're * relevant only if both actions are required. Conversely, if the cleanup is * optional, the orderings make the then action required if the first action * becomes required. */ uint32_t flag = optional? 
pe_order_implies_then : pe_order_optional; CRM_CHECK((rsc != NULL) && (node != NULL), return); if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed", rsc->id, pe__node_name(node)); return; } if (node->details->unclean || !node->details->online) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable", rsc->id, pe__node_name(node)); return; } crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node)); delete_action(rsc, node, optional); // stop -> clean-up -> start pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, rsc, PCMK_ACTION_DELETE, flag); pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE, rsc, PCMK_ACTION_START, flag); } /*! * \internal * \brief Add primitive meta-attributes relevant to graph actions to XML * * \param[in] rsc Primitive resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml) { char *name = NULL; char *value = NULL; const pe_resource_t *parent = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (xml != NULL)); /* Clone instance numbers get set internally as meta-attributes, and are * needed in the transition graph (for example, to tell unique clone * instances apart). */ value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } // Not sure if this one is really needed ... value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } /* The container meta-attribute can be set on the primitive itself or one of * its parents (for example, a group inside a container resource), so check * them all, and keep the highest one found. */ for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container != NULL) { crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id); } } /* Bundle replica children will get their external-ip set internally as a * meta-attribute. The graph action needs it, but under a different naming * convention than other meta-attributes. */ value = g_hash_table_lookup(rsc->meta, "external-ip"); if (value != NULL) { crm_xml_add(xml, "pcmk_external_ip", value); } } // Primitive implementation of resource_alloc_functions_t:add_utilization() void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive) && (orig_rsc != NULL) && (utilization != NULL)); if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) { return; } pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization", orig_rsc->id, rsc->id); pcmk__release_node_capacity(utilization, rsc); } /*! * \internal * \brief Get epoch time of node's shutdown attribute (or now if none) * * \param[in,out] node Node to check * * \return Epoch time corresponding to shutdown attribute if set or now if not */ static time_t shutdown_time(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); time_t result = 0; if (shutdown != NULL) { long long result_ll; if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) { result = (time_t) result_ll; } } return (result == 0)? 
get_effective_time(node->details->data_set) : result; } /*! * \internal * \brief Ban a resource from a node if it's not locked to the node * * \param[in] data Node to check * \param[in,out] user_data Resource to check */ static void ban_if_not_locked(gpointer data, gpointer user_data) { const pe_node_t *node = (const pe_node_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) { resource_location(rsc, node, -CRM_SCORE_INFINITY, XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster); } } // Primitive implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__primitive_shutdown_lock(pe_resource_t *rsc) { const char *class = NULL; CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)); class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Fence devices and remote connections can't be locked if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches) || pe__resource_is_remote_conn(rsc)) { return; } if (rsc->lock_node != NULL) { // The lock was obtained from resource history if (rsc->running_on != NULL) { /* The resource was started elsewhere even though it is now * considered locked. This shouldn't be possible, but as a * failsafe, we don't want to disturb the resource now. */ pe_rsc_info(rsc, "Cancelling shutdown lock because %s is already active", rsc->id); pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster); rsc->lock_node = NULL; rsc->lock_time = 0; } // Only a resource active on exactly one node can be locked } else if (pcmk__list_of_1(rsc->running_on)) { pe_node_t *node = rsc->running_on->data; if (node->details->shutdown) { if (node->details->unclean) { pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", rsc->id, pe__node_name(node)); } else { rsc->lock_node = node; rsc->lock_time = shutdown_time(node); } } } if (rsc->lock_node == NULL) { // No lock needed return; } if (rsc->cluster->shutdown_lock > 0) { time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock; pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", rsc->id, pe__node_name(rsc->lock_node), (long long) lock_expiration); pe__update_recheck_time(++lock_expiration, rsc->cluster); } else { pe_rsc_info(rsc, "Locking %s to %s due to shutdown", rsc->id, pe__node_name(rsc->lock_node)); } // If resource is locked to one node, ban it from all other nodes g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc); }
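As a rough illustration of the shutdown-lock bookkeeping in pcmk__primitive_shutdown_lock() above (a minimal standalone sketch, not scheduler code; the epoch timestamp and 600-second shutdown-lock value are assumed examples): the lock expires at rsc->lock_time + rsc->cluster->shutdown_lock, and the recheck hint passed to pe__update_recheck_time() is one second later (the pre-incremented lock_expiration), so a new transition is calculated shortly after the lock expires and the bans added by ban_if_not_locked() no longer need to apply.

/* Illustrative sketch only -- assumed example values, not part of the scheduler */
#include <stdio.h>
#include <time.h>

int
main(void)
{
    time_t lock_time = 1700000000;   /* node's shutdown attribute (example value) */
    time_t shutdown_lock = 600;      /* cluster's shutdown-lock setting, in seconds (example) */

    time_t lock_expiration = lock_time + shutdown_lock;  /* logged as "expires @%lld" */
    time_t recheck_by = lock_expiration + 1;             /* what ++lock_expiration passes on */

    printf("lock expires at %lld, scheduler recheck hinted for %lld\n",
           (long long) lock_expiration, (long long) recheck_by);
    return 0;
}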