diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
index b5ebe80b4a..63b3052098 100644
--- a/include/crm/common/scheduler_internal.h
+++ b/include/crm/common/scheduler_internal.h
@@ -1,288 +1,289 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
#define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef __cplusplus
extern "C" {
#endif

enum pcmk__check_parameters {
    /* Clear fail count if parameters changed for un-expired start or monitor
     * last_failure.
     */
    pcmk__check_last_failure,

    /* Clear fail count if parameters changed for start, monitor, promote, or
     * migrate_from actions for active resources.
     */
    pcmk__check_active,
};

// Scheduling options and conditions
enum pcmk__scheduler_flags {
    // No scheduler flags set (compare with equality rather than bit set)
    pcmk__sched_none = 0ULL,

    /* These flags are dynamically determined conditions */

    // Whether partition has quorum (via \c PCMK_XA_HAVE_QUORUM attribute)
    //! \deprecated Call pcmk_has_quorum() to check quorum instead
    pcmk__sched_quorate = (1ULL << 0),

    // Whether cluster is symmetric (via symmetric-cluster property)
    pcmk__sched_symmetric_cluster = (1ULL << 1),

    // Whether scheduling encountered a non-configuration error
    pcmk__sched_processing_error = (1ULL << 2),

    // Whether cluster is in maintenance mode (via maintenance-mode property)
    pcmk__sched_in_maintenance = (1ULL << 3),

    // Whether fencing is enabled (via stonith-enabled property)
    pcmk__sched_fencing_enabled = (1ULL << 4),

    // Whether cluster has a fencing resource (via CIB resources)
    /*!
     * \deprecated To indicate the cluster has a fencing resource, add either a
     * fencing resource configuration or the have-watchdog cluster option to the
     * input CIB
     */
    pcmk__sched_have_fencing = (1ULL << 5),

    // Whether any resource provides or requires unfencing (via CIB resources)
    pcmk__sched_enable_unfencing = (1ULL << 6),

    // Whether concurrent fencing is allowed (via concurrent-fencing property)
    pcmk__sched_concurrent_fencing = (1ULL << 7),

    /*
     * Whether resources removed from the configuration should be stopped (via
     * stop-orphan-resources property)
     */
    pcmk__sched_stop_removed_resources = (1ULL << 8),

    /*
     * Whether recurring actions removed from the configuration should be
     * cancelled (via stop-orphan-actions property)
     */
    pcmk__sched_cancel_removed_actions = (1ULL << 9),

    // Whether to stop all resources (via stop-all-resources property)
    pcmk__sched_stop_all = (1ULL << 10),

    // Whether scheduler processing encountered a warning
    pcmk__sched_processing_warning = (1ULL << 11),

    /*
     * Whether start failure should be treated as if
     * \c PCMK_META_MIGRATION_THRESHOLD is 1 (via
     * \c PCMK_OPT_START_FAILURE_IS_FATAL property)
     */
    pcmk__sched_start_failure_fatal = (1ULL << 12),

    // Whether unseen nodes should be fenced (via startup-fencing property)
    pcmk__sched_startup_fencing = (1ULL << 14),

    /*
     * Whether resources should be left stopped when their node shuts down
     * cleanly (via shutdown-lock property)
     */
    pcmk__sched_shutdown_lock = (1ULL << 15),

    /*
     * Whether resources' current state should be probed (when unknown) before
     * scheduling any other actions (via the enable-startup-probes property)
     */
    pcmk__sched_probe_resources = (1ULL << 16),

    // Whether the CIB status section has been parsed yet
    pcmk__sched_have_status = (1ULL << 17),

    // Whether the cluster includes any Pacemaker Remote nodes (via CIB)
    pcmk__sched_have_remote_nodes = (1ULL << 18),

    /* The remaining flags are scheduling options that must be set explicitly */

    /*
     * Whether to skip unpacking the CIB status section and stop the scheduling
     * sequence after applying node-specific location criteria (skipping
     * assignment, ordering, actions, etc.).
     */
    pcmk__sched_location_only = (1ULL << 20),

    // Whether sensitive resource attributes have been masked
    pcmk__sched_sanitized = (1ULL << 21),

    // Skip counting of total, disabled, and blocked resource instances
    pcmk__sched_no_counts = (1ULL << 23),

    // Whether node scores should be output instead of logged
    pcmk__sched_output_scores = (1ULL << 25),

    // Whether to show node and resource utilization (in log or output)
    pcmk__sched_show_utilization = (1ULL << 26),

    /*
     * Whether to stop the scheduling sequence after unpacking the CIB,
     * calculating cluster status, and applying node health (skipping
     * applying node-specific location criteria, assignment, etc.)
     */
    pcmk__sched_validate_only = (1ULL << 27),
};

// Implementation of pcmk__scheduler_private_t
struct pcmk__scheduler_private {
    // Be careful about when each piece of information is available and final
    char *local_node_name;          // Name of node running scheduler (if known)
    crm_time_t *now;                // Time to use when evaluating rules
    pcmk__output_t *out;            // Output object for displaying messages
    GHashTable *options;            // Cluster options
    const char *fence_action;       // Default fencing action
    guint fence_timeout_ms;         // Default fencing action timeout (in ms)
    guint priority_fencing_ms;      // Priority-based fencing delay (in ms)
    guint shutdown_lock_ms;         // How long to lock resources (in ms)
    guint node_pending_ms;          // Pending join times out after this (in ms)
    const char *placement_strategy; // Value of placement-strategy property
    xmlNode *rsc_defaults;          // Configured resource defaults
    xmlNode *op_defaults;           // Configured operation defaults
    GList *resources;               // Resources in cluster
    GHashTable *templates;          // Key = template ID, value = resource list
    GHashTable *tags;               // Key = tag ID, value = element list
    GList *actions;                 // All scheduled actions
    GHashTable *singletons;         // Scheduled non-resource actions
    int next_action_id;             // Counter used as ID for actions
    xmlNode *failed;                // History entries of failed actions
    GList *param_check;             // History entries that need to be checked
    GList *stop_needed;             // Containers that need stop actions
    GList *location_constraints;    // Location constraints
    GList *colocation_constraints;  // Colocation constraints
    GList *ordering_constraints;    // Ordering constraints
    GHashTable *ticket_constraints; // Key = ticket ID, value = pcmk__ticket_t
    int next_ordering_id;           // Counter used as ID for orderings
    int ninstances;                 // Total number of resource instances
    int blocked_resources;          // Number of blocked resources in cluster
    int disabled_resources;         // Number of disabled resources in cluster
    time_t recheck_by;              // Hint to controller when to reschedule
    xmlNode *graph;                 // Transition graph
    int synapse_count;              // Number of transition graph synapses
};

// Group of enum pcmk__warnings flags for warnings we want to log once
extern uint32_t pcmk__warnings;

/*!
 * \internal
 * \brief Log a resource-tagged message at info severity
 *
 * \param[in] rsc     Tag message with this resource's ID
 * \param[in] fmt...  printf(3)-style format and arguments
 */
#define pcmk__rsc_info(rsc, fmt, args...)   \
    crm_log_tag(LOG_INFO, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args)

/*!
 * \internal
 * \brief Log a resource-tagged message at debug severity
 *
 * \param[in] rsc     Tag message with this resource's ID
 * \param[in] fmt...  printf(3)-style format and arguments
 */
#define pcmk__rsc_debug(rsc, fmt, args...)  \
    crm_log_tag(LOG_DEBUG, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args)

/*!
 * \internal
 * \brief Log a resource-tagged message at trace severity
 *
 * \param[in] rsc     Tag message with this resource's ID
 * \param[in] fmt...  printf(3)-style format and arguments
 */
#define pcmk__rsc_trace(rsc, fmt, args...)  \
    crm_log_tag(LOG_TRACE, ((rsc) == NULL)? "" : (rsc)->id, (fmt), ##args)

/*!
 * \internal
 * \brief Log an error and remember that current scheduler input has errors
 *
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     fmt...     printf(3)-style format and arguments
 */
#define pcmk__sched_err(scheduler, fmt...) do {                         \
        pcmk__set_scheduler_flags((scheduler),                          \
                                  pcmk__sched_processing_error);        \
        crm_err(fmt);                                                   \
    } while (0)
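The logging macros above take a resource or scheduler plus an ordinary printf(3)-style message. As a minimal illustration (an editor's sketch, not part of the patch; report_unassignable() is a hypothetical helper and rsc is assumed non-NULL), they are typically combined like this:

/* Editor's sketch only: shows the intended use of pcmk__rsc_debug() and
 * pcmk__sched_err() as defined above.
 */
static void
report_unassignable(pcmk_scheduler_t *scheduler, const pcmk_resource_t *rsc)
{
    // Tag the debug message with the resource ID (empty tag if rsc were NULL)
    pcmk__rsc_debug(rsc, "Checking assignment for %s", rsc->id);

    // Log the error and set pcmk__sched_processing_error in scheduler->flags
    pcmk__sched_err(scheduler, "Resource %s cannot run anywhere", rsc->id);
}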
/*!
 * \internal
 * \brief Log a warning and remember that current scheduler input has warnings
 *
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     fmt...     printf(3)-style format and arguments
 */
#define pcmk__sched_warn(scheduler, fmt...) do {                        \
        pcmk__set_scheduler_flags((scheduler),                          \
                                  pcmk__sched_processing_warning);      \
        crm_warn(fmt);                                                  \
    } while (0)

/*!
 * \internal
 * \brief Set scheduler flags
 *
 * \param[in,out] scheduler     Scheduler data
 * \param[in]     flags_to_set  Group of enum pcmk__scheduler_flags to set
 */
#define pcmk__set_scheduler_flags(scheduler, flags_to_set) do {         \
        (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__,     \
            LOG_TRACE, "Scheduler", crm_system_name,                    \
            (scheduler)->flags, (flags_to_set), #flags_to_set);         \
    } while (0)

/*!
 * \internal
 * \brief Clear scheduler flags
 *
 * \param[in,out] scheduler       Scheduler data
 * \param[in]     flags_to_clear  Group of enum pcmk__scheduler_flags to clear
 */
#define pcmk__clear_scheduler_flags(scheduler, flags_to_clear) do {     \
        (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__,   \
            LOG_TRACE, "Scheduler", crm_system_name,                    \
            (scheduler)->flags, (flags_to_clear), #flags_to_clear);     \
    } while (0)

+void pcmk__set_scheduler_defaults(pcmk_scheduler_t *scheduler);
time_t pcmk__scheduler_epoch_time(pcmk_scheduler_t *scheduler);

#ifdef __cplusplus
}
#endif

#endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
diff --git a/lib/common/scheduler.c b/lib/common/scheduler.c
index 2717b54d55..020de6283d 100644
--- a/lib/common/scheduler.c
+++ b/lib/common/scheduler.c
@@ -1,128 +1,158 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include
#include  // uint32_t
#include  // EINVAL
#include  // gboolean, FALSE
#include  // xmlNode
#include

uint32_t pcmk__warnings = 0;

+/*!
+ * \internal
+ * \brief Set non-zero default values in scheduler data
+ *
+ * \param[in,out] scheduler  Scheduler data to modify
+ *
+ * \note Values that default to NULL or 0 will remain unchanged
+ */
+void
+pcmk__set_scheduler_defaults(pcmk_scheduler_t *scheduler)
+{
+    pcmk__assert(scheduler != NULL);
+    scheduler->flags = 0U;
+#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
+    pcmk__set_scheduler_flags(scheduler,
+                              pcmk__sched_symmetric_cluster
+                              |pcmk__sched_concurrent_fencing
+                              |pcmk__sched_stop_removed_resources
+                              |pcmk__sched_cancel_removed_actions);
+#else
+    pcmk__set_scheduler_flags(scheduler,
+                              pcmk__sched_symmetric_cluster
+                              |pcmk__sched_stop_removed_resources
+                              |pcmk__sched_cancel_removed_actions);
+#endif
+    scheduler->no_quorum_policy = pcmk_no_quorum_stop;
+    scheduler->priv->next_action_id = 1;
+    scheduler->priv->next_ordering_id = 1;
+}
+
/*!
 * \internal
 * \brief Get the Designated Controller node from scheduler data
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return Designated Controller node from scheduler data, or NULL if none
 */
pcmk_node_t *
pcmk_get_dc(const pcmk_scheduler_t *scheduler)
{
    return (scheduler == NULL)? NULL : scheduler->dc_node;
}

/*!
 * \internal
 * \brief Get the no quorum policy from scheduler data
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return No quorum policy from scheduler data
 */
enum pe_quorum_policy
pcmk_get_no_quorum_policy(const pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return pcmk_no_quorum_stop; // The default
    }
    return scheduler->no_quorum_policy;
}
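For orientation, here is a minimal sketch (not part of the patch) of the calling pattern the new helper expects: allocate both the public struct and its private part, then apply the defaults, exactly as pe_new_working_set() and the relocated unit test later in this diff do. The sketch assumes the internal allocator pcmk__assert_alloc() used by that test; example_new_scheduler() is a hypothetical name.

/* Editor's sketch only: mirrors the allocation pattern used by
 * pe_new_working_set() and pcmk__set_scheduler_defaults_test.c in this diff.
 */
static pcmk_scheduler_t *
example_new_scheduler(void)
{
    pcmk_scheduler_t *scheduler = pcmk__assert_alloc(1,
                                                     sizeof(pcmk_scheduler_t));

    scheduler->priv = pcmk__assert_alloc(1,
                                         sizeof(pcmk__scheduler_private_t));
    pcmk__set_scheduler_defaults(scheduler);    // asserts if scheduler is NULL

    /* The non-zero defaults are now in place: the default flag set, a
     * no-quorum policy of "stop", and action/ordering counters starting at 1.
     */
    return scheduler;
}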
/*!
 * \internal
 * \brief Set CIB XML as scheduler input in scheduler data
 *
 * \param[out] scheduler  Scheduler data
 * \param[in]  cib        CIB XML to set as scheduler input
 *
 * \return Standard Pacemaker return code (EINVAL if \p scheduler is NULL,
 *         otherwise pcmk_rc_ok)
 * \note This will not free any previously set scheduler CIB.
 */
int
pcmk_set_scheduler_cib(pcmk_scheduler_t *scheduler, xmlNode *cib)
{
    if (scheduler == NULL) {
        return EINVAL;
    }
    scheduler->input = cib;
    return pcmk_rc_ok;
}

/*!
 * \internal
 * \brief Check whether cluster has quorum
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if cluster has quorum, otherwise false
 */
bool
pcmk_has_quorum(const pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return false;
    }
    return pcmk_is_set(scheduler->flags, pcmk__sched_quorate);
}

/*!
 * \brief Find a node by name in scheduler data
 *
 * \param[in] scheduler  Scheduler data
 * \param[in] node_name  Name of node to find
 *
 * \return Node from scheduler data that matches \p node_name if any,
 *         otherwise NULL
 */
pcmk_node_t *
pcmk_find_node(const pcmk_scheduler_t *scheduler, const char *node_name)
{
    if ((scheduler == NULL) || (node_name == NULL)) {
        return NULL;
    }
    return pcmk__find_node_in_list(scheduler->nodes, node_name);
}

/*!
 * \internal
 * \brief Get scheduler data's "now" in epoch time
 *
 * \param[in,out] scheduler  Scheduler data
 *
 * \return Scheduler data's "now" as seconds since epoch (defaulting to current
 *         time)
 */
time_t
pcmk__scheduler_epoch_time(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return time(NULL);
    }
    if (scheduler->priv->now == NULL) {
        crm_trace("Scheduler 'now' set to current time");
        scheduler->priv->now = crm_time_new(NULL);
    }
    return crm_time_get_seconds_since_epoch(scheduler->priv->now);
}
diff --git a/lib/common/tests/scheduler/Makefile.am b/lib/common/tests/scheduler/Makefile.am
index 51bde6235e..3665b38eee 100644
--- a/lib/common/tests/scheduler/Makefile.am
+++ b/lib/common/tests/scheduler/Makefile.am
@@ -1,20 +1,21 @@
#
# Copyright 2024 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#

include $(top_srcdir)/mk/common.mk
include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk

# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk_get_dc_test \
+check_PROGRAMS = pcmk__set_scheduler_defaults_test \
+                 pcmk_get_dc_test \
                  pcmk_get_no_quorum_policy_test \
                  pcmk_has_quorum_test \
                  pcmk_set_scheduler_cib_test

TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/set_working_set_defaults_test.c b/lib/common/tests/scheduler/pcmk__set_scheduler_defaults_test.c
similarity index 76%
rename from lib/pengine/tests/status/set_working_set_defaults_test.c
rename to lib/common/tests/scheduler/pcmk__set_scheduler_defaults_test.c
index 9743e1c759..788d04bcf1 100644
--- a/lib/pengine/tests/status/set_working_set_defaults_test.c
+++ b/lib/common/tests/scheduler/pcmk__set_scheduler_defaults_test.c
@@ -1,50 +1,55 @@
/*
 * Copyright 2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include
#include

#include "mock_private.h"

static void
-check_defaults(void **state) {
-    uint32_t flags;
-    pcmk_scheduler_t *scheduler = pcmk__assert_alloc(1,
-                                                     sizeof(pcmk_scheduler_t));
+null_scheduler(void **state)
+{
+    pcmk__assert_asserts(pcmk__set_scheduler_defaults(NULL));
+}
+
+static void
+check_defaults(void **state)
+{
+    uint32_t flags = 0U;
+    pcmk_scheduler_t *scheduler = NULL;
+
+    scheduler = pcmk__assert_alloc(1, sizeof(pcmk_scheduler_t));
    scheduler->priv = pcmk__assert_alloc(1, sizeof(pcmk__scheduler_private_t));

-    set_working_set_defaults(scheduler);
+    pcmk__set_scheduler_defaults(scheduler);

    flags = pcmk__sched_symmetric_cluster
#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
            |pcmk__sched_concurrent_fencing
#endif
            |pcmk__sched_stop_removed_resources
            |pcmk__sched_cancel_removed_actions;

    assert_null(scheduler->priv->out);
    assert_int_equal(scheduler->priv->next_ordering_id, 1);
    assert_int_equal(scheduler->priv->next_action_id, 1);
    assert_int_equal(scheduler->no_quorum_policy, pcmk_no_quorum_stop);
    assert_int_equal(scheduler->flags, flags);

-    /* Avoid calling pe_free_working_set here so we don't artificially
-     * inflate the coverage numbers.
-     */
    free(scheduler->priv);
    free(scheduler);
}

PCMK__UNIT_TEST(NULL, NULL,
+                cmocka_unit_test(null_scheduler),
                cmocka_unit_test(check_defaults))
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 2e4deb07e1..9ce10fdb72 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,546 +1,532 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include
#include
#include
#include
#include

/*!
 * \brief Create a new object to hold scheduler data
 *
 * \return New, initialized scheduler data on success, else NULL (and set errno)
 * \note Only pcmk_scheduler_t objects created with this function (as opposed
 *       to statically declared or directly allocated) should be used with the
 *       functions in this library, to allow for future extensions to the
 *       data type. The caller is responsible for freeing the memory with
 *       pe_free_working_set() when the instance is no longer needed.
 */
pcmk_scheduler_t *
pe_new_working_set(void)
{
    pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));

    if (scheduler == NULL) {
        return NULL;
    }
    scheduler->priv = calloc(1, sizeof(pcmk__scheduler_private_t));
    if (scheduler->priv == NULL) {
        free(scheduler);
        return NULL;
    }
-    set_working_set_defaults(scheduler);
+    pcmk__set_scheduler_defaults(scheduler);
    return scheduler;
}

/*!
 * \brief Free scheduler data
 *
 * \param[in,out] scheduler  Scheduler data to free
 */
void
pe_free_working_set(pcmk_scheduler_t *scheduler)
{
    if (scheduler != NULL) {
        pe_reset_working_set(scheduler);
        free(scheduler->priv->local_node_name);
        free(scheduler->priv);
        free(scheduler);
    }
}

#define XPATH_DEPRECATED_RULES                          \
    "//" PCMK_XE_OP_DEFAULTS "//" PCMK_XE_EXPRESSION    \
    "|//" PCMK_XE_OP "//" PCMK_XE_EXPRESSION
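Together with lib/common/scheduler.c above, the functions just shown form the usual lifecycle for scheduler data. A rough sketch of that flow (an editor's illustration, not part of the patch; example_run_scheduler() and cib_xml are hypothetical):

/* Editor's sketch only: create scheduler data, attach a CIB, compute cluster
 * status, then free everything, based on pe_new_working_set(),
 * pcmk_set_scheduler_cib(), cluster_status(), and pe_free_working_set()
 * shown in this diff.
 */
static void
example_run_scheduler(xmlNode *cib_xml)
{
    pcmk_scheduler_t *scheduler = pe_new_working_set();

    if (scheduler == NULL) {
        return;     // allocation failed; errno has been set
    }
    pcmk_set_scheduler_cib(scheduler, cib_xml);     // scheduler->input = cib_xml
    if (cluster_status(scheduler)) {
        crm_info("Unpacked %d resource instances",
                 scheduler->priv->ninstances);
    }

    /* Resetting or freeing the working set also frees scheduler->input
     * (see cleanup_calculations() below), so cib_xml must not be reused.
     */
    pe_free_working_set(scheduler);
}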
/*!
 * \internal
 * \brief Log a warning for deprecated rule syntax in operations
 *
 * \param[in] scheduler  Scheduler data
 */
static void
check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
{
    // @COMPAT Drop this function when support for the syntax is dropped
    xmlNode *deprecated = get_xpath_object(XPATH_DEPRECATED_RULES,
                                           scheduler->input, LOG_NEVER);

    if (deprecated != NULL) {
        pcmk__warn_once(pcmk__wo_op_attr_expr,
                        "Support for rules with node attribute expressions in "
                        PCMK_XE_OP " or " PCMK_XE_OP_DEFAULTS " is deprecated "
                        "and will be dropped in a future release");
    }
}

/*
 * Unpack everything
 * At the end you'll have:
 *  - A list of nodes
 *  - A list of resources (each with any dependencies on other resources)
 *  - A list of constraints between resources and nodes
 *  - A list of constraints between start/stop actions
 *  - A list of nodes that need to be stonith'd
 *  - A list of nodes that need to be shutdown
 *  - A list of the possible stop/start actions (without dependencies)
 */
gboolean
cluster_status(pcmk_scheduler_t * scheduler)
{
    const char *new_version = NULL;
    xmlNode *section = NULL;

    if ((scheduler == NULL) || (scheduler->input == NULL)) {
        return FALSE;
    }

    new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);

    if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
        pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
                         new_version, CRM_FEATURE_SET);
        return FALSE;
    }

    crm_trace("Beginning unpack");

    if (scheduler->priv->failed != NULL) {
        pcmk__xml_free(scheduler->priv->failed);
    }
    scheduler->priv->failed = pcmk__xe_create(NULL, "failed-ops");

    if (scheduler->priv->now == NULL) {
        scheduler->priv->now = crm_time_new(NULL);
    }

    if (pcmk__xe_attr_is_true(scheduler->input, PCMK_XA_HAVE_QUORUM)) {
        pcmk__set_scheduler_flags(scheduler, pcmk__sched_quorate);
    } else {
        pcmk__clear_scheduler_flags(scheduler, pcmk__sched_quorate);
    }

    scheduler->priv->op_defaults = get_xpath_object("//" PCMK_XE_OP_DEFAULTS,
                                                    scheduler->input,
                                                    LOG_NEVER);
    check_for_deprecated_rules(scheduler);

    scheduler->priv->rsc_defaults = get_xpath_object("//" PCMK_XE_RSC_DEFAULTS,
                                                     scheduler->input,
                                                     LOG_NEVER);

    section = get_xpath_object("//" PCMK_XE_CRM_CONFIG, scheduler->input,
                               LOG_TRACE);
    unpack_config(section, scheduler);

    if (!pcmk_any_flags_set(scheduler->flags,
                            pcmk__sched_location_only|pcmk__sched_quorate)
        && (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
        pcmk__sched_warn(scheduler,
                         "Fencing and resource management disabled "
                         "due to lack of quorum");
    }

    section = get_xpath_object("//" PCMK_XE_NODES, scheduler->input, LOG_TRACE);
    unpack_nodes(section, scheduler);

    section = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
                               LOG_TRACE);
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
        unpack_remote_nodes(section, scheduler);
    }
    unpack_resources(section, scheduler);

    section = get_xpath_object("//" PCMK_XE_FENCING_TOPOLOGY, scheduler->input,
                               LOG_TRACE);
    pcmk__validate_fencing_topology(section);

    section = get_xpath_object("//" PCMK_XE_TAGS, scheduler->input, LOG_NEVER);
    unpack_tags(section, scheduler);

    if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
        section = get_xpath_object("//" PCMK_XE_STATUS, scheduler->input,
                                   LOG_TRACE);
        unpack_status(section, scheduler);
    }

    if (!pcmk_is_set(scheduler->flags, pcmk__sched_no_counts)) {
        for (GList *item = scheduler->priv->resources; item != NULL;
             item = item->next) {
            pcmk_resource_t *rsc = item->data;

            rsc->priv->fns->count(item->data);
        }
        crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
                  scheduler->priv->ninstances,
                  scheduler->priv->disabled_resources,
                  scheduler->priv->blocked_resources);
    }

    if ((scheduler->priv->local_node_name != NULL)
        && (pcmk_find_node(scheduler,
                           scheduler->priv->local_node_name) == NULL)) {
        crm_info("Creating a fake local node for %s",
                 scheduler->priv->local_node_name);
        pe_create_node(scheduler->priv->local_node_name,
                       scheduler->priv->local_node_name, NULL, 0, scheduler);
    }

    pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_status);
    return TRUE;
}

/*!
 * \internal
 * \brief Free a list of pcmk_resource_t
 *
 * \param[in,out] resources  List to free
 *
 * \note When the scheduler's resource list is freed, that includes the original
 *       storage for the uname and id of any Pacemaker Remote nodes in the
 *       scheduler's node list, so take care not to use those afterward.
 * \todo Refactor pcmk_node_t to strdup() the node name.
 */
static void
pe_free_resources(GList *resources)
{
    pcmk_resource_t *rsc = NULL;
    GList *iterator = resources;

    while (iterator != NULL) {
        rsc = (pcmk_resource_t *) iterator->data;
        iterator = iterator->next;
        rsc->priv->fns->free(rsc);
    }
    if (resources != NULL) {
        g_list_free(resources);
    }
}

static void
pe_free_actions(GList *actions)
{
    GList *iterator = actions;

    while (iterator != NULL) {
        pe_free_action(iterator->data);
        iterator = iterator->next;
    }
    if (actions != NULL) {
        g_list_free(actions);
    }
}

static void
pe_free_nodes(GList *nodes)
{
    for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
        pcmk_node_t *node = (pcmk_node_t *) iterator->data;

        // Shouldn't be possible, but to be safe ...
        if (node == NULL) {
            continue;
        }
        if (node->details == NULL) {
            free(node);
            continue;
        }

        /* This is called after pe_free_resources(), which means that we can't
         * use node->private->name for Pacemaker Remote nodes.
         */
        crm_trace("Freeing node %s",
                  (pcmk__is_pacemaker_remote_node(node)?
                   "(guest or remote)" : pcmk__node_name(node)));

        if (node->priv->attrs != NULL) {
            g_hash_table_destroy(node->priv->attrs);
        }
        if (node->priv->utilization != NULL) {
            g_hash_table_destroy(node->priv->utilization);
        }
        if (node->priv->digest_cache != NULL) {
            g_hash_table_destroy(node->priv->digest_cache);
        }
        g_list_free(node->details->running_rsc);
        g_list_free(node->priv->assigned_resources);
        free(node->priv);
        free(node->details);
        free(node->assign);
        free(node);
    }
    if (nodes != NULL) {
        g_list_free(nodes);
    }
}

static void
pe__free_ordering(GList *constraints)
{
    GList *iterator = constraints;

    while (iterator != NULL) {
        pcmk__action_relation_t *order = iterator->data;

        iterator = iterator->next;
        free(order->task1);
        free(order->task2);
        free(order);
    }
    if (constraints != NULL) {
        g_list_free(constraints);
    }
}

static void
pe__free_location(GList *constraints)
{
    GList *iterator = constraints;

    while (iterator != NULL) {
        pcmk__location_t *cons = iterator->data;

        iterator = iterator->next;
        g_list_free_full(cons->nodes, pcmk__free_node_copy);
        free(cons->id);
        free(cons);
    }
    if (constraints != NULL) {
        g_list_free(constraints);
    }
}

/*!
 * \brief Reset scheduler data to defaults without freeing it or constraints
 *
 * \param[in,out] scheduler  Scheduler data to reset
 *
 * \deprecated This function is deprecated as part of the API;
 *             pe_reset_working_set() should be used instead.
 */
void
cleanup_calculations(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return;
    }
    pcmk__clear_scheduler_flags(scheduler, pcmk__sched_have_status);
    if (scheduler->priv->options != NULL) {
        g_hash_table_destroy(scheduler->priv->options);
    }
    if (scheduler->priv->singletons != NULL) {
        g_hash_table_destroy(scheduler->priv->singletons);
    }
    if (scheduler->priv->ticket_constraints != NULL) {
        g_hash_table_destroy(scheduler->priv->ticket_constraints);
    }
    if (scheduler->priv->templates != NULL) {
        g_hash_table_destroy(scheduler->priv->templates);
    }
    if (scheduler->priv->tags != NULL) {
        g_hash_table_destroy(scheduler->priv->tags);
    }

    crm_trace("deleting resources");
    pe_free_resources(scheduler->priv->resources);

    crm_trace("deleting actions");
    pe_free_actions(scheduler->priv->actions);

    crm_trace("deleting nodes");
    pe_free_nodes(scheduler->nodes);

    pe__free_param_checks(scheduler);
    g_list_free(scheduler->priv->stop_needed);
    crm_time_free(scheduler->priv->now);
    pcmk__xml_free(scheduler->input);
    pcmk__xml_free(scheduler->priv->failed);
    pcmk__xml_free(scheduler->priv->graph);

    set_working_set_defaults(scheduler);

    CRM_LOG_ASSERT((scheduler->priv->location_constraints == NULL)
                   && (scheduler->priv->ordering_constraints == NULL));
}

/*!
 * \brief Reset scheduler data to default state without freeing it
 *
 * \param[in,out] scheduler  Scheduler data to reset
 */
void
pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return;
    }

    crm_trace("Deleting %d ordering constraints",
              g_list_length(scheduler->priv->ordering_constraints));
    pe__free_ordering(scheduler->priv->ordering_constraints);
    scheduler->priv->ordering_constraints = NULL;

    crm_trace("Deleting %d location constraints",
              g_list_length(scheduler->priv->location_constraints));
    pe__free_location(scheduler->priv->location_constraints);
    scheduler->priv->location_constraints = NULL;

    crm_trace("Deleting %d colocation constraints",
              g_list_length(scheduler->priv->colocation_constraints));
    g_list_free_full(scheduler->priv->colocation_constraints, free);
    scheduler->priv->colocation_constraints = NULL;

    cleanup_calculations(scheduler);
}

void
set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
    // These members must be preserved
    pcmk__scheduler_private_t *priv = scheduler->priv;
    pcmk__output_t *out = priv->out;
    char *local_node_name = scheduler->priv->local_node_name;

    // Wipe the main structs (any other members must have previously been freed)
    memset(scheduler, 0, sizeof(pcmk_scheduler_t));
    memset(priv, 0, sizeof(pcmk__scheduler_private_t));

    // Restore the members to preserve
    scheduler->priv = priv;
    scheduler->priv->out = out;
    scheduler->priv->local_node_name = local_node_name;

    // Set defaults for everything else
-    scheduler->priv->next_ordering_id = 1;
-    scheduler->priv->next_action_id = 1;
-    scheduler->no_quorum_policy = pcmk_no_quorum_stop;
-#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
-    pcmk__set_scheduler_flags(scheduler,
-                              pcmk__sched_symmetric_cluster
-                              |pcmk__sched_concurrent_fencing
-                              |pcmk__sched_stop_removed_resources
-                              |pcmk__sched_cancel_removed_actions);
-#else
-    pcmk__set_scheduler_flags(scheduler,
-                              pcmk__sched_symmetric_cluster
-                              |pcmk__sched_stop_removed_resources
-                              |pcmk__sched_cancel_removed_actions);
-#endif
+    pcmk__set_scheduler_defaults(scheduler);
}

pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
    return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}

pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
    GList *rIter = NULL;

    for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
        pcmk_resource_t *parent = rIter->data;
        pcmk_resource_t *match = parent->priv->fns->find_rsc(parent, id, NULL,
                                                             flags);

        if (match != NULL) {
            return match;
        }
    }
    crm_trace("No match for %s", id);
    return NULL;
}

/*!
 * \brief Find a node by name or ID in a list of nodes
 *
 * \param[in] nodes  List of nodes (as pcmk_node_t*)
 * \param[in] id     If not NULL, ID of node to find
 * \param[in] uname  If not NULL, name of node to find
 *
 * \return Node from \p nodes that matches \p id if any,
 *         otherwise node from \p nodes that matches \p uname if any,
 *         otherwise NULL
 */
pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
    pcmk_node_t *match = NULL;

    if (id != NULL) {
        match = pe_find_node_id(nodes, id);
    }
    if ((match == NULL) && (uname != NULL)) {
        match = pcmk__find_node_in_list(nodes, uname);
    }
    return match;
}

/*!
 * \brief Find a node by ID in a list of nodes
 *
 * \param[in] nodes  List of nodes (as pcmk_node_t*)
 * \param[in] id     ID of node to find
 *
 * \return Node from \p nodes that matches \p id if any, otherwise NULL
 */
pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
    for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;

        /* @TODO Whether node IDs should be considered case-sensitive should
         * probably depend on the node type, so functionizing the comparison
         * would be worthwhile
         */
        if (pcmk__str_eq(node->priv->id, id, pcmk__str_casei)) {
            return node;
        }
    }
    return NULL;
}

// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START

#include

/*!
 * \brief Find a node by name in a list of nodes
 *
 * \param[in] nodes      List of nodes (as pcmk_node_t*)
 * \param[in] node_name  Name of node to find
 *
 * \return Node from \p nodes that matches \p node_name if any, otherwise NULL
 */
pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
    return pcmk__find_node_in_list(nodes, node_name);
}

// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/pengine/tests/status/Makefile.am b/lib/pengine/tests/status/Makefile.am
index e95f51a74a..8f39b8fc86 100644
--- a/lib/pengine/tests/status/Makefile.am
+++ b/lib/pengine/tests/status/Makefile.am
@@ -1,22 +1,21 @@
#
# Copyright 2022-2024 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#

include $(top_srcdir)/mk/common.mk
include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk

LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la

# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = pe_find_node_any_test \
                 pe_find_node_id_test \
-                 pe_new_working_set_test \
-                 set_working_set_defaults_test
+                 pe_new_working_set_test

TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/pe_new_working_set_test.c b/lib/pengine/tests/status/pe_new_working_set_test.c
index b385f9cd11..9236d50afb 100644
--- a/lib/pengine/tests/status/pe_new_working_set_test.c
+++ b/lib/pengine/tests/status/pe_new_working_set_test.c
@@ -1,46 +1,47 @@
/*
 * Copyright 2022-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include

#include "mock_private.h"

static void
calloc_fails(void **state) {
    pcmk__mock_calloc = true;   // calloc() will return NULL

    expect_value(__wrap_calloc, nmemb, 1);
    expect_value(__wrap_calloc, size, sizeof(pcmk_scheduler_t));
    assert_null(pe_new_working_set());

    pcmk__mock_calloc = false;  // Use real calloc()
}

static void
-calloc_succeeds(void **state) {
+calloc_succeeds(void **state)
+{
    pcmk_scheduler_t *scheduler = pe_new_working_set();

-    /* Nothing else to test about this function, as all it does is call
-     * set_working_set_defaults which is also a public function and should
-     * get its own unit test.
+    /* We only need to test that the return value is non-NULL, as all the
+     * function does is call pcmk__set_scheduler_defaults(), which should have
+     * its own unit test.
     */
    assert_non_null(scheduler);

    /* Avoid calling pe_free_working_set here so we don't artificially
     * inflate the coverage numbers.
     */
    free(scheduler);
}

PCMK__UNIT_TEST(NULL, NULL,
                cmocka_unit_test(calloc_fails),
                cmocka_unit_test(calloc_succeeds))
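With the Makefile.am changes above, the renamed pcmk__set_scheduler_defaults_test should be picked up by the usual `make check` run along with the other TAP unit tests, assuming the standard flow in mk/tap.mk and mk/unittest.mk. As a closing illustration (an editor's sketch, not part of the patch; example_check_quorum() is hypothetical), the flag helpers and quorum accessor touched by this series combine like this:

/* Editor's sketch only: exercises pcmk__set_scheduler_flags(),
 * pcmk__clear_scheduler_flags(), pcmk_has_quorum(), and pcmk__sched_warn()
 * from the headers and sources in this diff.
 */
static void
example_check_quorum(pcmk_scheduler_t *scheduler)
{
    // Ask for utilization output in this run
    pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization);

    if (!pcmk_has_quorum(scheduler)) {
        // Logs the warning and sets pcmk__sched_processing_warning
        pcmk__sched_warn(scheduler, "Partition does not have quorum");
    }

    // ... run the scheduling sequence ...

    // Turn the option back off afterward
    pcmk__clear_scheduler_flags(scheduler, pcmk__sched_show_utilization);
}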