
diff --git a/include/crm/common/resources_internal.h b/include/crm/common/resources_internal.h
index 0d5d83ac99..5c01f34997 100644
--- a/include/crm/common/resources_internal.h
+++ b/include/crm/common/resources_internal.h
@@ -1,245 +1,245 @@
/*
* Copyright 2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
#define PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
#include <glib.h> // gboolean, GList
#include <crm/common/resources.h> // enum rsc_recovery_type
#include <crm/common/roles.h> // enum rsc_role_e
#include <crm/common/scheduler_types.h> // pcmk_node_t, pcmk_resource_t, etc.
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \internal
* \brief Set resource flags
*
* \param[in,out] resource Resource to set flags for
* \param[in] flags_to_set Group of enum pcmk_rsc_flags to set
*/
#define pcmk__set_rsc_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
/*!
* \internal
* \brief Clear resource flags
*
* \param[in,out] resource Resource to clear flags for
* \param[in] flags_to_clear Group of enum pcmk_rsc_flags to clear
*/
#define pcmk__clear_rsc_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
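/* A minimal usage sketch of the helpers above (an editorial example, not part
 * of this changeset); rsc is assumed to be a valid pcmk_resource_t pointer,
 * and the flags come from enum pcmk_rsc_flags as used elsewhere in this diff:
 *
 *     pcmk__set_rsc_flags(rsc, pcmk_rsc_managed);
 *     pcmk__clear_rsc_flags(rsc, pcmk_rsc_unassigned);
 *
 * Both helpers are macros rather than functions so that the trace log records
 * the caller's __func__ and __LINE__.
 */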
//! Resource variants supported by Pacemaker
enum pcmk__rsc_variant {
// Order matters: some code compares these values with less-than/greater-than
pcmk__rsc_variant_unknown = -1, //!< Unknown resource variant
pcmk__rsc_variant_primitive = 0, //!< Primitive resource
pcmk__rsc_variant_group = 1, //!< Group resource
pcmk__rsc_variant_clone = 2, //!< Clone resource
pcmk__rsc_variant_bundle = 3, //!< Bundle resource
};
//! Resource assignment methods (implementation defined by libpacemaker)
typedef struct pcmk__assignment_methods pcmk__assignment_methods_t;
//! Resource object methods
typedef struct {
/*!
* \internal
* \brief Parse variant-specific resource XML from CIB into struct members
*
* \param[in,out] rsc Partially unpacked resource
* \param[in,out] scheduler Scheduler data
*
* \return TRUE if resource was unpacked successfully, otherwise FALSE
*/
gboolean (*unpack)(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Search for a resource ID in a resource and its children
*
* \param[in] rsc Search this resource and its children
* \param[in] search Search for this resource ID
* \param[in] node If not NULL, limit search to resources on this node
* \param[in] flags Group of enum pe_find flags
*
* \return Resource that matches search criteria if any, otherwise NULL
*/
pcmk_resource_t *(*find_rsc)(pcmk_resource_t *rsc, const char *search,
const pcmk_node_t *node, int flags);
/*!
* \internal
* \brief Get value of a resource instance attribute
*
* \param[in,out] rsc Resource to check
* \param[in] node Node to use to evaluate rules
* \param[in] create Ignored
* \param[in] name Name of instance attribute to check
* \param[in,out] scheduler Scheduler data
*
* \return Value of requested attribute if available, otherwise NULL
* \note The caller is responsible for freeing the result using free().
*/
char *(*parameter)(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Check whether a resource is active
*
* \param[in] rsc Resource to check
* \param[in] all If \p rsc is collective, all instances must be active
*
* \return TRUE if \p rsc is active, otherwise FALSE
*/
gboolean (*active)(pcmk_resource_t *rsc, gboolean all);
/*!
* \internal
* \brief Get resource's current or assigned role
*
* \param[in] rsc Resource to check
* \param[in] current If TRUE, check current role, otherwise assigned role
*
* \return Current or assigned role of \p rsc
*/
enum rsc_role_e (*state)(const pcmk_resource_t *rsc, gboolean current);
/*!
* \internal
* \brief List nodes where a resource (or any of its children) is
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current If 0, list nodes where \p rsc is assigned;
* if 1, where active; if 2, where active or pending
*
* \return If list contains only one node, that node, otherwise NULL
*/
pcmk_node_t *(*location)(const pcmk_resource_t *rsc, GList **list,
int current);
/*!
* \internal
* \brief Free all memory used by a resource
*
* \param[in,out] rsc Resource to free
*/
void (*free)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Increment cluster's instance counts for a resource
*
* Given a resource, increment its cluster's ninstances, disabled_resources,
* and blocked_resources counts for the resource and its descendants.
*
* \param[in,out] rsc Resource to count
*/
void (*count)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Check whether a given resource is in a list of resources
*
* \param[in] rsc Resource ID to check for
* \param[in] only_rsc List of resource IDs to check
* \param[in] check_parent If TRUE, check top ancestor as well
*
* \return TRUE if \p rsc, its top parent if requested, or '*' is in
* \p only_rsc, otherwise FALSE
*/
gboolean (*is_filtered)(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
/*!
* \internal
* \brief Find a node (and optionally count all) where resource is active
*
* \param[in] rsc Resource to check
* \param[out] count_all If not NULL, set this to count of active nodes
* \param[out] count_clean If not NULL, set this to count of clean nodes
*
* \return A node where the resource is active, preferring the source node
* if the resource is involved in a partial migration, or a clean,
* online node if the resource's \c PCMK_META_REQUIRES is
* \c PCMK_VALUE_QUORUM or \c PCMK_VALUE_NOTHING, otherwise \c NULL.
*/
pcmk_node_t *(*active_node)(const pcmk_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
/*!
* \internal
* \brief Get maximum resource instances per node
*
* \param[in] rsc Resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int (*max_per_node)(const pcmk_resource_t *rsc);
} pcmk__rsc_methods_t;
// Implementation of pcmk__resource_private_t
struct pcmk__resource_private {
enum pcmk__rsc_variant variant; // Resource variant
void *variant_opaque; // Variant-specific data
char *history_id; // Resource instance ID in history
pcmk_resource_t *parent; // Resource's parent resource, if any
pcmk_scheduler_t *scheduler; // Scheduler data containing resource
// Resource configuration (possibly expanded from template)
xmlNode *xml;
// Original resource configuration, if using template
xmlNode *orig_xml;
// Configuration of resource operations (possibly expanded from template)
xmlNode *ops_xml;
const pcmk__rsc_methods_t *fns; // Resource object methods
const pcmk__assignment_methods_t *cmds; // Resource assignment methods
};
-const char *pcmk__multiply_active_text(enum rsc_recovery_type recovery);
+const char *pcmk__multiply_active_text(const pcmk_resource_t *rsc);
/*!
* \internal
* \brief Get node where resource is currently active (if any)
*
* \param[in] rsc Resource to check
*
* \return Node that \p rsc is active on, if any, otherwise NULL
*/
static inline pcmk_node_t *
pcmk__current_node(const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
return NULL;
}
return rsc->private->fns->active_node(rsc, NULL, NULL);
}
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
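The API change in this header is pcmk__multiply_active_text() taking the
resource itself rather than its recovery_type member. A hypothetical
out-of-tree caller (an illustrative sketch; only the in-tree call site in
pcmk_sched_primitive.c below is part of this diff) would adapt the same way:

    // Before this change:
    //     desc = pcmk__multiply_active_text(rsc->recovery_type);
    // After:
    const char *desc = pcmk__multiply_active_text(rsc);

Callers no longer need to know which struct member holds the recovery
setting, which also frees the struct layout to change later.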
diff --git a/lib/common/resources.c b/lib/common/resources.c
index 46c038b6cc..5b398e210e 100644
--- a/lib/common/resources.c
+++ b/lib/common/resources.c
@@ -1,67 +1,67 @@
/*
* Copyright 2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h> // NULL
#include <stdbool.h> // bool, false
#include <crm/common/scheduler.h>
#include <crm/common/scheduler_internal.h>
/*!
* \internal
* \brief Get a resource's ID
*
* \param[in] rsc Resource to check
*
* \return ID of \p rsc (or NULL if \p rsc is NULL)
*/
const char *
pcmk_resource_id(const pcmk_resource_t *rsc)
{
return (rsc == NULL)? NULL : rsc->id;
}
/*!
* \internal
* \brief Check whether a resource is managed by the cluster
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc is managed, otherwise false
*/
bool
pcmk_resource_is_managed(const pcmk_resource_t *rsc)
{
return (rsc == NULL)? false : pcmk_is_set(rsc->flags, pcmk_rsc_managed);
}
/*!
* \brief Get readable description of a multiply-active recovery type
*
- * \param[in] recovery Recovery type
+ * \param[in] rsc Resource with recovery type to check
*
- * \return Static string describing \p recovery
+ * \return Static string describing recovery type of \p rsc
*/
const char *
-pcmk__multiply_active_text(enum rsc_recovery_type recovery)
+pcmk__multiply_active_text(const pcmk_resource_t *rsc)
{
- switch (recovery) {
+ switch (rsc->recovery_type) {
case pcmk_multiply_active_stop:
return "shutting it down";
case pcmk_multiply_active_restart:
return "attempting recovery";
case pcmk_multiply_active_block:
return "waiting for an administrator";
case pcmk_multiply_active_unexpected:
return "stopping unexpected instances";
}
return "Unknown";
}
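Unlike the neighboring pcmk_resource_id() and pcmk_resource_is_managed(), the
reworked function dereferences rsc with no NULL guard, so passing NULL is now
a caller error. If that guarantee is not wanted, a defensive variant could
reuse the existing fallback (an editorial sketch, not part of this diff):

    const char *
    pcmk__multiply_active_text(const pcmk_resource_t *rsc)
    {
        if (rsc == NULL) {
            return "Unknown"; // same fallback as an unrecognized recovery type
        }
        switch (rsc->recovery_type) {
            case pcmk_multiply_active_stop:
                return "shutting it down";
            case pcmk_multiply_active_restart:
                return "attempting recovery";
            case pcmk_multiply_active_block:
                return "waiting for an administrator";
            case pcmk_multiply_active_unexpected:
                return "stopping unexpected instances";
        }
        return "Unknown";
    }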
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index e4f1f1417f..51b4bafb1f 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1695 +1,1695 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <stdint.h> // uint8_t, uint32_t
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
static void stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void start_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
* next role is Unpromoted, because the resource must be started before it
* can be promoted. The current state then becomes Unpromoted, which is fed
* into this array again, giving a next role of Promoted.
*
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
/* Unknown */ { pcmk_role_unknown, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_stopped, /* Unpromoted */
pcmk_role_stopped, /* Promoted */
},
/* Stopped */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_unpromoted, /* Promoted */
},
/* Started */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Unpromoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Promoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_unpromoted, /* Stopped */
pcmk_role_unpromoted, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
};
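/* Worked example (editorial sketch, not part of this changeset): moving from
 * Stopped to Promoted takes two lookups, via the intermediate Unpromoted role:
 *
 *     enum rsc_role_e role = pcmk_role_stopped;
 *
 *     while (role != pcmk_role_promoted) {
 *         // First pass yields pcmk_role_unpromoted, second pcmk_role_promoted
 *         role = rsc_state_matrix[role][pcmk_role_promoted];
 *     }
 *
 * This is the iteration pattern used by schedule_restart_actions() and
 * schedule_role_transition_actions() below.
 */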
/*!
* \internal
* \brief Function to schedule actions needed for a role change
*
* \param[in,out] rsc Resource whose role is changing
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
typedef void (*rsc_transition_fn)(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the function needed to transition directly from one role
* to another. NULL indicates that nothing is needed.
*
* Current role Transition function Next role
* ------------ ------------------- ----------
*/
/* Unknown */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
assert_role_error, /* Started */
assert_role_error, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Stopped */ { assert_role_error, /* Unknown */
NULL, /* Stopped */
start_resource, /* Started */
start_resource, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Started */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
NULL, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Unpromoted */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
stop_resource, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Promoted */ { assert_role_error, /* Unknown */
demote_resource, /* Stopped */
demote_resource, /* Started */
demote_resource, /* Unpromoted */
NULL, /* Promoted */
},
};
/*!
* \internal
* \brief Get a list of a resource's allowed nodes sorted by node score
*
* \param[in] rsc Resource to check
*
* \return List of allowed nodes sorted by node score
*/
static GList *
sorted_allowed_nodes(const pcmk_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
if (nodes != NULL) {
return pcmk__sort_nodes(nodes, pcmk__current_node(rsc));
}
}
return NULL;
}
/*!
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
* \param[in,out] rsc Resource to choose a node for
* \param[in] prefer If not \c NULL, prefer this node when all else
* equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return true if \p rsc could be assigned to a node, otherwise false
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
static bool
assign_best_node(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *nodes = NULL;
pcmk_node_t *chosen = NULL;
pcmk_node_t *best = NULL;
const pcmk_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
// Sort allowed nodes by score
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
best = (pcmk_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
// Get the allowed node version of prefer
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unknown",
pcmk__node_name(prefer), rsc->id);
/* Favor the preferred node as long as its score is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's score is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
pcmk__node_name(chosen), rsc->id);
chosen = NULL;
} else if (!pcmk__node_available(chosen, true, false)) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unavailable",
pcmk__node_name(chosen), rsc->id);
chosen = NULL;
} else {
pcmk__rsc_trace(rsc,
"Chose preferred node %s for %s "
"(ignoring %d candidates)",
pcmk__node_name(chosen), rsc->id,
g_list_length(nodes));
}
}
if ((chosen == NULL) && (best != NULL)) {
/* Either there is no preferred node, or the preferred node is not
* suitable, but another node is allowed to run the resource.
*/
chosen = best;
if (!pcmk__is_unique_clone(rsc->private->parent)
&& (chosen->weight > 0) // Zero not acceptable
&& pcmk__node_available(chosen, false, false)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* pcmk__assign_instances() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
pcmk_node_t *running = pcmk__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
pcmk__rsc_trace(rsc,
"Current node for %s (%s) can't run resources",
rsc->id, pcmk__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
pcmk_node_t *allowed = (pcmk_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
// The nodes are sorted by score, so no more are equal
break;
}
if (pcmk__same_node(allowed, running)) {
// Scores are equal, so prefer the current node
chosen = allowed;
}
nodes_with_best_score++;
}
if (nodes_with_best_score > 1) {
uint8_t log_level = LOG_INFO;
if (chosen->weight >= PCMK_SCORE_INFINITY) {
log_level = LOG_WARNING;
}
do_crm_log(log_level,
"Chose %s for %s from %d nodes with score %s",
pcmk__node_name(chosen), rsc->id,
nodes_with_best_score,
pcmk_readable_score(chosen->weight));
}
}
}
pcmk__rsc_trace(rsc, "Chose %s for %s from %d candidates",
pcmk__node_name(chosen), rsc->id, g_list_length(nodes));
}
pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
g_list_free(nodes);
return rsc->allocated_to != NULL;
}
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
* \param[in,out] colocation Colocation to apply
* \param[in,out] rsc Resource being assigned
*/
static void
apply_this_with(pcmk__colocation_t *colocation, pcmk_resource_t *rsc)
{
GHashTable *archive = NULL;
pcmk_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
if ((colocation->dependent_role >= pcmk_role_promoted)
|| ((colocation->score < 0)
&& (colocation->score > -PCMK_SCORE_INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) {
pcmk__rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score,
pcmk_role_text(colocation->dependent_role));
other->private->cmds->assign(other, NULL, true);
}
// Apply the colocation score to this resource's allowed node scores
rsc->private->cmds->apply_coloc_score(rsc, other, colocation, true);
if ((archive != NULL)
&& !pcmk__any_node_available(rsc->allowed_nodes)) {
pcmk__rsc_info(rsc,
"%s: Reverting scores from colocation with %s "
"because no nodes allowed",
rsc->id, other->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive != NULL) {
g_hash_table_destroy(archive);
}
}
/*!
* \internal
* \brief Update a Pacemaker Remote node once its connection has been assigned
*
* \param[in] connection Connection resource that has been assigned
*/
static void
remote_connection_assigned(const pcmk_resource_t *connection)
{
pcmk_node_t *remote_node = pcmk_find_node(connection->private->scheduler,
connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
&& (connection->next_role != pcmk_role_stopped)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
remote_node->details->online = TRUE;
if (remote_node->details->unseen) {
// Avoid unnecessary fence, since we will attempt connection
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Pacemaker Remote node %s will be shut down "
"(%sassigned connection's next role is %s)",
remote_node->details->id,
((connection->allocated_to == NULL)? "un" : ""),
pcmk_role_text(connection->next_role));
remote_node->details->shutdown = TRUE;
}
}
/*!
* \internal
* \brief Assign a primitive resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pcmk_node_t *
pcmk__primitive_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *this_with_colocations = NULL;
GList *with_this_colocations = NULL;
GList *iter = NULL;
pcmk_resource_t *parent = NULL;
pcmk__colocation_t *colocation = NULL;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
parent = rsc->private->parent;
// Never assign a child without parent being assigned first
if ((parent != NULL) && !pcmk_is_set(parent->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "%s: Assigning parent %s first",
rsc->id, parent->id);
parent->private->cmds->assign(parent, prefer, stop_if_fail);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// Assignment has already been done
const char *node_name = "no node";
if (rsc->allocated_to != NULL) {
node_name = pcmk__node_name(rsc->allocated_to);
}
pcmk__rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
return rsc->allocated_to;
}
// Ensure we detect assignment loops
if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning);
pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes,
scheduler);
this_with_colocations = pcmk__this_with_colocations(rsc);
with_this_colocations = pcmk__with_this_colocations(rsc);
// Apply mandatory colocations first, to satisfy as many as possible
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -PCMK_SCORE_INFINITY)
|| (colocation->score >= PCMK_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -PCMK_SCORE_INFINITY)
|| (colocation->score >= PCMK_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
pe__show_node_scores(true, rsc, "Mandatory-colocations",
rsc->allowed_nodes, scheduler);
// Then apply optional colocations
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -PCMK_SCORE_INFINITY)
&& (colocation->score < PCMK_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -PCMK_SCORE_INFINITY)
&& (colocation->score < PCMK_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
g_list_free(this_with_colocations);
g_list_free(with_this_colocations);
if (rsc->next_role == pcmk_role_stopped) {
pcmk__rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
PCMK_META_TARGET_ROLE, scheduler);
} else if ((rsc->next_role > rsc->role)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& (scheduler->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE,
rsc->id, pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role));
pe__set_next_role(rsc, rsc->role,
PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE);
}
pe__show_node_scores(!pcmk_is_set(scheduler->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes, scheduler);
// Unmanage resource if fencing is enabled but no device is configured
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
pcmk_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pcmk__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == pcmk_role_promoted) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pcmk__rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"),
reason);
pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
// Must stop at some point, but be consistent with stop_if_fail
if (stop_if_fail) {
pcmk__rsc_debug(rsc,
"Forcing %s to stop: " PCMK_OPT_STOP_ALL_RESOURCES,
rsc->id);
}
pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
} else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
// Assignment failed
if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
pcmk__rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if ((rsc->running_on != NULL) && stop_if_fail) {
pcmk__rsc_info(rsc, "Stopping removed resource %s", rsc->id);
}
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_assigning);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
}
return rsc->allocated_to;
}
/*!
* \internal
* \brief Schedule actions to bring resource down and back to current role
*
* \param[in,out] rsc Resource to restart
* \param[in,out] current Node that resource should be brought down on
* \param[in] need_stop Whether the resource must be stopped
* \param[in] need_promote Whether the resource must be promoted
*/
static void
schedule_restart_actions(pcmk_resource_t *rsc, pcmk_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
pcmk__set_rsc_flags(rsc, pcmk_rsc_restarting);
// Bring resource down to a stop on its current node
while (role != pcmk_role_stopped) {
next_role = rsc_state_matrix[role][pcmk_role_stopped];
pcmk__rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
pcmk_role_text(role), pcmk_role_text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, current, !need_stop);
role = next_role;
}
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == pcmk_role_promoted) && need_promote) {
required = true;
}
pcmk__rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
pcmk_role_text(role), pcmk_role_text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, !required);
role = next_role;
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_restarting);
}
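/* Editorial sketch (not part of this changeset) of the two loops above:
 * restarting a resource that is and should remain Promoted schedules, in
 * order:
 *
 *   Down phase (target Stopped):
 *     rsc_state_matrix[Promoted][Stopped]    -> Unpromoted: demote_resource()
 *     rsc_state_matrix[Unpromoted][Stopped]  -> Stopped:    stop_resource()
 *   Up phase (target rsc->role, Promoted):
 *     rsc_state_matrix[Stopped][Promoted]    -> Unpromoted: start_resource()
 *     rsc_state_matrix[Unpromoted][Promoted] -> Promoted:   promote_resource()
 *
 * so a full restart of a promoted resource is demote, stop, start, promote.
 */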
/*!
* \internal
* \brief If a resource's next role is not explicitly specified, set a default
*
* \param[in,out] rsc Resource to set next role for
*
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
set_default_next_role(pcmk_resource_t *rsc)
{
if (rsc->next_role != pcmk_role_unknown) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
} else {
pe__set_next_role(rsc, pcmk_role_started, "assignment");
}
return "implicit";
}
/*!
* \internal
* \brief Create an action to represent an already pending start
*
* \param[in,out] rsc Resource to create start action for
*/
static void
create_pending_start(pcmk_resource_t *rsc)
{
pcmk_action_t *start = NULL;
pcmk__rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
pcmk__set_action_flags(start, pcmk_action_always_in_graph);
}
/*!
* \internal
* \brief Schedule actions needed to take a resource to its next role
*
* \param[in,out] rsc Resource to schedule actions for
*/
static void
schedule_role_transition_actions(pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
while (role != rsc->next_role) {
enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
rsc_transition_fn fn = NULL;
pcmk__rsc_trace(rsc,
"Creating action to take %s from %s to %s "
"(ending at %s)",
rsc->id, pcmk_role_text(role),
pcmk_role_text(next_role),
pcmk_role_text(rsc->next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, false);
role = next_role;
}
}
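/* Editorial sketch (not part of this changeset): a resource that is Started
 * with a next role of Stopped takes a single pass through the loop above:
 *
 *     next_role = rsc_state_matrix[pcmk_role_started][pcmk_role_stopped];
 *                                             // pcmk_role_stopped
 *     fn = rsc_action_matrix[pcmk_role_started][pcmk_role_stopped];
 *                                             // stop_resource
 *     fn(rsc, rsc->allocated_to, false);      // schedule a required stop
 */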
/*!
* \internal
* \brief Create all actions needed for a given primitive resource
*
* \param[in,out] rsc Primitive resource to create actions for
*/
void
pcmk__primitive_create_actions(pcmk_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
bool is_moving = false;
bool allow_migrate = false;
bool multiply_active = false;
pcmk_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
next_role_source = set_default_next_role(rsc);
pcmk__rsc_trace(rsc,
"Creating all actions for %s transition from %s to %s "
"(%s) on %s",
rsc->id, pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role), next_role_source,
pcmk__node_name(rsc->allocated_to));
current = rsc->private->fns->active_node(rsc, &num_all_active,
&num_clean_active);
g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
&& !pcmk__same_node(current, rsc->allocated_to)
&& (rsc->next_role >= pcmk_role_started)) {
pcmk__rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pcmk__node_name(current),
pcmk__node_name(rsc->allocated_to));
is_moving = true;
allow_migrate = pcmk__rsc_can_migrate(rsc, current);
// This is needed even if migrating (though I'm not sure why ...)
need_stop = true;
}
// Check whether resource is partially migrated and/or multiply active
if ((rsc->partial_migration_source != NULL)
&& (rsc->partial_migration_target != NULL)
&& allow_migrate && (num_all_active == 2)
&& pcmk__same_node(current, rsc->partial_migration_source)
&& pcmk__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
/* A partial migration is in progress, and the migration target remains
* the same as when the migration began.
*/
pcmk__rsc_trace(rsc,
"Partial migration of %s from %s to %s will continue",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
} else if ((rsc->partial_migration_source != NULL)
|| (rsc->partial_migration_target != NULL)) {
// A partial migration is in progress but can't be continued
if (num_all_active > 2) {
// The resource is migrating *and* multiply active!
crm_notice("Forcing recovery of %s because it is migrating "
"from %s to %s and possibly active elsewhere",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
} else {
// The migration source or target isn't available
crm_notice("Forcing recovery of %s because it can no longer "
"migrate from %s to %s",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
}
need_stop = true;
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has PCMK_META_REQUIRES set to PCMK_VALUE_NOTHING or
* PCMK_VALUE_QUORUM, don't consider it active on unclean nodes (similar
* to how all resources behave when PCMK_OPT_STONITH_ENABLED is false).
* We can start such resources elsewhere before fencing completes, and
* if we considered the resource active on the failed node, we would
* attempt recovery for being active on multiple nodes.
*/
multiply_active = (num_clean_active > 1);
}
if (multiply_active) {
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
// Resource was (possibly) incorrectly multiply active
pcmk__sched_err("%s resource %s might be active on %u nodes (%s)",
pcmk__s(class, "Untyped"), rsc->id, num_all_active,
- pcmk__multiply_active_text(rsc->recovery_type));
+ pcmk__multiply_active_text(rsc));
crm_notice("For more information, see \"What are multiply active "
"resources?\" at "
"https://projects.clusterlabs.org/w/clusterlabs/faq/");
switch (rsc->recovery_type) {
case pcmk_multiply_active_restart:
need_stop = true;
break;
case pcmk_multiply_active_unexpected:
need_stop = true; // stop_resource() will skip expected node
pcmk__set_rsc_flags(rsc, pcmk_rsc_stop_unexpected);
break;
default:
break;
}
} else {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_stop_unexpected);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) {
need_stop = true;
pcmk__rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pcmk__rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == pcmk_role_promoted) {
need_promote = true;
}
}
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pcmk__rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
} else if ((rsc->role > pcmk_role_started) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
pcmk_action_t *start = NULL;
pcmk__rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
if (!pcmk_is_set(start->flags, pcmk_action_optional)) {
// Recovery of a promoted resource
pcmk__rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
}
}
// Create any actions needed to bring resource down and back up to same role
schedule_restart_actions(rsc, current, need_stop, need_promote);
// Create any actions needed to take resource from this role to the next
schedule_role_transition_actions(rsc);
pcmk__create_recurring_actions(rsc);
if (allow_migrate) {
pcmk__create_migration_actions(rsc, current);
}
}
/*!
* \internal
* \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
*
* \param[in] rsc Resource to check
*/
static void
rsc_avoids_remote_nodes(const pcmk_resource_t *rsc)
{
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (node->details->remote_rsc != NULL) {
node->weight = -PCMK_SCORE_INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(const pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
}
return allowed_nodes;
}
/*!
* \internal
* \brief Create implicit constraints needed for a primitive resource
*
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
pcmk__primitive_internal_constraints(pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource "
"%s", rsc->id);
return;
}
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
&& pcmk_is_set(scheduler->flags,
pcmk_sched_enable_unfencing)
&& pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(scheduler->placement_strategy,
PCMK_VALUE_DEFAULT, pcmk__str_casei);
// Order stops before starts (i.e. restart)
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
pcmk__ar_ordered
|pcmk__ar_first_implies_then
|pcmk__ar_intermediate_stop, scheduler);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)
|| (rsc->role > pcmk_role_unpromoted)) {
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
pcmk__ar_promoted_then_implies_first, scheduler);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
NULL,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
// Don't clear resource history if probing on same node
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
NULL,
pcmk__ar_if_on_same_node|pcmk__ar_then_cancels_first,
scheduler);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || (rsc->container != NULL)) {
allowed_nodes = allowed_nodes_as_list(rsc);
}
if (check_unfencing) {
g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
}
if (check_utilization) {
pcmk__create_utilization_constraints(rsc, allowed_nodes);
}
if (rsc->container != NULL) {
pcmk_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR,
rsc, PCMK_ACTION_STOP,
pcmk__ar_ordered);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(scheduler,
rsc->container);
}
if (remote_rsc != NULL) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pcmk_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -PCMK_SCORE_INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_START, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks, scheduler);
pcmk__new_ordering(rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_STOP, 0),
NULL, pcmk__ar_then_implies_first, scheduler);
if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = PCMK_SCORE_INFINITY; // Force to run on same host
}
pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL,
pcmk__coloc_influence);
}
}
if (rsc->is_remote_node
|| pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (for_dependent) {
// Always process on behalf of primary resource
primary->private->cmds->apply_coloc_score(dependent, primary,
colocation, false);
return;
}
filter_results = pcmk__colocation_affects(dependent, primary, colocation,
false);
pcmk__rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
((colocation->score > 0)? "Colocating" : "Anti-colocating"),
dependent->id, primary->id, colocation->id,
colocation->score,
filter_results);
switch (filter_results) {
case pcmk__coloc_affects_role:
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
pcmk__apply_coloc_to_scores(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
}
}
/* Primitive implementation of
* pcmk__assignment_methods_t:with_this_colocations()
*/
void
pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
const pcmk_resource_t *parent = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
parent = rsc->private->parent;
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
if (parent != NULL) {
parent->private->cmds->with_this_colocations(parent, orig_rsc,
list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_with_this(list, colocation, orig_rsc);
}
}
}
}
/* Primitive implementation of
* pcmk__assignment_methods_t:this_with_colocations()
*/
void
pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
const pcmk_resource_t *parent = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
parent = rsc->private->parent;
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
if (parent != NULL) {
parent->private->cmds->this_with_colocations(parent, orig_rsc,
list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_this_with(list, colocation, orig_rsc);
}
}
}
}
/*!
* \internal
* \brief Return action flags for a given primitive resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node (ignored)
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__primitive_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
CRM_ASSERT(action != NULL);
return (uint32_t) action->flags;
}
/*!
* \internal
* \brief Check whether a node is a multiply active resource's expected node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return \c true if \p rsc is multiply active with
* \c PCMK_META_MULTIPLE_ACTIVE set to \c PCMK_VALUE_STOP_UNEXPECTED,
* and \p node is the node where it will remain active
* \note This assumes that the resource's next role cannot be changed to stopped
* after this is called, which should be reasonable if status has already
* been unpacked and resources have been assigned to nodes.
*/
static bool
is_expected_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
pcmk_rsc_stop_unexpected|pcmk_rsc_restarting)
&& (rsc->next_role > pcmk_role_stopped)
&& pcmk__same_node(rsc->allocated_to, node);
}
/*!
* \internal
* \brief Schedule actions needed to stop a resource wherever it is active
*
* \param[in,out] rsc Resource being stopped
* \param[in] node Node where resource is being stopped (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *current = (pcmk_node_t *) iter->data;
pcmk_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
* with PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_STOP_UNEXPECTED, and
* this is where it should not be stopped.
*/
pcmk__rsc_trace(rsc,
"Skipping stop of multiply active resource %s "
"on expected node %s",
rsc->id, pcmk__node_name(current));
continue;
}
if (rsc->partial_migration_target != NULL) {
// Continue migration if node originally was and remains target
if (pcmk__same_node(current, rsc->partial_migration_target)
&& pcmk__same_node(current, rsc->allocated_to)) {
pcmk__rsc_trace(rsc,
"Skipping stop of %s on %s "
"because partial migration there will continue",
rsc->id, pcmk__node_name(current));
continue;
} else {
pcmk__rsc_trace(rsc,
"Forcing stop of %s on %s "
"because migration target changed",
rsc->id, pcmk__node_name(current));
optional = false;
}
}
pcmk__rsc_trace(rsc, "Scheduling stop of %s on %s",
rsc->id, pcmk__node_name(current));
stop = stop_action(rsc, current, optional);
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
} else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting
|pcmk_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
*/
pe_action_set_reason(stop, "being multiply active", true);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__clear_action_flags(stop, pcmk_action_runnable);
}
if (pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
pcmk_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
NULL, false,
rsc->private->scheduler);
order_actions(stop, unfence, pcmk__ar_then_implies_first);
if (!pcmk__node_unfenced(current)) {
pcmk__sched_err("Stopping %s until %s can be unfenced",
rsc->id, pcmk__node_name(current));
}
}
}
}
/*!
* \internal
* \brief Schedule actions needed to start a resource on a node
*
* \param[in,out] rsc Resource being started
* \param[in,out] node Node where resource should be started
* \param[in] optional Whether actions should be optional
*/
static void
start_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
pcmk_action_t *start = NULL;
CRM_ASSERT(node != NULL);
pcmk__rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
pcmk__order_vs_unfence(rsc, node, start, pcmk__ar_first_implies_then);
if (pcmk_is_set(start->flags, pcmk_action_runnable) && !optional) {
pcmk__clear_action_flags(start, pcmk_action_optional);
}
if (is_expected_node(rsc, node)) {
/* This could be a problem if the start becomes necessary for other
* reasons later.
*/
pcmk__rsc_trace(rsc,
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pcmk__node_name(node));
pcmk__set_action_flags(start, pcmk_action_pseudo);
}
}
/*!
* \internal
* \brief Schedule actions needed to promote a resource on a node
*
* \param[in,out] rsc Resource being promoted
* \param[in] node Node where resource should be promoted
* \param[in] optional Whether actions should be optional
*/
static void
promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
bool runnable = true;
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *start = (pcmk_action_t *) iter->data;
if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
pcmk_action_t *promote = promote_action(rsc, node, optional);
pcmk__rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(node));
if (is_expected_node(rsc, node)) {
/* This could be a problem if the promote becomes necessary for
* other reasons later.
*/
pcmk__rsc_trace(rsc,
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pcmk__node_name(node));
pcmk__set_action_flags(promote, pcmk_action_pseudo);
}
} else {
pcmk__rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pcmk__node_name(node));
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *promote = (pcmk_action_t *) iter->data;
pcmk__clear_action_flags(promote, pcmk_action_runnable);
}
g_list_free(action_list);
}
}
/*!
* \internal
* \brief Schedule actions needed to demote a resource wherever it is active
*
* \param[in,out] rsc Resource being demoted
* \param[in] node Node where resource should be demoted (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
* running on more than one node, so we want to demote on all of them as
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *current = (pcmk_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pcmk__rsc_trace(rsc,
"Skipping demote of multiply active resource %s "
"on expected node %s",
rsc->id, pcmk__node_name(current));
} else {
pcmk__rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(current));
demote_action(rsc, current, optional);
}
}
}
static void
assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
/*!
* \internal
* \brief Schedule cleanup of a resource
*
* \param[in,out] rsc Resource to clean up
* \param[in] node Node to clean up on
* \param[in] optional Whether clean-up should be optional
*/
void
pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
uint32_t flag = optional? pcmk__ar_first_implies_then : pcmk__ar_ordered;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pcmk__node_name(node));
return;
}
if (node->details->unclean || !node->details->online) {
pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
rsc->id, pcmk__node_name(node));
return;
}
crm_notice("Scheduling clean-up of %s on %s",
rsc->id, pcmk__node_name(node));
delete_action(rsc, node, optional);
// stop -> clean-up -> start
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_DELETE, flag);
pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
rsc, PCMK_ACTION_START, flag);
}
/*!
* \internal
* \brief Add primitive meta-attributes relevant to graph actions to XML
*
* \param[in] rsc Primitive resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
const pcmk_resource_t *parent = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc) && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
* instances apart).
*/
value = g_hash_table_lookup(rsc->meta, PCMK__META_CLONE);
if (value != NULL) {
name = crm_meta_name(PCMK__META_CLONE);
crm_xml_add(xml, name, value);
free(name);
}
// Not sure if this one is really needed ...
value = g_hash_table_lookup(rsc->meta, PCMK_META_REMOTE_NODE);
if (value != NULL) {
name = crm_meta_name(PCMK_META_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
/* The container meta-attribute can be set on the primitive itself or one of
* its parents (for example, a group inside a container resource), so check
* them all, and keep the highest one found.
*/
for (parent = rsc; parent != NULL; parent = parent->private->parent) {
if (parent->container != NULL) {
crm_xml_add(xml, CRM_META "_" PCMK__META_CONTAINER,
parent->container->id);
}
}
/* Bundle replica children will get their external-ip set internally as a
* meta-attribute. The graph action needs it, but under a different naming
* convention than other meta-attributes.
*/
value = g_hash_table_lookup(rsc->meta, "external-ip");
if (value != NULL) {
crm_xml_add(xml, "pcmk_external_ip", value);
}
}
// Primitive implementation of pcmk__assignment_methods_t:add_utilization()
void
pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
CRM_ASSERT(pcmk__is_primitive(rsc)
&& (orig_rsc != NULL) && (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pcmk__rsc_trace(orig_rsc,
"%s: Adding primitive %s as colocated utilization",
orig_rsc->id, rsc->id);
pcmk__release_node_capacity(utilization, rsc);
}
/*!
* \internal
* \brief Get epoch time of node's shutdown attribute (or now if none)
*
* \param[in,out] node Node to check
*
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
shutdown_time(pcmk_node_t *node)
{
const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL,
pcmk__rsc_node_current);
time_t result = 0;
if (shutdown != NULL) {
long long result_ll;
if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
result = (time_t) result_ll;
}
}
return (result == 0)? get_effective_time(node->details->data_set) : result;
}
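/* A standalone sketch of the fallback above, using strtoll() in place of
 * pcmk__scan_ll() and time(NULL) in place of the scheduler's effective time
 * (both substitutions are illustrative):
 */
#include <errno.h>
#include <stdlib.h>
#include <time.h>

static time_t
example_shutdown_time(const char *attr_value)
{
    if (attr_value != NULL) {
        char *end = NULL;
        long long when = 0;

        errno = 0;
        when = strtoll(attr_value, &end, 10);
        if ((errno == 0) && (end != attr_value) && (*end == '\0')
            && (when > 0)) {
            return (time_t) when; // valid epoch value from the attribute
        }
    }
    return time(NULL); // no usable attribute: treat the shutdown as "now"
}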
/*!
* \internal
* \brief Ban a resource from a node if it's not locked to the node
*
* \param[in] data Node to check
* \param[in,out] user_data Resource to check
*/
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
const pcmk_node_t *node = (const pcmk_node_t *) data;
pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
PCMK_OPT_SHUTDOWN_LOCK, rsc->private->scheduler);
}
}
// Primitive implementation of pcmk__assignment_methods_t:shutdown_lock()
void
pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc)
{
const char *class = NULL;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
|| rsc->is_remote_node) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pcmk__rsc_info(rsc,
"Cancelling shutdown lock "
"because %s is already active", rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pcmk_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pcmk__rsc_debug(rsc,
"Not locking %s to unclean %s for shutdown",
rsc->id, pcmk__node_name(node));
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (scheduler->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + scheduler->shutdown_lock;
pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pcmk__node_name(rsc->lock_node),
(long long) lock_expiration);
pe__update_recheck_time(++lock_expiration, scheduler,
"shutdown lock expiration");
} else {
pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pcmk__node_name(rsc->lock_node));
}
// If resource is locked to one node, ban it from all other nodes
g_list_foreach(scheduler->nodes, ban_if_not_locked, rsc);
}
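/* A standalone sketch of the expiration arithmetic above, with the
 * cluster-wide shutdown-lock limit modeled as a plain time_t: a nonzero limit
 * makes the lock expire at lock_time + limit, and the scheduler must re-run
 * just after that.
 */
#include <time.h>

static time_t
example_lock_expiration(time_t lock_time, time_t limit)
{
    return (limit > 0)? (lock_time + limit) : (time_t) 0; // 0 = no expiration
}

/* e.g. lock_time 1700000000 and limit 600 give expiration 1700000600; the
 * recheck time is then set to 1700000601 (expiration + 1), as above.
 */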
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 497311274a..ae54516100 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1172 +1,1172 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdint.h>
#include <crm/common/output.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/complex.h>
#include <crm/pengine/internal.h>
#include <crm/common/xml.h>
#include <pe_status_private.h>
#ifdef PCMK__COMPAT_2_0
#define PROVIDER_SEP "::"
#else
#define PROVIDER_SEP ":"
#endif
/*!
 * \internal
 * \brief Check whether a resource is active on multiple nodes
 *
 * \param[in] rsc Resource to check
 *
 * \return true if \p rsc is a primitive active on more than one node,
 * otherwise false
 */
static bool
is_multiply_active(const pcmk_resource_t *rsc)
{
unsigned int count = 0;
if (pcmk__is_primitive(rsc)) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
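/*!
 * \internal
 * \brief Add a resource's priority to a node's priority
 *
 * \param[in] rsc Resource whose priority to add
 * \param[in,out] node Node whose priority to update
 * \param[in] failed Whether the resource is failed on \p node (if so, or if
 * the resource's priority is 0, nothing is added)
 */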
static void
native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node,
gboolean failed)
{
int priority = 0;
if ((rsc->priority == 0) || (failed == TRUE)) {
return;
}
if (rsc->role == pcmk_role_promoted) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
} else {
priority = rsc->priority;
}
node->details->priority += priority;
pcmk__rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
pcmk__node_name(node), node->details->priority,
(rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == pcmk_role_promoted)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
if (node->details->remote_rsc
&& node->details->remote_rsc->container) {
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pcmk__rsc_trace(rsc,
"%s now has priority %d with %s'%s' "
"(priority: %d%s) from guest node %s",
pcmk__node_name(a_node), a_node->details->priority,
(rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == pcmk_role_promoted)? " + 1" : "",
pcmk__node_name(node));
}
}
}
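/* A standalone sketch of the contribution computed above: a promoted instance
 * adds its base priority plus one; any other role adds the base priority.
 */
#include <stdbool.h>

static int
example_priority_contribution(int base_priority, bool promoted)
{
    return promoted? (base_priority + 1) : base_priority;
}

/* e.g. a promoted instance with priority 10 raises its node's priority by 11,
 * and (for guest nodes) raises each node hosting the container by 11 as well.
 */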
void
native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler, gboolean failed)
{
pcmk_resource_t *parent = rsc->private->parent;
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
return;
}
}
pcmk__rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pcmk__node_name(node),
pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
if (pcmk__is_primitive(rsc)) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
if (node->details->maintenance) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_maintenance);
}
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk_resource_t *p = parent;
pcmk__rsc_info(rsc, "resource %s isn't managed", rsc->id);
resource_location(rsc, node, PCMK_SCORE_INFINITY,
"not_managed_default", scheduler);
        while ((p != NULL) && node->details->online) {
            /* Add the node to each ancestor's running list, without the
             * additional location constraint
             */
p->running_on = g_list_append(p->running_on, node);
p = p->private->parent;
}
return;
}
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
case pcmk_multiply_active_stop:
{
GHashTableIter gIter;
pcmk_node_t *local_node = NULL;
                    /* Ban every node so the resource can't start anywhere
                     * again
                     */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -PCMK_SCORE_INFINITY;
}
}
break;
case pcmk_multiply_active_block:
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked);
/* If the resource belongs to a group or bundle configured with
* PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_BLOCK, block the entire
* entity.
*/
if ((pcmk__is_group(parent) || pcmk__is_bundle(parent))
&& (parent->recovery_type == pcmk_multiply_active_block)) {
for (GList *gIter = parent->children;
gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = gIter->data;
pcmk__clear_rsc_flags(child, pcmk_rsc_managed);
pcmk__set_rsc_flags(child, pcmk_rsc_blocked);
}
}
break;
// pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
default:
/* The scheduler will do the right thing because the relevant
* variables and flags are set when unpacking the history.
*/
break;
}
crm_debug("%s is active on multiple nodes including %s: %s",
rsc->id, pcmk__node_name(node),
- pcmk__multiply_active_text(rsc->recovery_type));
+ pcmk__multiply_active_text(rsc));
} else {
pcmk__rsc_trace(rsc, "Resource %s is active on %s",
rsc->id, pcmk__node_name(node));
}
if (parent != NULL) {
native_add_running(parent, node, scheduler, FALSE);
}
}
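/* A standalone model of the recovery policies handled above; the names below
 * are illustrative stand-ins for enum rsc_recovery_type, not the library's
 * identifiers.
 */
enum example_multiple_active_policy {
    example_ma_stop,    // stop all copies and keep them stopped
    example_ma_block,   // stop managing the resource
    example_ma_restart, // left to history unpacking
};

static const char *
example_multiple_active_effect(enum example_multiple_active_policy policy)
{
    switch (policy) {
        case example_ma_stop:
            // Matches the case above that bans every allowed node
            return "score every allowed node at -INFINITY";
        case example_ma_block:
            // Matches the case above that unmanages the resource
            return "clear the managed flag and set the blocked flag";
        default:
            // Matches the default case above
            return "rely on flags set while unpacking resource history";
    }
}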
static void
recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data)
{
pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique);
pcmk__insert_meta(rsc, PCMK_META_GLOBALLY_UNIQUE, PCMK_VALUE_FALSE);
g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
}
gboolean
native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *parent = uber_parent(rsc);
const char *standard = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id);
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
&& pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& pcmk__is_clone(parent)) {
        /* @COMPAT We should probably reject this situation as an error (as we
         * do for promotable below) rather than warn and convert, but that
         * would be a backward-incompatible change best done with a transform
         * at a schema major version bump.
         */
pe__force_anon(standard, parent, rsc->id, scheduler);
        /* Clear PCMK_META_GLOBALLY_UNIQUE on the parent and all of its
         * descendants unpacked so far (clearing the parent should make the
         * unpacking of any future children correct). We have to clear this
         * resource explicitly because it isn't hooked into the parent's
         * children yet.
         */
recursive_clear_unique(parent, NULL);
recursive_clear_unique(rsc, NULL);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
&& pcmk_is_set(parent->flags, pcmk_rsc_promotable)) {
pcmk__config_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
rsc->id, standard);
return FALSE;
}
return TRUE;
}
static bool
rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags)
{
pcmk__rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, pcmk__node_name(node));
if (pcmk_is_set(flags, pcmk_rsc_match_current_node)
&& (rsc->running_on != NULL)) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
if (pcmk__same_node((pcmk_node_t *) iter->data, node)) {
return true;
}
}
} else if (pcmk_is_set(flags, pe_find_inactive) // @COMPAT deprecated
&& (rsc->running_on == NULL)) {
return true;
} else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node)
&& (rsc->allocated_to != NULL)
&& pcmk__same_node(rsc->allocated_to, node)) {
return true;
}
return false;
}
pcmk_resource_t *
native_find_rsc(pcmk_resource_t *rsc, const char *id,
const pcmk_node_t *on_node, int flags)
{
bool match = false;
pcmk_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) {
const char *rid = pcmk__xe_id(rsc->private->xml);
if (!pcmk__is_clone(pe__const_top_resource(rsc, false))) {
match = false;
} else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) {
match = true;
}
} else if (!strcmp(id, rsc->id)) {
match = true;
} else if (pcmk_is_set(flags, pcmk_rsc_match_history)
&& pcmk__str_eq(rsc->private->history_id, id, pcmk__str_none)) {
match = true;
} else if (pcmk_is_set(flags, pcmk_rsc_match_basename)
|| (pcmk_is_set(flags, pcmk_rsc_match_anon_basename)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
if (match && on_node) {
if (!rsc_is_on_node(rsc, on_node, flags)) {
match = false;
}
}
if (match) {
return rsc;
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
result = rsc->private->fns->find_rsc(child, id, on_node, flags);
if (result) {
return result;
}
}
return NULL;
}
// create is ignored
char *
native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *params = NULL;
CRM_CHECK(rsc != NULL, return NULL);
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pcmk__rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
params = pe_rsc_params(rsc, node, scheduler);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
value = g_hash_table_lookup(rsc->meta, name);
}
return pcmk__str_copy(value);
}
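/* A runnable GLib sketch of the lookup order above: instance parameters
 * first, then meta-attributes as a fallback. Only glib-2.0 is required; the
 * table names are illustrative.
 */
#include <glib.h>

static const char *
example_parameter(GHashTable *params, GHashTable *meta, const char *name)
{
    const char *value = g_hash_table_lookup(params, name);

    if (value == NULL) {
        value = g_hash_table_lookup(meta, name); // fall back to meta-attributes
    }
    return value;
}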
gboolean
native_active(pcmk_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
if (a_node->details->unclean) {
pcmk__rsc_trace(rsc, "Resource %s: %s is unclean",
rsc->id, pcmk__node_name(a_node));
return TRUE;
} else if (!a_node->details->online
&& pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__rsc_trace(rsc, "Resource %s: %s is offline",
rsc->id, pcmk__node_name(a_node));
} else {
pcmk__rsc_trace(rsc, "Resource %s active on %s",
rsc->id, pcmk__node_name(a_node));
return TRUE;
}
}
return FALSE;
}
struct print_data_s {
long options;
void *print_data;
};
static const char *
native_pending_state(const pcmk_resource_t *rsc)
{
const char *pending_state = NULL;
if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
pcmk__str_casei)) {
pending_state = "Stopping";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_casei)) {
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_casei)) {
        /* migrate_from (the second half of a migration) is also shown
         * as "Migrating"
         */
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
pcmk__str_casei)) {
pending_state = "Promoting";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
pcmk__str_casei)) {
pending_state = "Demoting";
}
return pending_state;
}
static const char *
native_pending_task(const pcmk_resource_t *rsc)
{
const char *pending_task = NULL;
if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, uncomment this and the corresponding part of
* unpack.c:unpack_rsc_op().
*/
/*
} else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
pending_task = "Checking";
*/
}
return pending_task;
}
static enum rsc_role_e
native_displayable_role(const pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
if ((role == pcmk_role_started)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)) {
role = pcmk_role_unpromoted;
}
return role;
}
static const char *
native_displayable_state(const pcmk_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
if (print_pending) {
rsc_state = native_pending_state(rsc);
}
if (rsc_state == NULL) {
rsc_state = pcmk_role_text(native_displayable_role(rsc));
}
return rsc_state;
}
// Append a flag to resource description string's flags list
static bool
add_output_flag(GString *s, const char *flag_desc, bool have_flags)
{
g_string_append(s, (have_flags? ", " : " ("));
g_string_append(s, flag_desc);
return true;
}
// Append a node name to resource description string's node list
static bool
add_output_node(GString *s, const char *node, bool have_nodes)
{
g_string_append(s, (have_nodes? " " : " [ "));
g_string_append(s, node);
return true;
}
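/* A runnable GLib sketch of how the two helpers above are used: the caller
 * appends each flag in turn, then closes the parenthesized list only if
 * anything was added.
 */
#include <glib.h>

static gboolean
example_add_flag(GString *s, const char *flag, gboolean have_flags)
{
    g_string_append(s, have_flags? ", " : " (");
    g_string_append(s, flag);
    return TRUE;
}

static void
example_description_flags(void)
{
    GString *s = g_string_new("rsc\t(ocf:pacemaker:Dummy):\t Stopped");
    gboolean have_flags = FALSE;

    have_flags = example_add_flag(s, "disabled", have_flags);
    have_flags = example_add_flag(s, "unmanaged", have_flags);
    if (have_flags) {
        g_string_append_c(s, ')'); // => "... Stopped (disabled, unmanaged)"
    }
    g_string_free(s, TRUE);
}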
/*!
* \internal
* \brief Create a string description of a resource
*
* \param[in] rsc Resource to describe
* \param[in] name Desired identifier for the resource
* \param[in] node If not NULL, node that resource is "on"
 * \param[in] show_opts Bitmask of pcmk_show_opt_e flags
* \param[in] target_role Resource's target role
* \param[in] show_nodes Whether to display nodes when multiply active
*
* \return Newly allocated string description of resource
* \note Caller must free the result with g_free().
*/
gchar *
pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *provider = NULL;
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
GString *outstr = NULL;
bool have_flags = false;
if (!pcmk__is_primitive(rsc)) {
return NULL;
}
CRM_CHECK(name != NULL, name = "unknown");
CRM_CHECK(kind != NULL, kind = "unknown");
CRM_CHECK(class != NULL, class = "unknown");
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
}
if ((node == NULL) && (rsc->lock_node != NULL)) {
node = rsc->lock_node;
}
if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only)
|| pcmk__list_of_multiple(rsc->running_on)) {
node = NULL;
}
outstr = g_string_sized_new(128);
// Resource name and agent
pcmk__g_strcat(outstr,
name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP),
pcmk__s(provider, ""), ":", kind, "):\t", NULL);
// State on node
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
g_string_append(outstr, " ORPHANED");
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
g_string_append(outstr, " FAILED");
if (role > pcmk_role_unpromoted) {
pcmk__add_word(&outstr, 0, pcmk_role_text(role));
}
} else {
bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending);
pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending));
}
if (node) {
pcmk__add_word(&outstr, 0, pcmk__node_name(node));
}
// Failed probe operation
if (native_displayable_role(rsc) == pcmk_role_stopped) {
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
if (probe_op != NULL) {
int rc;
pcmk__scan_min_int(crm_element_value(probe_op, PCMK__XA_RC_CODE),
&rc, 0);
pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ",
NULL);
}
}
// Flags, as: (<flag> [...])
if (node && !(node->details->online) && node->details->unclean) {
have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
}
if (node && (node == rsc->lock_node)) {
have_flags = add_output_flag(outstr, "LOCKED", have_flags);
}
if (pcmk_is_set(show_opts, pcmk_show_pending)) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
have_flags = add_output_flag(outstr, pending_task, have_flags);
}
}
if (target_role != NULL) {
switch (pcmk_parse_role(target_role)) {
case pcmk_role_unknown:
pcmk__config_err("Invalid " PCMK_META_TARGET_ROLE
" %s for resource %s", target_role, rsc->id);
break;
case pcmk_role_stopped:
have_flags = add_output_flag(outstr, "disabled", have_flags);
break;
case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)) {
have_flags = add_output_flag(outstr,
PCMK_META_TARGET_ROLE ":",
have_flags);
g_string_append(outstr, target_role);
}
break;
default:
            /* Only show target role if it limits our abilities (i.e., ignore
             * Started, since it is the default anyway and doesn't prevent
             * the resource from becoming promoted).
             */
break;
}
}
// Blocked or maintenance implies unmanaged
if (pcmk_any_flags_set(rsc->flags,
pcmk_rsc_blocked|pcmk_rsc_maintenance)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
have_flags = add_output_flag(outstr, "maintenance", have_flags);
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
if (have_flags) {
g_string_append_c(outstr, ')');
}
// User-supplied description
if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
|| pcmk__list_of_multiple(rsc->running_on)) {
const char *desc = crm_element_value(rsc->private->xml,
PCMK_XA_DESCRIPTION);
if (desc) {
g_string_append(outstr, " (");
g_string_append(outstr, desc);
g_string_append(outstr, ")");
}
}
if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
&& pcmk__list_of_multiple(rsc->running_on)) {
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *n = (pcmk_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
if (have_nodes) {
g_string_append(outstr, " ]");
}
}
return g_string_free(outstr, FALSE);
}
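/* Hand-written illustrations (not generated output) of strings this function
 * can produce; the name/agent/state fields are tab-separated in real output:
 *
 *   myip   (ocf:heartbeat:IPaddr2):      Started node1
 *   mydb   (ocf:heartbeat:pgsql):        FAILED Promoted node2 (blocked)
 *   myfs   (ocf:heartbeat:Filesystem):   Started [ node1 node2 ]
 */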
int
pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
const char *target_role = NULL;
const char *cl = NULL;
xmlNode *child = NULL;
gchar *content = NULL;
CRM_ASSERT((kind != NULL) && pcmk__is_primitive(rsc));
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
PCMK__META_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
cl = PCMK__VALUE_RSC_MANAGED;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
cl = PCMK__VALUE_RSC_FAILED;
} else if (pcmk__is_primitive(rsc) && (rsc->running_on == NULL)) {
cl = PCMK__VALUE_RSC_FAILED;
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = PCMK__VALUE_RSC_MULTIPLE;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
cl = PCMK__VALUE_RSC_FAILURE_IGNORED;
} else {
cl = PCMK__VALUE_RSC_OK;
}
child = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
child = pcmk__html_create(child, PCMK__XE_SPAN, NULL, cl);
content = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
pcmk__xe_set_content(child, "%s", content);
g_free(content);
return pcmk_rc_ok;
}
int
pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *target_role = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
PCMK__META_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
{
gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
out->list_item(out, NULL, "%s", s);
g_free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
int rc = pcmk_rc_no_output;
bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *prov = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
char ra_name[LINE_MAX];
const char *rsc_state = native_displayable_state(rsc, print_pending);
const char *target_role = NULL;
const char *active = pcmk__btoa(rsc->private->fns->active(rsc, TRUE));
const char *orphaned = pcmk__flag_text(rsc->flags, pcmk_rsc_removed);
const char *blocked = pcmk__flag_text(rsc->flags, pcmk_rsc_blocked);
const char *maintenance = pcmk__flag_text(rsc->flags, pcmk_rsc_maintenance);
const char *managed = pcmk__flag_text(rsc->flags, pcmk_rsc_managed);
const char *failed = pcmk__flag_text(rsc->flags, pcmk_rsc_failed);
const char *ignored = pcmk__flag_text(rsc->flags, pcmk_rsc_ignore_failure);
char *nodes_running_on = NULL;
const char *pending = print_pending? native_pending_task(rsc) : NULL;
const char *locked_to = NULL;
const char *desc = pe__resource_description(rsc, show_opts);
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
// Resource information
snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
crm_element_value(rsc->private->xml, PCMK_XA_TYPE));
if (rsc->meta != NULL) {
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
if (rsc->lock_node != NULL) {
locked_to = rsc->lock_node->details->uname;
}
rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_RESOURCE,
PCMK_XA_ID, rsc_printable_id(rsc),
PCMK_XA_RESOURCE_AGENT, ra_name,
PCMK_XA_ROLE, rsc_state,
PCMK_XA_TARGET_ROLE, target_role,
PCMK_XA_ACTIVE, active,
PCMK_XA_ORPHANED, orphaned,
PCMK_XA_BLOCKED, blocked,
PCMK_XA_MAINTENANCE, maintenance,
PCMK_XA_MANAGED, managed,
PCMK_XA_FAILED, failed,
PCMK_XA_FAILURE_IGNORED, ignored,
PCMK_XA_NODES_RUNNING_ON, nodes_running_on,
PCMK_XA_PENDING, pending,
PCMK_XA_LOCKED_TO, locked_to,
PCMK_XA_DESCRIPTION, desc,
NULL);
free(nodes_running_on);
CRM_ASSERT(rc == pcmk_rc_ok);
if (rsc->running_on != NULL) {
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter->data;
const char *cached = pcmk__btoa(node->details->online);
rc = pe__name_and_nvpairs_xml(out, false, PCMK_XE_NODE,
PCMK_XA_NAME, node->details->uname,
PCMK_XA_ID, node->details->id,
PCMK_XA_CACHED, cached,
NULL);
CRM_ASSERT(rc == pcmk_rc_ok);
}
}
pcmk__output_xml_pop_parent(out);
return rc;
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const pcmk_node_t *node = pcmk__current_node(rsc);
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
CRM_ASSERT(pcmk__is_primitive(rsc));
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const pcmk_node_t *node = pcmk__current_node(rsc);
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
void
native_free(pcmk_resource_t * rsc)
{
pcmk__rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
native_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
if (current) {
role = rsc->role;
}
pcmk__rsc_trace(rsc, "%s state: %s", rsc->id, pcmk_role_text(role));
return role;
}
/*!
* \internal
 * \brief List nodes where a resource (or any of its children) is located
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current 0 = where allocated, 1 = where running,
* 2 = where running or pending
*
* \return If list contains only one node, that node, or NULL otherwise
*/
pcmk_node_t *
native_location(const pcmk_resource_t *rsc, GList **list, int current)
{
// @COMPAT: Accept a pcmk__rsc_node argument instead of int current
pcmk_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
child->private->fns->location(child, &result, current);
}
} else if (current) {
if (rsc->running_on) {
result = g_list_copy(rsc->running_on);
}
if ((current == 2) && rsc->pending_node
&& !pe_find_node_id(result, rsc->pending_node->details->id)) {
result = g_list_append(result, rsc->pending_node);
}
} else if (current == FALSE && rsc->allocated_to) {
result = g_list_append(NULL, rsc->allocated_to);
}
if (result && (result->next == NULL)) {
one = result->data;
}
if (list) {
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
}
}
}
g_list_free(result);
return one;
}
static void
get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
int offset = 0;
char buffer[LINE_MAX];
int *rsc_counter = NULL;
int *active_counter = NULL;
if (!pcmk__is_primitive(rsc)) {
continue;
}
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
const char *prov = crm_element_value(rsc->private->xml,
PCMK_XA_PROVIDER);
if (prov != NULL) {
offset += snprintf(buffer + offset, LINE_MAX - offset,
PROVIDER_SEP "%s", prov);
}
}
offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
CRM_LOG_ASSERT(offset > 0);
if (rsc_table) {
rsc_counter = g_hash_table_lookup(rsc_table, buffer);
if (rsc_counter == NULL) {
rsc_counter = pcmk__assert_alloc(1, sizeof(int));
*rsc_counter = 0;
g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
}
(*rsc_counter)++;
}
if (active_table) {
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE &&
pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
continue;
}
node_table = g_hash_table_lookup(active_table, node->details->uname);
if (node_table == NULL) {
node_table = pcmk__strkey_table(free, free);
g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
}
active_counter = g_hash_table_lookup(node_table, buffer);
if (active_counter == NULL) {
active_counter = pcmk__assert_alloc(1, sizeof(int));
*active_counter = 0;
g_hash_table_insert(node_table, strdup(buffer), active_counter);
}
(*active_counter)++;
}
}
}
}
static void
destroy_node_table(gpointer data)
{
GHashTable *node_table = data;
if (node_table) {
g_hash_table_destroy(node_table);
}
}
int
pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
{
GHashTable *rsc_table = pcmk__strkey_table(free, free);
GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
GList *sorted_rscs;
int rc = pcmk_rc_no_output;
get_rscs_brief(rsc_list, rsc_table, active_table);
    /* Make a sorted list of the rsc_table keys, so that output order stays
     * consistent between systems.
     */
sorted_rscs = g_hash_table_get_keys(rsc_table);
sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
char *type = (char *) gIter->data;
int *rsc_counter = g_hash_table_lookup(rsc_table, type);
GList *sorted_nodes = NULL;
int active_counter_all = 0;
        /* Also sort the active_table keys: if more than one instance of a
         * resource type is running, the nodes must be sorted so that output
         * order stays consistent between systems.
         */
sorted_nodes = g_hash_table_get_keys(active_table);
sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);
for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
char *node_name = (char *) gIter2->data;
GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
int *active_counter = NULL;
if (node_table == NULL) {
continue;
}
active_counter = g_hash_table_lookup(node_table, type);
if (active_counter == NULL || *active_counter == 0) {
continue;
} else {
active_counter_all += *active_counter;
}
if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
node_name = NULL;
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
*active_counter,
rsc_counter ? *rsc_counter : 0, type,
(*active_counter > 0) && node_name ? node_name : "");
} else {
out->list_item(out, NULL, "%d\t(%s):\tActive %s",
*active_counter, type,
(*active_counter > 0) && node_name ? node_name : "");
}
rc = pcmk_rc_ok;
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
active_counter_all,
rsc_counter ? *rsc_counter : 0, type);
rc = pcmk_rc_ok;
}
if (sorted_nodes) {
g_list_free(sorted_nodes);
}
}
if (rsc_table) {
g_hash_table_destroy(rsc_table);
rsc_table = NULL;
}
if (active_table) {
g_hash_table_destroy(active_table);
active_table = NULL;
}
if (sorted_rscs) {
g_list_free(sorted_rscs);
}
return rc;
}
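/* Hand-written illustration of the brief listing built above. With
 * pcmk_show_inactive_rscs set, each line follows "%d/%d\t(%s):\tActive %s"
 * (active count, configured count, agent, node):
 *
 *   1/1  (ocf:pacemaker:Dummy):    Active node1
 *   0/2  (ocf:heartbeat:IPaddr2):  Active
 */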
gboolean
pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
return FALSE;
} else if (check_parent && (rsc->private->parent != NULL)) {
const pcmk_resource_t *up = pe__const_top_resource(rsc, true);
return up->private->fns->is_filtered(up, only_rsc, FALSE);
}
return TRUE;
}
/*!
* \internal
* \brief Get maximum primitive resource instances per node
*
* \param[in] rsc Primitive resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int
pe__primitive_max_per_node(const pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_primitive(rsc));
return 1U;
}
