Page MenuHomeClusterLabs Projects

No OneTemporary

diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 17787ddbf8..3786ca9526 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -1,513 +1,517 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef PENGINE_STATUS__H
# define PENGINE_STATUS__H
# include <glib.h>
# include <stdbool.h>
# include <crm/common/iso8601.h>
# include <crm/pengine/common.h>
typedef struct node_s pe_node_t;
typedef struct node_s node_t;
typedef struct pe_action_s action_t;
typedef struct pe_action_s pe_action_t;
typedef struct resource_s resource_t;
typedef struct ticket_s ticket_t;
typedef enum no_quorum_policy_e {
no_quorum_freeze,
no_quorum_stop,
no_quorum_ignore,
no_quorum_suicide
} no_quorum_policy_t;
enum node_type {
node_ping,
node_member,
node_remote
};
enum pe_restart {
pe_restart_restart,
pe_restart_ignore
};
enum pe_find {
pe_find_renamed = 0x001,
pe_find_anon = 0x002,
pe_find_clone = 0x004,
pe_find_current = 0x008,
pe_find_inactive = 0x010,
};
# define pe_flag_have_quorum 0x00000001ULL
# define pe_flag_symmetric_cluster 0x00000002ULL
# define pe_flag_is_managed_default 0x00000004ULL
# define pe_flag_maintenance_mode 0x00000008ULL
# define pe_flag_stonith_enabled 0x00000010ULL
# define pe_flag_have_stonith_resource 0x00000020ULL
# define pe_flag_enable_unfencing 0x00000040ULL
# define pe_flag_concurrent_fencing 0x00000080ULL
# define pe_flag_stop_rsc_orphans 0x00000100ULL
# define pe_flag_stop_action_orphans 0x00000200ULL
# define pe_flag_stop_everything 0x00000400ULL
# define pe_flag_start_failure_fatal 0x00001000ULL
# define pe_flag_remove_after_stop 0x00002000ULL
# define pe_flag_startup_fencing 0x00004000ULL
# define pe_flag_startup_probes 0x00010000ULL
# define pe_flag_have_status 0x00020000ULL
# define pe_flag_have_remote_nodes 0x00040000ULL
# define pe_flag_quick_location 0x00100000ULL
# define pe_flag_sanitized 0x00200000ULL
typedef struct pe_working_set_s {
xmlNode *input;
crm_time_t *now;
/* options extracted from the input */
char *dc_uuid;
node_t *dc_node;
const char *stonith_action;
const char *placement_strategy;
unsigned long long flags;
int stonith_timeout;
int default_resource_stickiness;
no_quorum_policy_t no_quorum_policy;
GHashTable *config_hash;
GHashTable *tickets;
// Actions for which there can be only one (e.g. fence nodeX)
GHashTable *singletons;
GListPtr nodes;
GListPtr resources;
GListPtr placement_constraints;
GListPtr ordering_constraints;
GListPtr colocation_constraints;
GListPtr ticket_constraints;
GListPtr actions;
xmlNode *failed;
xmlNode *op_defaults;
xmlNode *rsc_defaults;
/* stats */
int num_synapse;
int max_valid_nodes;
int order_id;
int action_id;
/* final output */
xmlNode *graph;
GHashTable *template_rsc_sets;
const char *localhost;
GHashTable *tags;
int blocked_resources;
int disabled_resources;
} pe_working_set_t;
struct node_shared_s {
const char *id;
const char *uname;
/* @TODO convert these flags (and the ones at the end) into a bitfield */
gboolean online;
gboolean standby;
gboolean standby_onfail;
gboolean pending;
gboolean unclean;
gboolean unseen;
gboolean shutdown;
gboolean expected_up;
gboolean is_dc;
int num_resources;
GListPtr running_rsc; /* resource_t* */
GListPtr allocated_rsc; /* resource_t* */
resource_t *remote_rsc;
GHashTable *attrs; /* char* => char* */
enum node_type type;
GHashTable *utilization;
/*! cache of calculated rsc digests for this node. */
GHashTable *digest_cache;
gboolean maintenance;
gboolean rsc_discovery_enabled;
gboolean remote_requires_reset;
gboolean remote_was_fenced;
gboolean remote_maintenance; /* what the remote-rsc is thinking */
gboolean unpacked;
};
struct node_s {
int weight;
gboolean fixed;
int count;
struct node_shared_s *details;
int rsc_discover_mode;
};
# include <crm/pengine/complex.h>
# define pe_rsc_orphan 0x00000001ULL
# define pe_rsc_managed 0x00000002ULL
# define pe_rsc_block 0x00000004ULL
# define pe_rsc_orphan_container_filler 0x00000008ULL
# define pe_rsc_notify 0x00000010ULL
# define pe_rsc_unique 0x00000020ULL
# define pe_rsc_fence_device 0x00000040ULL
# define pe_rsc_provisional 0x00000100ULL
# define pe_rsc_allocating 0x00000200ULL
# define pe_rsc_merging 0x00000400ULL
# define pe_rsc_munging 0x00000800ULL
# define pe_rsc_try_reload 0x00001000ULL
# define pe_rsc_reload 0x00002000ULL
# define pe_rsc_allow_remote_remotes 0x00004000ULL
# define pe_rsc_failed 0x00010000ULL
# define pe_rsc_shutdown 0x00020000ULL
# define pe_rsc_runnable 0x00040000ULL
# define pe_rsc_start_pending 0x00080000ULL
# define pe_rsc_starting 0x00100000ULL
# define pe_rsc_stopping 0x00200000ULL
# define pe_rsc_migrating 0x00400000ULL
# define pe_rsc_allow_migrate 0x00800000ULL
# define pe_rsc_failure_ignored 0x01000000ULL
# define pe_rsc_unexpectedly_running 0x02000000ULL
# define pe_rsc_maintenance 0x04000000ULL
# define pe_rsc_needs_quorum 0x10000000ULL
# define pe_rsc_needs_fencing 0x20000000ULL
# define pe_rsc_needs_unfencing 0x40000000ULL
# define pe_rsc_have_unfencing 0x80000000ULL // obsolete (not set or used by cluster)
enum pe_graph_flags {
pe_graph_none = 0x00000,
pe_graph_updated_first = 0x00001,
pe_graph_updated_then = 0x00002,
pe_graph_disable = 0x00004,
};
/* *INDENT-OFF* */
enum pe_action_flags {
pe_action_pseudo = 0x00001,
pe_action_runnable = 0x00002,
pe_action_optional = 0x00004,
pe_action_print_always = 0x00008,
pe_action_have_node_attrs = 0x00010,
pe_action_failure_is_fatal = 0x00020, /* no longer used, here for API compatibility */
pe_action_implied_by_stonith = 0x00040,
pe_action_migrate_runnable = 0x00080,
pe_action_dumped = 0x00100,
pe_action_processed = 0x00200,
pe_action_clear = 0x00400,
pe_action_dangle = 0x00800,
/* This action requires one or more of its dependencies to be runnable.
* We use this to clear the runnable flag before checking dependencies.
*/
pe_action_requires_any = 0x01000,
pe_action_reschedule = 0x02000,
pe_action_tracking = 0x04000,
};
/* *INDENT-ON* */
struct resource_s {
char *id;
char *clone_name;
xmlNode *xml;
xmlNode *orig_xml;
xmlNode *ops_xml;
resource_t *parent;
void *variant_opaque;
enum pe_obj_types variant;
resource_object_functions_t *fns;
resource_alloc_functions_t *cmds;
enum rsc_recovery_type recovery_type;
enum pe_restart restart_type;
int priority;
int stickiness;
int sort_index;
int failure_timeout;
int effective_priority;
int migration_threshold;
gboolean is_remote_node;
unsigned long long flags;
GListPtr rsc_cons_lhs; /* rsc_colocation_t* */
GListPtr rsc_cons; /* rsc_colocation_t* */
GListPtr rsc_location; /* rsc_to_node_t* */
GListPtr actions; /* action_t* */
GListPtr rsc_tickets; /* rsc_ticket* */
node_t *allocated_to;
GListPtr running_on; /* node_t* */
GHashTable *known_on; /* node_t* */
GHashTable *allowed_nodes; /* node_t* */
enum rsc_role_e role;
enum rsc_role_e next_role;
GHashTable *meta;
GHashTable *parameters;
GHashTable *utilization;
GListPtr children; /* resource_t* */
GListPtr dangling_migrations; /* node_t* */
node_t *partial_migration_target;
node_t *partial_migration_source;
resource_t *container;
GListPtr fillers;
char *pending_task;
const char *isolation_wrapper;
gboolean exclusive_discover;
int remote_reconnect_interval;
pe_working_set_t *cluster;
#if ENABLE_VERSIONED_ATTRS
xmlNode *versioned_parameters;
#endif
};
#if ENABLE_VERSIONED_ATTRS
// Used as action->action_details if action->rsc is not NULL
typedef struct pe_rsc_action_details_s {
xmlNode *versioned_parameters;
xmlNode *versioned_meta;
} pe_rsc_action_details_t;
#endif
/* Runtime representation of a scheduled action.
 * NOTE(review): several field meanings below are inferred from names only;
 * the surrounding code in this view does not show their use -- confirm
 * against the policy-engine sources before relying on them.
 */
struct pe_action_s {
int id;
int priority;
resource_t *rsc;
node_t *node;
xmlNode *op_entry;
char *task;
char *uuid;
char *cancel_task;
enum pe_action_flags flags;
enum rsc_start_requirement needs;
enum action_fail_response on_fail;
enum rsc_role_e fail_role;
action_t *pre_notify;
action_t *pre_notified;
action_t *post_notify;
action_t *post_notified;
int seen_count;
GHashTable *meta;
GHashTable *extra;
/*
 * These two variables are associated with the constraint logic
 * that involves first having one or more actions runnable before
 * then allowing this action to execute.
 *
 * These variables are used with features such as 'clone-min' which
 * requires at minimum X number of cloned instances to be running
 * before an order dependency can run. Another option that uses
 * this is 'require-all=false' in ordering constraints. This option
 * says "only require one instance of a resource to start before
 * allowing dependencies to start" -- basically, require-all=false is
 * the same as clone-min=1.
 */
/* current number of known runnable actions in the before list. */
int runnable_before;
/* the number of "before" runnable actions required for this action
 * to be considered runnable */
int required_runnable_before;
GListPtr actions_before; /* action_wrapper_t* */
GListPtr actions_after; /* action_wrapper_t* */
/* Some of the above fields could be moved to the details,
 * except for API backward compatibility.
 */
void *action_details; // varies by type of action
char *reason;
};
struct ticket_s {
char *id;
gboolean granted;
time_t last_granted;
gboolean standby;
GHashTable *state;
};
typedef struct tag_s {
char *id;
GListPtr refs;
} tag_t;
enum pe_link_state {
pe_link_not_dumped,
pe_link_dumped,
pe_link_dup,
};
enum pe_discover_e {
pe_discover_always = 0,
pe_discover_never,
pe_discover_exclusive,
};
/* *INDENT-OFF* */
enum pe_ordering {
pe_order_none = 0x0, /* deleted */
pe_order_optional = 0x1, /* pure ordering, nothing implied */
pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */
pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */
pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */
pe_order_implies_first_master = 0x40, /* Imply 'first' is required when 'then' is required and then's rsc holds Master role. */
/* first requires then to be both runnable and migrate runnable. */
pe_order_implies_first_migratable = 0x80,
pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */
pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */
pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX',
* ensure instances of 'then' on 'nodeX' are too.
* Only really useful if 'then' is a clone and 'first' is not
*/
+ pe_order_probe = 0x800, /* If 'first->rsc' is
+ * - running but about to stop, ignore the constraint
+ * - otherwise, behave as runnable_left
+ */
pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */
pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */
pe_order_serialize_only = 0x4000, /* serialize */
pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */
pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */
pe_order_load = 0x200000, /* Only relevant if... */
pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */
pe_order_anti_colocation = 0x800000,
pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
pe_order_trace = 0x4000000, /* test marker */
};
/* *INDENT-ON* */
typedef struct action_wrapper_s action_wrapper_t;
struct action_wrapper_s {
enum pe_ordering type;
enum pe_link_state state;
action_t *action;
};
const char *rsc_printable_id(resource_t *rsc);
gboolean cluster_status(pe_working_set_t * data_set);
void set_working_set_defaults(pe_working_set_t * data_set);
void cleanup_calculations(pe_working_set_t * data_set);
resource_t *pe_find_resource(GListPtr rsc_list, const char *id_rh);
resource_t *pe_find_resource_with_flags(GListPtr rsc_list, const char *id, enum pe_find flags);
node_t *pe_find_node(GListPtr node_list, const char *uname);
node_t *pe_find_node_id(GListPtr node_list, const char *id);
node_t *pe_find_node_any(GListPtr node_list, const char *id, const char *uname);
GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter,
pe_working_set_t * data_set);
int pe_bundle_replicas(const resource_t *rsc);
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *pe_rsc_action_details(pe_action_t *action);
#endif
/*!
* \brief Check whether a resource is any clone type
*
* \param[in] rsc Resource to check
*
* \return TRUE if resource is clone, FALSE otherwise
*/
static inline bool
pe_rsc_is_clone(resource_t *rsc)
{
    /* NULL is not a clone */
    if (rsc == NULL) {
        return false;
    }
    /* Master resources are a specialization of clones */
    return (rsc->variant == pe_clone) || (rsc->variant == pe_master);
}
/*!
* \brief Check whether a resource is a globally unique clone
*
* \param[in] rsc Resource to check
*
* \return TRUE if resource is unique clone, FALSE otherwise
*/
static inline bool
pe_rsc_is_unique_clone(resource_t *rsc)
{
    /* Must be a clone at all before checking uniqueness */
    if (!pe_rsc_is_clone(rsc)) {
        return false;
    }
    return is_set(rsc->flags, pe_rsc_unique);
}
/*!
* \brief Check whether a resource is an anonymous clone
*
* \param[in] rsc Resource to check
*
* \return TRUE if resource is anonymous clone, FALSE otherwise
*/
static inline bool
pe_rsc_is_anon_clone(resource_t *rsc)
{
    /* An anonymous clone is a clone without the unique flag */
    if (!pe_rsc_is_clone(rsc)) {
        return false;
    }
    return is_not_set(rsc->flags, pe_rsc_unique);
}
#endif
diff --git a/pengine/container.c b/pengine/container.c
index df5a238921..a884516df3 100644
--- a/pengine/container.c
+++ b/pengine/container.c
@@ -1,944 +1,965 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <allocate.h>
#include <notif.h>
#include <utils.h>
#define VARIANT_CONTAINER 1
#include <lib/pengine/variant.h>
/* Check whether a node is the node of one of the bundle's replica tuples.
 * Comparison is by the shared node details, not by node_t pointer.
 */
static bool
is_child_container_node(container_variant_data_t *data, pe_node_t *node)
{
    GListPtr iter = NULL;

    for (iter = data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *) iter->data;

        if (tuple->node->details == node->details) {
            return TRUE;
        }
    }
    return FALSE;
}
gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes,
int max, int per_host_max, pe_working_set_t * data_set);
/* Build a list of the docker resource of each replica of a bundle.
 *
 * Returns a newly allocated GList that the caller must g_list_free();
 * the entries themselves are borrowed tuple->docker pointers.
 * Returns NULL for any non-container resource.
 */
static GListPtr get_container_list(resource_t *rsc)
{
GListPtr containers = NULL;
container_variant_data_t *data = NULL;
if(rsc->variant == pe_container) {
get_container_variant_data(data, rsc);
for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
containers = g_list_append(containers, tuple->docker);
}
}
return containers;
}
/* Return the resources to treat as the "children" of a resource.
 *
 * For a bundle, this is the docker resource of each replica, as a newly
 * allocated list (same result as get_container_list()); for anything else,
 * rsc->children is returned directly (borrowed, not copied).
 *
 * Callers must free the result only when it is not rsc->children:
 *     if (list != rsc->children) { g_list_free(list); }
 */
static GListPtr get_containers_or_children(resource_t *rsc)
{
    if (rsc->variant == pe_container) {
        /* Delegate instead of duplicating get_container_list()'s loop */
        return get_container_list(rsc);
    }
    return rsc->children;
}
/* Check whether a resource's fail count on a node has reached its
 * migration-threshold, i.e. whether the resource must be forced off.
 *
 * \param rsc       Resource whose fail count to check
 * \param node      Node to check the fail count on
 * \param data_set  Cluster working set
 *
 * \return TRUE if rsc must be forced away from node, FALSE otherwise
 */
static bool
migration_threshold_reached(resource_t *rsc, node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return FALSE;
}
/* If there are no failures, there's no need to force away */
fail_count = get_failcount_all(node, rsc, NULL, data_set);
if (fail_count <= 0) {
return FALSE;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
if (countdown == 0) {
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
rsc->id, node->details->uname, fail_count,
rsc->migration_threshold);
return TRUE;
}
crm_info("%s can fail %d more times on %s before being forced off",
rsc->id, countdown, node->details->uname);
return FALSE;
}
/* Allocate a bundle's resources to nodes.
 *
 * Distributes the per-replica docker resources across the allowed nodes,
 * then allocates each replica's ip/remote/child resources, and finally the
 * bundle's child clone restricted to the replica nodes.
 *
 * \param rsc       Bundle resource to allocate
 * \param prefer    Preferred node, passed through to member allocations
 * \param data_set  Cluster working set
 *
 * \return Always NULL (the bundle itself is not placed on a single node)
 */
node_t *
container_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr containers = NULL;
    GListPtr nodes = NULL;
    container_variant_data_t *container_data = NULL;

    CRM_CHECK(rsc != NULL, return NULL);

    get_container_variant_data(container_data, rsc);
    set_bit(rsc->flags, pe_rsc_allocating);
    containers = get_container_list(rsc);

    dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes);

    /* Spread the docker resources over the allowed nodes, best nodes first */
    nodes = g_hash_table_get_values(rsc->allowed_nodes);
    nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL);
    containers = g_list_sort_with_data(containers, sort_clone_instance, data_set);
    distribute_children(rsc, containers, nodes,
                        container_data->replicas, container_data->replicas_per_host, data_set);
    g_list_free(nodes);
    g_list_free(containers);

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;
        pe_node_t *docker_host = NULL;

        /* Fix: assert before dereferencing; previously the assertion ran
         * only after tuple->docker->allocated_to had already been read,
         * so it could never catch a NULL tuple.
         */
        CRM_ASSERT(tuple);
        docker_host = tuple->docker->allocated_to;

        if(tuple->ip) {
            tuple->ip->cmds->allocate(tuple->ip, prefer, data_set);
        }
        if(tuple->remote && is_remote_node(docker_host)) {
            /* We need 'nested' connection resources to be on the same
             * host because pacemaker-remoted only supports a single
             * active connection
             */
            rsc_colocation_new("child-remote-with-docker-remote", NULL,
                               INFINITY, tuple->remote, docker_host->details->remote_rsc, NULL, NULL, data_set);
        }
        if(tuple->remote) {
            tuple->remote->cmds->allocate(tuple->remote, prefer, data_set);
        }
        // Explicitly allocate tuple->child before the container->child
        if(tuple->child) {
            pe_node_t *node = NULL;
            GHashTableIter iter;

            /* Pin the replica's child to the replica's node (unless its
             * migration threshold has been reached there)
             */
            g_hash_table_iter_init(&iter, tuple->child->allowed_nodes);
            while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
                if(node->details != tuple->node->details) {
                    node->weight = -INFINITY;
                } else if(migration_threshold_reached(tuple->child, node, data_set) == FALSE) {
                    node->weight = INFINITY;
                }
            }

            set_bit(tuple->child->parent->flags, pe_rsc_allocating);
            tuple->child->cmds->allocate(tuple->child, tuple->node, data_set);
            clear_bit(tuple->child->parent->flags, pe_rsc_allocating);
        }
    }

    if(container_data->child) {
        pe_node_t *node = NULL;
        GHashTableIter iter;

        /* The bundle's child may only run on the replica container nodes */
        g_hash_table_iter_init(&iter, container_data->child->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
            if(is_child_container_node(container_data, node)) {
                node->weight = 0;
            } else {
                node->weight = -INFINITY;
            }
        }
        container_data->child->cmds->allocate(container_data->child, prefer, data_set);
    }

    clear_bit(rsc->flags, pe_rsc_allocating);
    clear_bit(rsc->flags, pe_rsc_provisional);
    return NULL;
}
/* Create all actions needed for a bundle resource.
 *
 * Creates actions for each replica's ip/docker/remote members, pseudo-actions
 * for the bundle itself, and (if the child is a master) promote/demote
 * pseudo-actions.
 */
void
container_create_actions(resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *action = NULL;
GListPtr containers = NULL;
container_variant_data_t *container_data = NULL;
CRM_CHECK(rsc != NULL, return);
containers = get_container_list(rsc);
get_container_variant_data(container_data, rsc);
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
CRM_ASSERT(tuple);
if(tuple->ip) {
tuple->ip->cmds->create_actions(tuple->ip, data_set);
}
if(tuple->docker) {
tuple->docker->cmds->create_actions(tuple->docker, data_set);
}
if(tuple->remote) {
tuple->remote->cmds->create_actions(tuple->remote, data_set);
}
}
clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set);
if(container_data->child) {
container_data->child->cmds->create_actions(container_data->child, data_set);
if(container_data->child->variant == pe_master) {
/* promote */
action = create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set);
action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set);
action->priority = INFINITY;
/* demote */
action = create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set);
action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set);
action->priority = INFINITY;
}
}
g_list_free(containers);
}
/* Create the implicit ordering and colocation constraints for a bundle.
 *
 * Orders the bundle pseudo-actions relative to the child clone and to each
 * replica's docker/ip/remote members, colocates each replica's IP with its
 * docker resource, and (for a master child) wires promotion/demotion
 * ordering.  The exact flag combinations are load-bearing; do not reorder.
 */
void
container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
{
container_variant_data_t *container_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_container_variant_data(container_data, rsc);
if(container_data->child) {
/* Bundle start/stop implies (for display purposes) child start/stop */
new_rsc_order(rsc, RSC_START, container_data->child, RSC_START, pe_order_implies_first_printed, data_set);
new_rsc_order(rsc, RSC_STOP, container_data->child, RSC_STOP, pe_order_implies_first_printed, data_set);
if(container_data->child->children) {
new_rsc_order(container_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set);
new_rsc_order(container_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set);
} else {
new_rsc_order(container_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set);
new_rsc_order(container_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set);
}
}
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
CRM_ASSERT(tuple);
CRM_ASSERT(tuple->docker);
tuple->docker->cmds->internal_constraints(tuple->docker, data_set);
order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed);
if(tuple->child) {
order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed);
}
order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed);
new_rsc_order(tuple->docker, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set);
new_rsc_order(tuple->docker, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set);
if(tuple->ip) {
tuple->ip->cmds->internal_constraints(tuple->ip, data_set);
// Start ip then docker
new_rsc_order(tuple->ip, RSC_START, tuple->docker, RSC_START,
pe_order_runnable_left|pe_order_preserve, data_set);
new_rsc_order(tuple->docker, RSC_STOP, tuple->ip, RSC_STOP,
pe_order_implies_first|pe_order_preserve, data_set);
rsc_colocation_new("ip-with-docker", NULL, INFINITY, tuple->ip, tuple->docker, NULL, NULL, data_set);
}
if(tuple->remote) {
/* This handles ordering and colocating remote relative to docker
* (via "resource-with-container"). Since IP is also ordered and
* colocated relative to docker, we don't need to do anything
* explicit here with IP.
*/
tuple->remote->cmds->internal_constraints(tuple->remote, data_set);
}
if(tuple->child) {
CRM_ASSERT(tuple->remote);
// Start of the remote then child is implicit in the PE's remote logic
}
}
if(container_data->child) {
container_data->child->cmds->internal_constraints(container_data->child, data_set);
if(container_data->child->variant == pe_master) {
master_promotion_constraints(rsc, data_set);
/* child demoted before global demoted */
new_rsc_order(container_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set);
/* global demote before child demote */
new_rsc_order(rsc, RSC_DEMOTE, container_data->child, RSC_DEMOTE, pe_order_implies_first_printed, data_set);
/* child promoted before global promoted */
new_rsc_order(container_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed, data_set);
/* global promote before child promote */
new_rsc_order(rsc, RSC_PROMOTE, container_data->child, RSC_PROMOTE, pe_order_implies_first_printed, data_set);
}
} else {
// int type = pe_order_optional | pe_order_implies_then | pe_order_restart;
// custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
// rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set);
}
}
/* Find a replica docker resource of a bundle that is compatible with a
 * given resource on a given node.
 *
 * \param rsc_lh     Resource being paired (used for logging only here)
 * \param candidate  Node the pairing must be compatible with
 * \param rsc        Bundle resource whose replicas are searched
 * \param filter     Role filter passed through to is_child_compatible()
 * \param current    Whether to check current rather than planned location
 *
 * \return Matching tuple's docker resource, or NULL if none matches
 */
static resource_t *
find_compatible_tuple_by_node(resource_t * rsc_lh, node_t * candidate, resource_t * rsc,
enum rsc_role_e filter, gboolean current)
{
container_variant_data_t *container_data = NULL;
CRM_CHECK(candidate != NULL, return NULL);
get_container_variant_data(container_data, rsc);
crm_trace("Looking for compatible child from %s for %s on %s",
rsc_lh->id, rsc->id, candidate->details->uname);
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
if(is_child_compatible(tuple->docker, candidate, filter, current)) {
crm_trace("Pairing %s with %s on %s",
rsc_lh->id, tuple->docker->id, candidate->details->uname);
return tuple->docker;
}
}
crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
return NULL;
}
/* Find a replica docker resource of a bundle compatible with rsc_lh.
 *
 * If rsc_lh has a known location, only that node is considered; otherwise
 * rsc_lh's allowed nodes are tried best-weight first.
 *
 * \return Matching tuple's docker resource, or NULL if none found
 */
static resource_t *
find_compatible_tuple(resource_t *rsc_lh, resource_t * rsc, enum rsc_role_e filter,
gboolean current)
{
GListPtr scratch = NULL;
resource_t *pair = NULL;
node_t *active_node_lh = NULL;
active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
if (active_node_lh) {
return find_compatible_tuple_by_node(rsc_lh, active_node_lh, rsc, filter, current);
}
/* No known location: try allowed nodes in descending weight order */
scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL);
for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
pair = find_compatible_tuple_by_node(rsc_lh, node, rsc, filter, current);
if (pair) {
goto done;
}
}
pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, rsc->id);
done:
g_list_free(scratch);
return pair;
}
/* Left-hand colocation handler for bundles: intentionally unreachable. */
void
container_rsc_colocation_lh(resource_t * rsc, resource_t * rsc_rh, rsc_colocation_t * constraint)
{
/* -- Never called --
*
* Instead we add the colocation constraints to the child and call from there
*/
CRM_ASSERT(FALSE);
}
/* Return how many instances of a resource may run on a single node:
 * 0 for unknown variants, 1 for primitives and groups, clone-node-max for
 * clones/masters (default 1), and replicas-per-host for bundles.
 */
int copies_per_node(resource_t * rsc)
{
/* Strictly speaking, there should be a 'copies_per_node' addition
* to the resource function table and each case would be a
* function. However that would be serious overkill to return an
* int. In fact, it seems to me that both function tables
* could/should be replaced by resources.{c,h} full of
* rsc_{some_operation} functions containing a switch as below
* which calls out to functions named {variant}_{some_operation}
* as needed.
*/
switch(rsc->variant) {
case pe_unknown:
return 0;
case pe_native:
case pe_group:
return 1;
case pe_clone:
case pe_master:
{
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
return crm_parse_int(max_clones_node, "1");
}
case pe_container:
{
container_variant_data_t *data = NULL;
get_container_variant_data(data, rsc);
return data->replicas_per_host;
}
}
/* Not reached for valid variants; keeps the compiler satisfied */
return 0;
}
/* Apply a colocation constraint with a bundle on the right-hand side.
 *
 * If the bundle is still provisional, nothing is done yet.  For a clone-ish
 * left-hand resource, the constraint is delegated to a compatible replica.
 * Otherwise each replica's docker resource handles the constraint; for
 * mandatory constraints, nodes without an allocatable replica are excluded
 * from rsc_lh's allowed nodes.
 */
void
container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocation_t * constraint)
{
GListPtr allocated_rhs = NULL;
container_variant_data_t *container_data = NULL;
CRM_CHECK(constraint != NULL, return);
CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return);
CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return);
CRM_ASSERT(rsc_lh->variant == pe_native);
if (is_set(rsc->flags, pe_rsc_provisional)) {
pe_rsc_trace(rsc, "%s is still provisional", rsc->id);
return;
} else if(constraint->rsc_lh->variant > pe_group) {
resource_t *rh_child = find_compatible_tuple(rsc_lh, rsc, RSC_ROLE_UNKNOWN, FALSE);
if (rh_child) {
pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id);
rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint);
} else if (constraint->score >= INFINITY) {
crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
assign_node(rsc_lh, NULL, TRUE);
} else {
pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
}
return;
}
get_container_variant_data(container_data, rsc);
pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d",
constraint->id, rsc_lh->id, rsc->id, constraint->score);
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
if (constraint->score < INFINITY) {
tuple->docker->cmds->rsc_colocation_rh(rsc_lh, tuple->docker, constraint);
} else {
node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE);
if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) {
pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight);
allocated_rhs = g_list_prepend(allocated_rhs, chosen);
}
}
}
if (constraint->score >= INFINITY) {
/* Mandatory: rsc_lh may only run where some replica is placed */
node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE);
}
g_list_free(allocated_rhs);
}
/* Compute the summary action flags for a bundle action on a node.
 *
 * Promotion/demotion/notification tasks are summarized from the child
 * clone's children; everything else from the replica docker resources.
 */
enum pe_action_flags
container_action_flags(action_t * action, node_t * node)
{
GListPtr containers = NULL;
enum pe_action_flags flags = 0;
container_variant_data_t *data = NULL;
get_container_variant_data(data, action->rsc);
if(data->child) {
enum action_tasks task = get_complex_task(data->child, action->task, TRUE);
switch(task) {
case no_action:
case action_notify:
case action_notified:
case action_promote:
case action_promoted:
case action_demote:
case action_demoted:
return summary_action_flags(action, data->child->children, node);
default:
break;
}
}
containers = get_container_list(action->rsc);
flags = summary_action_flags(action, containers, node);
g_list_free(containers);
return flags;
}
/* Find a child (or, for a bundle, a replica docker resource) of rsc that is
 * compatible with local_child on local_node.
 *
 * \return Matching child resource, or NULL if local_node is NULL or no
 *         child matches.  The returned pointer is borrowed.
 */
resource_t *
find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc,
enum rsc_role_e filter, gboolean current)
{
GListPtr gIter = NULL;
GListPtr children = NULL;
if (local_node == NULL) {
crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
return NULL;
}
crm_trace("Looking for compatible child from %s for %s on %s",
local_child->id, rsc->id, local_node->details->uname);
children = get_containers_or_children(rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
if(is_child_compatible(child_rsc, local_node, filter, current)) {
crm_trace("Pairing %s with %s on %s",
local_child->id, child_rsc->id, local_node->details->uname);
return child_rsc;
}
}
crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
/* get_containers_or_children() allocates only in the bundle case */
if(children != rsc->children) {
g_list_free(children);
}
return NULL;
}
/* Find the bundle replica tuple that owns a given docker resource on a
 * given node.  Only tuples that have a child are considered.
 *
 * \return Matching tuple, or NULL if rsc is not a bundle or no tuple matches
 */
static container_grouping_t *
tuple_for_docker(resource_t *rsc, resource_t *docker, node_t *node)
{
    container_variant_data_t *data = NULL;
    GListPtr iter = NULL;

    if (rsc->variant != pe_container) {
        return NULL;
    }

    get_container_variant_data(data, rsc);
    for (iter = data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *) iter->data;

        if ((tuple->child != NULL)
            && (tuple->docker == docker)
            && (tuple->node->details == node->details)) {
            return tuple;
        }
    }
    return NULL;
}
/* Apply an ordering between two interleaved collective resources by
 * pairing each child of 'then' with a compatible child of 'first' and
 * ordering the equivalent per-child actions.
 *
 * Returns pe_graph_flags describing which side(s) changed.
 *
 * Fix: the trailing "Nothing found" else-branch was unreachable - both
 * the first_action == NULL and then_action == NULL cases 'continue'
 * earlier - so it has been removed and the update call made
 * unconditional.  first_task is also reused instead of recomputing
 * task2text(task).
 */
static enum pe_graph_flags
container_update_interleave_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags,
                                    enum pe_action_flags filter, enum pe_ordering type)
{
    GListPtr gIter = NULL;
    GListPtr children = NULL;
    gboolean current = FALSE;
    enum pe_graph_flags changed = pe_graph_none;

    /* Fix this - lazy */
    if (crm_ends_with(first->uuid, "_stopped_0")
        || crm_ends_with(first->uuid, "_demoted_0")) {
        /* 'first' is a completion pseudo-op, so pair against where
         * instances are running now rather than where they will be
         */
        current = TRUE;
    }

    children = get_containers_or_children(then->rsc);
    for (gIter = children; gIter != NULL; gIter = gIter->next) {
        resource_t *then_child = (resource_t *) gIter->data;
        resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current);

        if (first_child == NULL && current) {
            crm_trace("Ignore");

        } else if (first_child == NULL) {
            crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);

            /* Me no like this hack - but what else can we do?
             *
             * If there is no-one active or about to be active
             *   on the same node as then_child, then they must
             *   not be allowed to start
             */
            if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
                pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                if (assign_node(then_child, NULL, TRUE)) {
                    changed |= pe_graph_updated_then;
                }
            }

        } else {
            pe_action_t *first_action = NULL;
            pe_action_t *then_action = NULL;

            enum action_tasks task = clone_child_action(first);
            const char *first_task = task2text(task);

            container_grouping_t *first_tuple = tuple_for_docker(first->rsc, first_child, node);
            container_grouping_t *then_tuple = tuple_for_docker(then->rsc, then_child, node);

            if (strstr(first->task, "stop") && first_tuple && first_tuple->child) {
                /* Except for 'stopped' we should be looking at the
                 * in-container resource, actions for the child will
                 * happen later and are therefor more likely to align
                 * with the user's intent.
                 */
                first_action = find_first_action(first_tuple->child->actions, NULL, first_task, node);
            } else {
                first_action = find_first_action(first_child->actions, NULL, first_task, node);
            }

            if (strstr(then->task, "mote") && then_tuple && then_tuple->child) {
                /* Promote/demote actions will never be found for the
                 * docker resource, look in the child instead
                 *
                 * Alternatively treat:
                 *  'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
                 *  'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
                 */
                then_action = find_first_action(then_tuple->child->actions, NULL, then->task, node);
            } else {
                then_action = find_first_action(then_child->actions, NULL, then->task, node);
            }

            if (first_action == NULL) {
                if (is_not_set(first_child->flags, pe_rsc_orphan)
                    && crm_str_eq(first_task, RSC_STOP, TRUE) == FALSE
                    && crm_str_eq(first_task, RSC_DEMOTE, TRUE) == FALSE) {
                    crm_err("Internal error: No action found for %s in %s (first)",
                            first_task, first_child->id);
                } else {
                    crm_trace("No action found for %s in %s%s (first)",
                              first_task, first_child->id,
                              is_set(first_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : "");
                }
                continue;
            }

            /* We're only interested if 'then' is neither stopping nor being demoted */
            if (then_action == NULL) {
                if (is_not_set(then_child->flags, pe_rsc_orphan)
                    && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE
                    && crm_str_eq(then->task, RSC_DEMOTE, TRUE) == FALSE) {
                    crm_err("Internal error: No action found for %s in %s (then)",
                            then->task, then_child->id);
                } else {
                    crm_trace("No action found for %s in %s%s (then)",
                              then->task, then_child->id,
                              is_set(then_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : "");
                }
                continue;
            }

            if (order_actions(first_action, then_action, type)) {
                crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
                          first_action->uuid, is_set(first_action->flags, pe_action_optional),
                          then_action->uuid, is_set(then_action->flags, pe_action_optional), type);
                changed |= (pe_graph_updated_first | pe_graph_updated_then);
            }

            /* Both actions are guaranteed non-NULL at this point (the NULL
             * cases 'continue' above), so always propagate the update
             */
            changed |= then_child->cmds->update_actions(first_action, then_action, node,
                                                        first_child->cmds->action_flags(first_action, node),
                                                        filter, type);
        }
    }

    if (children != then->rsc->children) {
        g_list_free(children);
    }
    return changed;
}
/* Decide whether an ordering between 'first' and 'then' should be applied
 * per-instance (interleaved) rather than to the collectives as wholes.
 *
 * Returns TRUE only when both actions belong to different collective
 * resources and the relevant resource's "interleave" meta-attribute is
 * true.
 */
bool can_interleave_actions(pe_action_t *first, pe_action_t *then)
{
    bool interleave = FALSE;
    resource_t *rsc = NULL;
    const char *interleave_s = NULL;

    if (first->rsc == NULL || then->rsc == NULL) {
        crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
        return FALSE;
    }
    if (first->rsc == then->rsc) {
        crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
        return FALSE;
    }
    if (first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
        crm_trace("Not interleaving %s with %s (both sides must be clones, masters, or bundles)", first->uuid, then->uuid);
        return FALSE;
    }

    /* Read the interleave setting from the side that is not on its way
     * down (stopping/demoting)
     */
    if (crm_ends_with(then->uuid, "_stop_0") || crm_ends_with(then->uuid, "_demote_0")) {
        rsc = first->rsc;
    } else {
        rsc = then->rsc;
    }

    interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
    interleave = crm_is_true(interleave_s);
    crm_trace("Interleave %s -> %s: %s (based on %s)",
              first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);

    return interleave;
}
/* Resource-command hook: propagate an ordering/flag update to a bundle.
 *
 * If the two actions qualify for interleaving, instances are paired and
 * updated individually; otherwise the bundle is updated like a primitive
 * and the update is then repeated for each child (or container).
 *
 * Returns pe_graph_flags describing which side(s) of the ordering changed.
 */
enum pe_graph_flags
container_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags,
                         enum pe_action_flags filter, enum pe_ordering type)
{
    enum pe_graph_flags changed = pe_graph_none;

    crm_trace("%s -> %s", first->uuid, then->uuid);

    if(can_interleave_actions(first, then)) {
        changed = container_update_interleave_actions(first, then, node, flags, filter, type);

    } else if(then->rsc) {
        GListPtr gIter = NULL;
        GListPtr children = NULL;

        // Handle the 'primitive' ordering case
        changed |= native_update_actions(first, then, node, flags, filter, type);

        // Now any children (or containers in the case of a bundle)
        children = get_containers_or_children(then->rsc);
        for (gIter = children; gIter != NULL; gIter = gIter->next) {
            resource_t *then_child = (resource_t *) gIter->data;
            enum pe_graph_flags then_child_changed = pe_graph_none;
            action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);

            if (then_child_action) {
                enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node);

                /* Only runnable child actions can be affected by 'first' */
                if (is_set(then_child_flags, pe_action_runnable)) {
                    then_child_changed |=
                        then_child->cmds->update_actions(first, then_child_action, node, flags, filter, type);
                }
                changed |= then_child_changed;
                if (then_child_changed & pe_graph_updated_then) {
                    /* The child action changed, so everything ordered
                     * after it must be re-evaluated
                     */
                    for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
                        action_wrapper_t *next = (action_wrapper_t *) lpc->data;

                        update_action(next->action);
                    }
                }
            }
        }

        /* get_containers_or_children() may have built a fresh list */
        if(children != then->rsc->children) {
            g_list_free(children);
        }
    }
    return changed;
}
/* Apply a location constraint to a bundle: to the bundle itself, to each
 * replica's docker and IP resources, and - for role-specific constraints -
 * to the contained resource as well.
 */
void
container_rsc_location(resource_t * rsc, rsc_to_node_t * constraint)
{
    container_variant_data_t *container_data = NULL;
    GListPtr iter = NULL;

    get_container_variant_data(container_data, rsc);
    pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);

    native_rsc_location(rsc, constraint);

    for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *) iter->data;

        if (tuple->docker != NULL) {
            tuple->docker->cmds->rsc_location(tuple->docker, constraint);
        }
        if (tuple->ip != NULL) {
            tuple->ip->cmds->rsc_location(tuple->ip, constraint);
        }
    }

    /* Master/slave role constraints apply to the resource inside the
     * containers, not just the containers themselves
     */
    if ((container_data->child != NULL)
        && ((constraint->role_filter == RSC_ROLE_SLAVE)
            || (constraint->role_filter == RSC_ROLE_MASTER))) {
        container_data->child->cmds->rsc_location(container_data->child, constraint);
        container_data->child->rsc_location = g_list_prepend(container_data->child->rsc_location,
                                                             constraint);
    }
}
void
container_expand(resource_t * rsc, pe_working_set_t * data_set)
{
container_variant_data_t *container_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_container_variant_data(container_data, rsc);
if(container_data->child) {
container_data->child->cmds->expand(container_data->child, data_set);
}
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
-
CRM_ASSERT(tuple);
- if (tuple->docker && tuple->remote && tuple->docker->allocated_to
- && fix_remote_addr(tuple->remote)) {
+ if (tuple->remote && tuple->docker && fix_remote_addr(tuple->remote)) {
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
+ pe_node_t *node = tuple->docker->allocated_to;
xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR);
- g_hash_table_replace(tuple->remote->parameters, strdup("addr"), strdup(tuple->docker->allocated_to->details->uname));
- crm_xml_add(nvpair, "value", tuple->docker->allocated_to->details->uname);
+ if(node == NULL && tuple->docker->running_on) {
+ /* If it wont be running anywhere after the
+ * transition, go with where it's running now.
+ */
+ node = tuple->docker->running_on->data;
+ }
+
+ if(node != NULL) {
+ g_hash_table_replace(tuple->remote->parameters, strdup("addr"), strdup(node->details->uname));
+ crm_xml_add(nvpair, "value", node->details->uname);
+
+ } else if(fix_remote_addr(tuple->remote)) {
+ crm_trace("Cannot fix address for %s", tuple->remote->id);
+ }
}
if(tuple->ip) {
tuple->ip->cmds->expand(tuple->ip, data_set);
}
if(tuple->docker) {
tuple->docker->cmds->expand(tuple->docker, data_set);
}
if(tuple->remote) {
tuple->remote->cmds->expand(tuple->remote, data_set);
}
}
}
gboolean
container_create_probe(resource_t * rsc, node_t * node, action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
bool any_created = FALSE;
container_variant_data_t *container_data = NULL;
CRM_CHECK(rsc != NULL, return FALSE);
get_container_variant_data(container_data, rsc);
for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
container_grouping_t *tuple = (container_grouping_t *)gIter->data;
CRM_ASSERT(tuple);
if(tuple->ip) {
any_created |= tuple->ip->cmds->create_probe(tuple->ip, node, complete, force, data_set);
}
if(tuple->child && node->details == tuple->node->details) {
any_created |= tuple->child->cmds->create_probe(tuple->child, node, complete, force, data_set);
}
if(tuple->docker) {
bool created = tuple->docker->cmds->create_probe(tuple->docker, node, complete, force, data_set);
if(created) {
any_created = TRUE;
/* If we're limited to one replica per host (due to
* the lack of an IP range probably), then we don't
* want any of our peer containers starting until
* we've established that no other copies are already
* running.
*
* Partly this is to ensure that replicas_per_host is
* observed, but also to ensure that the containers
* don't fail to start because the necessary port
* mappings (which won't include an IP for uniqueness)
* are already taken
*/
for (GListPtr tIter = container_data->tuples; tIter != NULL && container_data->replicas_per_host == 1; tIter = tIter->next) {
container_grouping_t *other = (container_grouping_t *)tIter->data;
if ((other != tuple) && (other != NULL)
&& (other->docker != NULL)) {
custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_STATUS, 0), NULL,
other->docker, generate_op_key(other->docker->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
}
}
}
}
- if(tuple->remote) {
- any_created |= tuple->remote->cmds->create_probe(tuple->remote, node, complete, force, data_set);
+ if(tuple->remote && tuple->remote->cmds->create_probe(tuple->remote, node, complete, force, data_set)) {
+ /* Do not probe the remote resource until we know where docker is running
+ * Required for REMOTE_CONTAINER_HACK to correctly probe remote resources
+ */
+ char *probe_uuid = generate_op_key(tuple->remote->id, RSC_STATUS, 0);
+ action_t *probe = find_first_action(tuple->remote->actions, probe_uuid, NULL, node);
+ any_created = TRUE;
+
+ crm_trace("Ordering %s probe on %s", tuple->remote->id, node->details->uname);
+ custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_START, 0), NULL,
+ tuple->remote, NULL, probe, pe_order_probe, data_set);
+ free(probe_uuid);
}
}
return any_created;
}
/* Resource-command hook: bundles add no meta-attributes of their own to
 * action XML, so this is intentionally a no-op.
 */
void
container_append_meta(resource_t * rsc, xmlNode * xml)
{
}
/* Resource-command hook: bundles delegate node-weight merging to the
 * generic rsc_merge_weights() implementation unchanged.
 *
 * Returns the merged nodes table as produced by rsc_merge_weights().
 */
GHashTable *
container_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                        float factor, enum pe_weights flags)
{
    return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
}
/* Log the planned actions for every component (IP, docker, remote, child)
 * of each replica in the bundle.
 */
void container_LogActions(
    resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
    container_variant_data_t *container_data = NULL;
    GListPtr iter = NULL;

    CRM_CHECK(rsc != NULL, return);

    get_container_variant_data(container_data, rsc);
    for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
        container_grouping_t *tuple = (container_grouping_t *) iter->data;

        CRM_ASSERT(tuple);
        if (tuple->ip != NULL) {
            LogActions(tuple->ip, data_set, terminal);
        }
        if (tuple->docker != NULL) {
            LogActions(tuple->docker, data_set, terminal);
        }
        if (tuple->remote != NULL) {
            LogActions(tuple->remote, data_set, terminal);
        }
        if (tuple->child != NULL) {
            LogActions(tuple->child, data_set, terminal);
        }
    }
}
diff --git a/pengine/graph.c b/pengine/graph.c
index 822e1c7ea4..e3ee4fc1ea 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -1,1722 +1,1742 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <allocate.h>
#include <utils.h>
void update_colo_start_chain(action_t * action);
gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type);
/* Compute the effective flags for an action.  Actions without a resource
 * use their own flags; resource actions ask the resource's methods, with
 * a clone-specific fix-up for the runnable bit.
 */
static enum pe_action_flags
get_action_flags(action_t * action, node_t * node)
{
    enum pe_action_flags flags = action->flags;

    if (action->rsc == NULL) {
        return flags;
    }

    flags = action->rsc->cmds->action_flags(action, NULL);

    if (pe_rsc_is_clone(action->rsc) && node) {
        /* We only care about activity on $node */
        enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node);

        /* Go to great lengths to ensure the correct value for pe_action_runnable...
         *
         * If we are a clone, then for _ordering_ constraints, it's only relevant
         * if we are runnable _anywhere_.
         *
         * This only applies to _runnable_ though, and only for ordering constraints.
         * If this function is ever used during colocation, then we'll need additional logic
         *
         * Not very satisfying, but it's logical and appears to work well.
         */
        if (is_not_set(clone_flags, pe_action_runnable)
            && is_set(flags, pe_action_runnable)) {
            pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid);
            set_bit(clone_flags, pe_action_runnable);
        }
        flags = clone_flags;
    }
    return flags;
}
/* Convert the UUID of an action on a collective resource into the UUID of
 * the matching completion pseudo-action (or confirmed-post notify key when
 * notifications are enabled).
 *
 * old_uuid:      operation key to convert (may be NULL)
 * rsc:           resource the action belongs to (must not be NULL)
 * allow_notify:  whether a notify key may be generated
 * free_original: if TRUE, old_uuid is freed before returning
 *
 * Returns a newly allocated string the caller must free; when no
 * conversion applies (notify keys, primitives, recurring ops, or
 * unconvertible tasks) a copy of old_uuid is returned instead.
 */
static char *
convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify,
                        gboolean free_original)
{
    int interval = 0;
    char *uuid = NULL;
    char *rid = NULL;
    char *raw_task = NULL;
    int task = no_action;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "Processing %s", old_uuid);
    if (old_uuid == NULL) {
        return NULL;

    } else if (strstr(old_uuid, "notify") != NULL) {
        goto done;              /* no conversion */

    } else if (rsc->variant < pe_group) {
        goto done;              /* no conversion: primitives have no completion actions */
    }

    CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval));
    if (interval > 0) {
        goto done;              /* no conversion: recurring operations */
    }

    task = text2task(raw_task);
    switch (task) {
        case stop_rsc:
        case start_rsc:
        case action_notify:
        case action_promote:
        case action_demote:
            break;
        case stopped_rsc:
        case started_rsc:
        case action_notified:
        case action_promoted:
        case action_demoted:
            /* Already a completion action: step back to the base action.
             * NOTE(review): this and the 'task + 1' below assume each
             * completion value directly follows its base value in the
             * action_tasks enum - confirm against its definition.
             */
            task--;
            break;
        case monitor_rsc:
        case shutdown_crm:
        case stonith_node:
            /* These have no collective completion counterpart */
            task = no_action;
            break;
        default:
            crm_err("Unknown action: %s", raw_task);
            task = no_action;
            break;
    }

    if (task != no_action) {
        if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) {
            uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1));

        } else {
            uuid = generate_op_key(rid, task2text(task + 1), 0);
        }
        pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid);
    }

  done:
    /* Fall back to a plain copy when no conversion was produced */
    if (uuid == NULL) {
        uuid = strdup(old_uuid);
    }

    if (free_original) {
        free(old_uuid);
    }

    free(raw_task);
    free(rid);
    return uuid;
}
/* Map an action on a collective resource to its completion equivalent
 * (e.g. expand 'start' to 'started') when such an action exists.
 *
 * Returns the expanded action, or the original action when no expansion
 * applies or the expanded action cannot be found.
 */
static action_t *
rsc_expand_action(action_t * action)
{
    resource_t *rsc = action->rsc;
    action_t *result = action;
    gboolean notify = FALSE;

    if (rsc == NULL) {
        return action;
    }

    if ((rsc->parent == NULL)
        || (pe_rsc_is_clone(rsc) && (rsc->parent->variant == pe_container))) {
        /* Only outermost resources have notification actions.
         * The exception is those in bundles.
         */
        notify = is_set(rsc->flags, pe_rsc_notify);
    }

    if (rsc->variant >= pe_group) {
        /* Expand 'start' -> 'started' */
        char *uuid = convert_non_atomic_uuid(action->uuid, rsc, notify, FALSE);

        if (uuid != NULL) {
            pe_rsc_trace(rsc, "Converting %s to %s %d", action->uuid, uuid,
                         is_set(rsc->flags, pe_rsc_notify));
            result = find_first_action(rsc->actions, uuid, NULL, NULL);
            if (result == NULL) {
                crm_err("Couldn't expand %s to %s in %s", action->uuid, uuid, rsc->id);
                result = action;
            }
            free(uuid);
        }
    }
    return result;
}
static enum pe_graph_flags
graph_update_action(action_t * first, action_t * then, node_t * node,
enum pe_action_flags first_flags, enum pe_action_flags then_flags,
enum pe_ordering type)
{
enum pe_graph_flags changed = pe_graph_none;
gboolean processed = FALSE;
/* TODO: Do as many of these in parallel as possible */
if (type & pe_order_implies_then) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional,
pe_action_optional, pe_order_implies_then);
} else if (is_set(first_flags, pe_action_optional) == FALSE) {
if (update_action_flags(then, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__)) {
changed |= pe_graph_updated_then;
}
}
if (changed) {
pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc);
}
}
if ((type & pe_order_restart) && then->rsc) {
enum pe_action_flags restart = (pe_action_optional | pe_action_runnable);
processed = TRUE;
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags, restart, pe_order_restart);
if (changed) {
pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("restart: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_implies_first) {
processed = TRUE;
if (first->rsc) {
changed |=
first->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_optional, pe_order_implies_first);
} else if (is_set(first_flags, pe_action_optional) == FALSE) {
pe_rsc_trace(first->rsc, "first unrunnable: %s (%d) then %s (%d)",
first->uuid, is_set(first_flags, pe_action_optional),
then->uuid, is_set(then_flags, pe_action_optional));
if (update_action_flags(first, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
changed |= pe_graph_updated_first;
}
}
if (changed) {
pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("implies left: %s (%d) then %s (%d)",
first->uuid, is_set(first_flags, pe_action_optional),
then->uuid, is_set(then_flags, pe_action_optional));
}
}
if (type & pe_order_implies_first_master) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional,
pe_action_optional, pe_order_implies_first_master);
}
if (changed) {
pe_rsc_trace(then->rsc,
"implies left when right rsc is Master role: %s then %s: changed",
first->uuid, then->uuid);
} else {
crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid,
then->uuid);
}
}
if (type & pe_order_one_or_more) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_runnable, pe_order_one_or_more);
} else if (is_set(first_flags, pe_action_runnable)) {
/* alright. a "first" action is considered runnable, incremente
* the 'runnable_before' counter */
then->runnable_before++;
/* if the runnable before count for then exceeds the required number
* of "before" runnable actions... mark then as runnable */
if (then->runnable_before >= then->required_runnable_before) {
if (update_action_flags(then, pe_action_runnable, __FUNCTION__, __LINE__)) {
changed |= pe_graph_updated_then;
}
}
}
if (changed) {
pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid,
then->uuid);
} else {
crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid);
}
}
+ if (then->rsc && is_set(type, pe_order_probe)) {
+ processed = TRUE;
+
+ if (is_not_set(first_flags, pe_action_runnable) && first->rsc->running_on != NULL) {
+ pe_rsc_trace(then->rsc, "Ignoring %s then %s - %s is about to be stopped",
+ first->uuid, then->uuid, first->rsc->id);
+
+ } else {
+ pe_rsc_trace(then->rsc, "Enforcing %s then %s", first->uuid, then->uuid);
+ changed |= then->rsc->cmds->update_actions(first, then, node, first_flags,
+ pe_action_runnable, pe_order_runnable_left);
+ }
+
+ if (changed) {
+ pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
+ } else {
+ crm_trace("runnable: %s then %s", first->uuid, then->uuid);
+ }
+ }
+
if (type & pe_order_runnable_left) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_runnable, pe_order_runnable_left);
} else if (is_set(first_flags, pe_action_runnable) == FALSE) {
pe_rsc_trace(then->rsc, "then unrunnable: %s then %s", first->uuid, then->uuid);
if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
changed |= pe_graph_updated_then;
}
}
if (changed) {
pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("runnable: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_implies_first_migratable) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_optional, pe_order_implies_first_migratable);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_pseudo_left) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_optional, pe_order_pseudo_left);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_optional) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_runnable, pe_order_optional);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_asymmetrical) {
processed = TRUE;
if (then->rsc) {
changed |=
then->rsc->cmds->update_actions(first, then, node, first_flags,
pe_action_runnable, pe_order_asymmetrical);
}
if (changed) {
pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid);
}
}
if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed)
&& (first_flags & pe_action_optional) == 0) {
processed = TRUE;
crm_trace("%s implies %s printed", first->uuid, then->uuid);
update_action_flags(then, pe_action_print_always, __FUNCTION__, __LINE__); /* don't care about changed */
}
if (is_set(type, pe_order_implies_first_printed) && is_set(then_flags, pe_action_optional) == FALSE) {
processed = TRUE;
crm_trace("%s implies %s printed", then->uuid, first->uuid);
update_action_flags(first, pe_action_print_always, __FUNCTION__, __LINE__); /* don't care about changed */
}
if ((type & pe_order_implies_then
|| type & pe_order_implies_first
|| type & pe_order_restart)
&& first->rsc
&& safe_str_eq(first->task, RSC_STOP)
&& is_not_set(first->rsc->flags, pe_rsc_managed)
&& is_set(first->rsc->flags, pe_rsc_block)
&& is_not_set(first->flags, pe_action_runnable)) {
if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
changed |= pe_graph_updated_then;
}
if (changed) {
pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid);
}
}
if (processed == FALSE) {
crm_trace("Constraint 0x%.6x not applicable", type);
}
return changed;
}
/* Mark every still-runnable start action of rsc as unrunnable because of
 * a colocation with 'reason', then propagate the change through the
 * colocation chain and the ordering graph.
 */
static void
mark_start_blocked(resource_t *rsc, resource_t *reason)
{
    char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
    GListPtr iter = NULL;

    for (iter = rsc->actions; iter != NULL; iter = iter->next) {
        action_t *action = (action_t *) iter->data;

        if (safe_str_neq(action->task, RSC_START)) {
            continue;
        }
        if (is_set(action->flags, pe_action_runnable)) {
            pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, reason_text,
                                      pe_action_runnable, FALSE);
            update_colo_start_chain(action);
            update_action(action);
        }
    }
    free(reason_text);
}
/* When a start action has become unrunnable, block the start of every
 * resource that is mandatorily colocated with its (top-level) resource,
 * so the whole colocation chain settles consistently.
 */
void
update_colo_start_chain(action_t *action)
{
    GListPtr gIter = NULL;
    resource_t *rsc = NULL;

    /* Only unrunnable start actions trigger propagation; otherwise rsc
     * stays NULL and the guard below returns early
     */
    if (is_not_set(action->flags, pe_action_runnable) && safe_str_eq(action->task, RSC_START)) {
        rsc = uber_parent(action->rsc);
        if (rsc->parent) {
            /*
             * This is a bundle and uber_parent() returns the
             * container/clone/master not the bundle itself, so if
             * rsc->parent is not NULL, we must be in a bundle.
             *
             * Here we need the bundle so that we can check if all
             * containers are stopped/stopping.
             */
            rsc = rsc->parent;
        }
    }

    if (rsc == NULL || rsc->rsc_cons_lhs == NULL) {
        return;
    }

    /* if rsc has children, all the children need to have start set to
     * unrunnable before we follow the colo chain for the parent. */
    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        resource_t *child = (resource_t *)gIter->data;
        action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL);

        /* Any child still runnable (or without a start action) stops the
         * propagation
         */
        if (start == NULL || is_set(start->flags, pe_action_runnable)) {
            return;
        }
    }

    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *colocate_with = (rsc_colocation_t *)gIter->data;

        /* Only mandatory (INFINITY) colocations force the dependent side
         * to be blocked as well
         */
        if (colocate_with->score == INFINITY) {
            mark_start_blocked(colocate_with->rsc_lh, action->rsc);
        }
    }
}
gboolean
update_action(action_t * then)
{
GListPtr lpc = NULL;
enum pe_graph_flags changed = pe_graph_none;
int last_flags = then->flags;
crm_trace("Processing %s (%s %s %s)",
then->uuid,
is_set(then->flags, pe_action_optional) ? "optional" : "required",
is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable",
is_set(then->flags,
pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : "");
if (is_set(then->flags, pe_action_requires_any)) {
/* initialize current known runnable before actions to 0
* from here as graph_update_action is called for each of
* then's before actions, this number will increment as
* runnable 'first' actions are encountered */
then->runnable_before = 0;
/* for backwards compatibility with previous options that use
* the 'requires_any' flag, initialize required to 1 if it is
* not set. */
if (then->required_runnable_before == 0) {
then->required_runnable_before = 1;
}
pe_clear_action_bit(then, pe_action_runnable);
/* We are relying on the pe_order_one_or_more clause of
* graph_update_action(), called as part of the:
*
* 'if (first == other->action)'
*
* block below, to set this back if appropriate
*/
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
action_wrapper_t *other = (action_wrapper_t *) lpc->data;
action_t *first = other->action;
node_t *then_node = then->node;
node_t *first_node = first->node;
enum pe_action_flags then_flags = 0;
enum pe_action_flags first_flags = 0;
if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) {
first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
if (first_node) {
crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid);
}
}
if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) {
then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
if (then_node) {
crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid);
}
}
/* Disable constraint if it only applies when on same node, but isn't */
if (is_set(other->type, pe_order_same_node) && first_node && then_node
&& (first_node->details != then_node->details)) {
crm_trace("Disabled constraint %s on %s -> %s on %s",
other->action->uuid, first_node->details->uname,
then->uuid, then_node->details->uname);
other->type = pe_order_none;
continue;
}
clear_bit(changed, pe_graph_updated_first);
if (first->rsc && then->rsc && (first->rsc != then->rsc)
&& (is_parent(then->rsc, first->rsc) == FALSE)) {
first = rsc_expand_action(first);
}
if (first != other->action) {
crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid,
other->action->uuid);
}
first_flags = get_action_flags(first, then_node);
then_flags = get_action_flags(then, first_node);
crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x",
then->uuid,
is_set(then_flags, pe_action_optional) ? "optional" : "required",
is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable",
is_set(then_flags,
pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->
uname : "", first->uuid, is_set(first_flags,
pe_action_optional) ? "optional" : "required",
is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable",
is_set(first_flags,
pe_action_pseudo) ? "pseudo" : first->node ? first->node->details->
uname : "", first_flags, other->type);
if (first == other->action) {
/*
* 'first' was not expanded (e.g. from 'start' to 'running'), which could mean it:
* - has no associated resource,
* - was a primitive,
* - was pre-expanded (e.g. 'running' instead of 'start')
*
* The third argument here to graph_update_action() is a node which is used under two conditions:
* - Interleaving, in which case first->node and
* then->node are equal (and NULL)
* - If 'then' is a clone, to limit the scope of the
* constraint to instances on the supplied node
*
*/
int otype = other->type;
node_t *node = then->node;
if(is_set(otype, pe_order_implies_then_on_node)) {
/* Normally we want the _whole_ 'then' clone to
* restart if 'first' is restarted, so then->node is
* needed.
*
* However for unfencing, we want to limit this to
* instances on the same node as 'first' (the
* unfencing operation), so first->node is supplied.
*
* Swap the node, from then on we can treat it
* like any other 'pe_order_implies_then'
*/
clear_bit(otype, pe_order_implies_then_on_node);
set_bit(otype, pe_order_implies_then);
node = first->node;
}
clear_bit(first_flags, pe_action_pseudo);
changed |= graph_update_action(first, then, node, first_flags, then_flags, otype);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->type)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
changed |= (pe_graph_updated_then | pe_graph_disable);
}
if (changed & pe_graph_disable) {
crm_trace("Disabled constraint %s -> %s in favor of %s -> %s",
other->action->uuid, then->uuid, first->uuid, then->uuid);
clear_bit(changed, pe_graph_disable);
other->type = pe_order_none;
}
if (changed & pe_graph_updated_first) {
GListPtr lpc2 = NULL;
crm_trace("Updated %s (first %s %s %s), processing dependents ",
first->uuid,
is_set(first->flags, pe_action_optional) ? "optional" : "required",
is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable",
is_set(first->flags,
pe_action_pseudo) ? "pseudo" : first->node ? first->node->details->
uname : "");
for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) {
action_wrapper_t *other = (action_wrapper_t *) lpc2->data;
update_action(other->action);
}
update_action(first);
}
}
if (is_set(then->flags, pe_action_requires_any)) {
if (last_flags != then->flags) {
changed |= pe_graph_updated_then;
} else {
clear_bit(changed, pe_graph_updated_then);
}
}
if (changed & pe_graph_updated_then) {
crm_trace("Updated %s (then %s %s %s), processing dependents ",
then->uuid,
is_set(then->flags, pe_action_optional) ? "optional" : "required",
is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable",
is_set(then->flags,
pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->
uname : "");
if (is_set(last_flags, pe_action_runnable) && is_not_set(then->flags, pe_action_runnable)) {
update_colo_start_chain(then);
}
update_action(then);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
action_wrapper_t *other = (action_wrapper_t *) lpc->data;
update_action(other->action);
}
}
return FALSE;
}
gboolean
shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set)
{
    /* Order each relevant stop action before the node's shutdown, so that
     * every such stop counts as a prerequisite of the shutdown operation.
     */
    GListPtr iter = NULL;

    for (iter = data_set->actions; iter != NULL; iter = iter->next) {
        action_t *stop = (action_t *) iter->data;

        /* Only resource actions assigned to this node are of interest */
        if ((stop->rsc == NULL) || (stop->node == NULL)) {
            continue;
        }
        if (stop->node->details != node->details) {
            continue;
        }
        if (is_set(stop->rsc->flags, pe_rsc_maintenance)) {
            pe_rsc_trace(stop->rsc, "Skipping %s: maintenance mode", stop->uuid);
            continue;
        }
        if (node->details->maintenance) {
            pe_rsc_trace(stop->rsc, "Skipping %s: node %s is in maintenance mode",
                         stop->uuid, node->details->uname);
            continue;
        }
        if (safe_str_neq(stop->task, RSC_STOP)) {
            continue;
        }
        if (is_not_set(stop->rsc->flags, pe_rsc_managed)
            && is_not_set(stop->rsc->flags, pe_rsc_block)) {
            /*
             * If another action depends on this one, we may still end up blocking
             */
            pe_rsc_trace(stop->rsc, "Skipping %s: unmanaged", stop->uuid);
            continue;
        }

        pe_rsc_trace(stop->rsc, "Ordering %s before shutdown on %s", stop->uuid,
                     node->details->uname);
        pe_clear_action_bit(stop, pe_action_optional);
        custom_action_order(stop->rsc, NULL, stop,
                            NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op,
                            pe_order_optional | pe_order_runnable_left, data_set);
    }
    return TRUE;
}
/*!
 * \internal
 * \brief Order all actions appropriately relative to a fencing operation
 *
 * Ensure start operations of affected resources are ordered after fencing,
 * imply stop and demote operations of affected resources by marking them as
 * pseudo-actions, etc.
 *
 * \param[in]     node        Node to be fenced
 * \param[in]     stonith_op  Fencing operation
 * \param[in,out] data_set    Working set of cluster
 *
 * \return Always TRUE
 */
gboolean
stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set)
{
    GListPtr iter = NULL;

    CRM_CHECK(stonith_op != NULL, return FALSE);

    /* Per-resource ordering is delegated entirely to rsc_stonith_ordering() */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        resource_t *rsc = (resource_t *) iter->data;

        rsc_stonith_ordering(rsc, stonith_op, data_set);
    }
    return TRUE;
}
static node_t *
get_router_node(action_t *action)
{
    /* Determine which cluster node should route an action targeting a remote
     * node, accounting for the remote connection resource possibly moving.
     * Returns NULL when no routing is needed (fencing, or not a remote node).
     */
    node_t *started_on = NULL;
    node_t *moving_to = NULL;

    if (safe_str_eq(action->task, CRM_OP_FENCE) || is_remote_node(action->node) == FALSE) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    if (action->node->details->remote_rsc->running_on) {
        started_on = action->node->details->remote_rsc->running_on->data;
    }
    moving_to = action->node->details->remote_rsc->allocated_to;

    /* if there is only one location to choose from,
     * this is easy. Check for those conditions first */
    if (!started_on || !moving_to) {
        /* remote rsc is either shutting down or starting up */
        return started_on ? started_on : moving_to;
    }
    if (started_on->details == moving_to->details) {
        /* remote rsc didn't move nodes. */
        return started_on;
    }

    /* If we got here, we know the remote resource
     * began on one node and is moving to another node.
     *
     * This means some actions will get routed through the cluster
     * node the connection rsc began on, and others are routed through
     * the cluster node the connection rsc ends up on.
     *
     * 1. stop, demote, migrate actions of resources living in the remote
     *    node _MUST_ occur _BEFORE_ the connection can move (these actions
     *    are all required before the remote rsc stop action can occur.) In
     *    this case, we know these actions have to be routed through the initial
     *    cluster node the connection resource lived on before the move takes place.
     *
     * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount
     *    delete ....) must occur after the resource starts on the node it is
     *    moving to.
     */

    if (safe_str_eq(action->task, "stop")
        || safe_str_eq(action->task, "demote")
        || safe_str_eq(action->task, "migrate_from")
        || safe_str_eq(action->task, "migrate_to")) {
        /* 1. before connection rsc moves. */
        return started_on;
    }
    /* 2. after connection rsc moves. */
    return moving_to;
}
/*!
 * \internal
 * \brief Add an XML node tag for a specified ID
 *
 * \param[in]     id   Node UUID to add
 * \param[in,out] xml  Parent XML tag to add to
 *
 * \return Newly created XML child of \p xml
 */
static xmlNode*
add_node_to_xml_by_id(const char *id, xmlNode *xml)
{
    xmlNode *node_xml = create_xml_node(xml, XML_CIB_TAG_NODE);

    crm_xml_add(node_xml, XML_ATTR_UUID, id);
    return node_xml;
}
/*!
 * \internal
 * \brief Add an XML node tag for a specified node
 *
 * Thin wrapper around add_node_to_xml_by_id() with a signature suitable for
 * use as a pe_foreach_guest_node() callback.
 *
 * \param[in]     node  Node to add
 * \param[in,out] xml   XML to add node to
 */
static void
add_node_to_xml(const node_t *node, void *xml)
{
    add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
 * \internal
 * \brief Add XML with nodes that need an update of their maintenance state
 *
 * \param[in,out] xml       Parent XML tag to add to (may be NULL to only count)
 * \param[in]     data_set  Working set for cluster
 *
 * \return Number of nodes whose maintenance state is out of sync
 */
static int
add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
{
    GListPtr iter = NULL;
    xmlNode *maintenance = NULL;
    int count = 0;

    if (xml != NULL) {
        maintenance = create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE);
    }
    for (iter = data_set->nodes; iter != NULL; iter = iter->next) {
        node_t *node = (node_t *) iter->data;
        struct node_shared_s *details = node->details;

        if (!(is_remote_node(node))) {
            continue; /* just remote nodes need to know atm */
        }
        if (details->maintenance == details->remote_maintenance) {
            continue; /* already in sync, nothing to update */
        }
        if (maintenance) {
            crm_xml_add(add_node_to_xml_by_id(node->details->id, maintenance),
                        XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
        }
        count++;
    }
    crm_trace("%s %d nodes to adjust maintenance-mode "
              "to transition", maintenance?"Added":"Counted", count);
    return count;
}
/*!
 * \internal
 * \brief Add pseudo action with nodes needing maintenance state update
 *
 * \param[in,out] data_set  Working set for cluster
 */
void
add_maintenance_update(pe_working_set_t *data_set)
{
    /* First pass with NULL XML just counts out-of-sync nodes */
    if (add_maintenance_nodes(NULL, data_set) > 0) {
        action_t *action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);

        crm_trace("adding maintenance state update pseudo action");
        set_bit(action->flags, pe_action_print_always);
    }
}
/*!
 * \internal
 * \brief Add XML with nodes that an action is expected to bring down
 *
 * If a specified action is expected to bring any nodes down, add an XML block
 * with their UUIDs. When a node is lost, this allows the crmd to determine
 * whether it was expected.
 *
 * \param[in,out] xml       Parent XML tag to add to
 * \param[in]     action    Action to check for downed nodes
 * \param[in]     data_set  Working set for cluster
 */
static void
add_downed_nodes(xmlNode *xml, const action_t *action,
                 const pe_working_set_t *data_set)
{
    CRM_CHECK(xml && action && action->node && data_set, return);

    if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
        /* Shutdown makes the action's node down */
        xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);

        add_node_to_xml_by_id(action->node->details->id, downed);

    } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
        /* Fencing makes the action's node and any hosted guest nodes down */
        const char *fence = g_hash_table_lookup(action->meta, "stonith_action");

        if (safe_str_eq(fence, "off") || safe_str_eq(fence, "reboot")) {
            xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);

            add_node_to_xml_by_id(action->node->details->id, downed);
            pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
        }

    } else if (action->rsc && action->rsc->is_remote_node
               && safe_str_eq(action->task, CRMD_ACTION_STOP)) {

        /* Stopping a remote connection resource makes connected node down,
         * unless it's part of a migration
         */
        GListPtr iter = NULL;
        gboolean migrating = FALSE;

        /* A successful migrate_from of the same connection resource among
         * this stop's inputs indicates a migration rather than a shutdown */
        for (iter = action->actions_before; iter != NULL; iter = iter->next) {
            action_t *before = ((action_wrapper_t *) iter->data)->action;

            if (before->rsc && safe_str_eq(action->rsc->id, before->rsc->id)
                && safe_str_eq(before->task, CRMD_ACTION_MIGRATED)) {
                migrating = TRUE;
                break;
            }
        }
        if (migrating == FALSE) {
            xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);

            add_node_to_xml_by_id(action->rsc->id, downed);
        }
    }
}
/*!
 * \internal
 * \brief Create transition graph XML for a scheduled action
 *
 * The XML tag chosen depends on the action type (crm_event, pseudo_event, or
 * rsc_op). Unless \p as_input is TRUE, the result also carries the affected
 * resource, the attributes/meta-attributes in effect, and any nodes the
 * action is expected to bring down.
 *
 * \param[in]     action    Action to represent (may be NULL)
 * \param[in]     as_input  If TRUE, return a minimal tag for the inputs
 *                          section only (no resource or attribute details)
 * \param[in,out] data_set  Working set of cluster
 *
 * \return Newly created XML (caller is responsible for freeing), or NULL
 */
static xmlNode *
action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
{
    gboolean needs_node_info = TRUE;
    gboolean needs_maintenance_info = FALSE;
    xmlNode *action_xml = NULL;
    xmlNode *args_xml = NULL;
#if ENABLE_VERSIONED_ATTRS
    pe_rsc_action_details_t *rsc_details = NULL;
#endif

    if (action == NULL) {
        return NULL;
    }

    /* Select the graph tag according to the action type */
    if (safe_str_eq(action->task, CRM_OP_FENCE)) {
        /* All fences need node info; guest node fences are pseudo-events */
        action_xml = create_xml_node(NULL,
                                     is_set(action->flags, pe_action_pseudo)?
                                     XML_GRAPH_TAG_PSEUDO_EVENT :
                                     XML_GRAPH_TAG_CRM_EVENT);

    } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

    } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

    } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

/*  } else if(safe_str_eq(action->task, RSC_PROBED)) { */
/*      action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */

    } else if (is_set(action->flags, pe_action_pseudo)) {
        if (safe_str_eq(action->task, CRM_OP_MAINTENANCE_NODES)) {
            needs_maintenance_info = TRUE;
        }
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT);
        needs_node_info = FALSE;

    } else {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
#if ENABLE_VERSIONED_ATTRS
        rsc_details = pe_rsc_action_details(action);
#endif
    }

    crm_xml_add_int(action_xml, XML_ATTR_ID, action->id);
    crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);

    if (action->rsc != NULL && action->rsc->clone_name != NULL) {
        char *clone_key = NULL;
        const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
        int interval = crm_parse_int(interval_s, "0");

        /* Use the clone instance name (clone_name) for the operation key,
         * keeping the instance-free uuid available under an "internal_" key */
        if (safe_str_eq(action->task, RSC_NOTIFY)) {
            const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
            const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");

            CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid));
            CRM_CHECK(n_task != NULL,
                      crm_err("No notify operation value found for %s", action->uuid));
            clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task);

        } else if(action->cancel_task) {
            clone_key = generate_op_key(action->rsc->clone_name, action->cancel_task, interval);
        } else {
            clone_key = generate_op_key(action->rsc->clone_name, action->task, interval);
        }

        CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid));
        crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
        crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
        free(clone_key);

    } else {
        crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
    }

    if (needs_node_info && action->node != NULL) {
        /* get_router_node() is non-NULL only for remote-node actions that
         * must be relayed through a cluster node */
        node_t *router_node = get_router_node(action);

        crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
        crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
        if (router_node) {
            crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
        }

        g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET), strdup(action->node->details->uname));
        g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID), strdup(action->node->details->id));
    }

    /* No details if this action is only being listed in the inputs section */
    if (as_input) {
        return action_xml;
    }

    /* List affected resource */
    if (action->rsc) {
        if (is_set(action->flags, pe_action_pseudo) == FALSE) {
            int lpc = 0;
            xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));
            const char *attr_list[] = {
                XML_AGENT_ATTR_CLASS,
                XML_AGENT_ATTR_PROVIDER,
                XML_ATTR_TYPE
            };

            if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) {
                /* Do not use the 'instance free' name here as that
                 * might interfere with the instance we plan to keep.
                 * Ie. if there are more than two named /anonymous/
                 * instances on a given node, we need to make sure the
                 * command goes to the right one.
                 *
                 * Keep this block, even when everyone is using
                 * 'instance free' anonymous clone names - it means
                 * we'll do the right thing if anyone toggles the
                 * unique flag to 'off'
                 */
                crm_debug("Using orphan clone name %s instead of %s", action->rsc->id,
                          action->rsc->clone_name);
                crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
                crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);

            } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) {
                const char *xml_id = ID(action->rsc->xml);

                crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id,
                          action->rsc->clone_name);

                /* ID is what we'd like client to use
                 * ID_LONG is what they might know it as instead
                 *
                 * ID_LONG is only strictly needed /here/ during the
                 * transition period until all nodes in the cluster
                 * are running the new software /and/ have rebooted
                 * once (meaning that they've only ever spoken to a DC
                 * supporting this feature).
                 *
                 * If anyone toggles the unique flag to 'on', the
                 * 'instance free' name will correspond to an orphan
                 * and fall into the clause above instead
                 */
                crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
                if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) {
                    crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
                } else {
                    crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
                }

            } else {
                CRM_ASSERT(action->rsc->clone_name == NULL);
                crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
            }

            for (lpc = 0; lpc < DIMOF(attr_list); lpc++) {
                crm_xml_add(rsc_xml, attr_list[lpc],
                            g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
            }
        }
    }

    /* List any attributes in effect */
    args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
    crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);

    g_hash_table_foreach(action->extra, hash2field, args_xml);
    if (action->rsc != NULL && action->node) {
        /* Resource attributes evaluated for the specific target node */
        GHashTable *p = crm_str_table_new();

        get_rsc_attributes(p, action->rsc, action->node, data_set);
        g_hash_table_foreach(p, hash2smartfield, args_xml);
        g_hash_table_destroy(p);

#if ENABLE_VERSIONED_ATTRS
        {
            xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);

            pe_get_versioned_attributes(versioned_parameters, action->rsc,
                                        action->node, data_set);
            if (xml_has_children(versioned_parameters)) {
                add_node_copy(action_xml, versioned_parameters);
            }
            free_xml(versioned_parameters);
        }
#endif

    } else if(action->rsc && action->rsc->variant <= pe_native) {
        /* No node available; fall back to the resource's stored parameters */
        g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml);

#if ENABLE_VERSIONED_ATTRS
        if (xml_has_children(action->rsc->versioned_parameters)) {
            add_node_copy(action_xml, action->rsc->versioned_parameters);
        }
#endif
    }

#if ENABLE_VERSIONED_ATTRS
    if (rsc_details) {
        if (xml_has_children(rsc_details->versioned_parameters)) {
            add_node_copy(action_xml, rsc_details->versioned_parameters);
        }
        if (xml_has_children(rsc_details->versioned_meta)) {
            add_node_copy(action_xml, rsc_details->versioned_meta);
        }
    }
#endif

    g_hash_table_foreach(action->meta, hash2metafield, args_xml);
    if (action->rsc != NULL) {
        int isolated = 0;
        const char *value = g_hash_table_lookup(action->rsc->meta, "external-ip");
        resource_t *parent = action->rsc;

        /* Walk up the resource hierarchy, letting each ancestor append its
         * meta-attributes and noting whether any level is isolation-wrapped */
        while (parent != NULL) {
            isolated |= parent->isolation_wrapper ? 1 : 0;
            parent->cmds->append_meta(parent, args_xml);
            parent = parent->parent;
        }

        if (isolated && action->node) {
            char *nodeattr = crm_meta_name(XML_RSC_ATTR_ISOLATION_HOST);

            crm_xml_add(args_xml, nodeattr, action->node->details->uname);
            free(nodeattr);
        }

        if(value) {
            hash2smartfield((gpointer)"pcmk_external_ip", (gpointer)value, (gpointer)args_xml);
        }

        if(is_container_remote_node(action->node)) {
            pe_node_t *host = NULL;
            enum action_tasks task = text2task(action->task);

            if(task == action_notify || task == action_notified) {
                /* For notifications, route based on the operation being
                 * notified about rather than "notify" itself */
                const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");

                task = text2task(n_task);
            }

            // Differentiate between up and down actions
            switch (task) {
                case stop_rsc:
                case stopped_rsc:
                case action_demote:
                case action_demoted:
                    /* Down actions run where the container currently is */
                    if(action->node->details->remote_rsc->container->running_on) {
                        host = action->node->details->remote_rsc->container->running_on->data;
                    }
                    break;
                case start_rsc:
                case started_rsc:
                case monitor_rsc:
                case action_promote:
                case action_promoted:
                    /* Up actions run where the container is being placed */
                    if(action->node->details->remote_rsc->container->allocated_to) {
                        host = action->node->details->remote_rsc->container->allocated_to;
                    }
                    break;
                default:
                    break;
            }

            if(host) {
                hash2metafield((gpointer)XML_RSC_ATTR_TARGET,
                               (gpointer)g_hash_table_lookup(action->rsc->meta, XML_RSC_ATTR_TARGET), (gpointer)args_xml);
                hash2metafield((gpointer)PCMK_ENV_PHYSICAL_HOST, (gpointer)host->details->uname, (gpointer)args_xml);
            }
        }

    } else if (safe_str_eq(action->task, CRM_OP_FENCE) && action->node) {
        /* Pass the node's attributes as meta-attributes.
         *
         * @TODO: Determine whether it is still necessary to do this. It was
         * added in 33d99707, probably for the libfence-based implementation in
         * c9a90bd, which is no longer used.
         */
        g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
    }

    sorted_xml(args_xml, action_xml, FALSE);
    free_xml(args_xml);

    /* List any nodes this action is expected to make down */
    if (needs_node_info && (action->node != NULL)) {
        add_downed_nodes(action_xml, action, data_set);
    }

    if (needs_maintenance_info) {
        add_maintenance_nodes(action_xml, data_set);
    }

    crm_log_xml_trace(action_xml, "dumped action");
    return action_xml;
}
/*!
 * \internal
 * \brief Check whether an action should be added to the transition graph
 *
 * \param[in] action  Action to check
 *
 * \return TRUE if the action should be dumped, FALSE otherwise
 */
static gboolean
should_dump_action(action_t * action)
{
    CRM_CHECK(action != NULL, return FALSE);

    if (is_set(action->flags, pe_action_dumped)) {
        crm_trace("action %d (%s) was already dumped", action->id, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) {
        GListPtr lpc = NULL;

        /* This is a horrible but convenient hack
         *
         * It minimizes the number of actions with unsatisfied inputs
         * (i.e. not included in the graph)
         *
         * This in turn, means we can be more concise when printing
         * aborted/incomplete graphs.
         *
         * It also makes it obvious which node is preventing
         * probe_complete from running (presumably because it is only
         * partially up)
         *
         * For these reasons we tolerate such perversions
         */

        /* Dump the probe-complete pseudo-action only if some runnable start
         * that depends on it is (or will be) dumped; note the recursion */
        for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) {
            action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data;

            if (is_not_set(wrapper->action->flags, pe_action_runnable)) {
                /* Only interested in runnable operations */
            } else if (safe_str_neq(wrapper->action->task, RSC_START)) {
                /* Only interested in start operations */
            } else if (is_set(wrapper->action->flags, pe_action_dumped)) {
                crm_trace("action %d (%s) dependency of %s",
                          action->id, action->uuid, wrapper->action->uuid);
                return TRUE;

            } else if (should_dump_action(wrapper->action)) {
                crm_trace("action %d (%s) dependency of %s",
                          action->id, action->uuid, wrapper->action->uuid);
                return TRUE;
            }
        }
    }

    if (is_set(action->flags, pe_action_runnable) == FALSE) {
        crm_trace("action %d (%s) was not runnable", action->id, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_optional)
               && is_set(action->flags, pe_action_print_always) == FALSE) {
        crm_trace("action %d (%s) was optional", action->id, action->uuid);
        return FALSE;

    } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) {
        const char *interval = NULL;

        interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);

        /* make sure probes and recurring monitors go through */
        if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) {
            crm_trace("action %d (%s) was for an unmanaged resource (%s)",
                      action->id, action->uuid, action->rsc->id);
            return FALSE;
        }
    }

    if (is_set(action->flags, pe_action_pseudo)
        || safe_str_eq(action->task, CRM_OP_FENCE)
        || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
        /* skip the next checks: these action types never need a node */
        return TRUE;
    }

    if (action->node == NULL) {
        pe_err("action %d (%s) was not allocated", action->id, action->uuid);
        log_action(LOG_DEBUG, "Unallocated action", action, FALSE);
        return FALSE;

    } else if(is_container_remote_node(action->node) && action->node->details->remote_requires_reset == FALSE) {
        crm_trace("Assuming action %s for %s will be runnable", action->uuid, action->node->details->uname);

    } else if (action->node->details->online == FALSE) {
        pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid);
        log_action(LOG_DEBUG, "Action for offline node", action, FALSE);
        return FALSE;
#if 0
        /* but this would also affect resources that can be safely
         *  migrated before a fencing op
         */
    } else if (action->node->details->unclean == FALSE) {
        pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid);
        log_action(LOG_DEBUG, "Action for unclean node", action, FALSE);
        return FALSE;
#endif
    }
    return TRUE;
}
/* GCompareFunc: sort action wrappers by action ID, lowest to highest;
 * NULL entries sort to the end */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
    const action_wrapper_t *wrapper_a = (const action_wrapper_t *) a;
    const action_wrapper_t *wrapper_b = (const action_wrapper_t *) b;

    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }
    if (wrapper_a->action->id < wrapper_b->action->id) {
        return -1;
    }
    if (wrapper_a->action->id > wrapper_b->action->id) {
        return 1;
    }
    return 0;
}
/*!
 * \internal
 * \brief Check whether an ordering input of an action should be dumped
 *
 * Applies a chain of filters to an input (ordering wrapper), possibly
 * downgrading the wrapper's type to pe_order_none or marking its state as
 * a duplicate along the way.
 *
 * \param[in]     last_action  ID of the previously processed input's action,
 *                             used for duplicate suppression (-1 to disable)
 * \param[in]     action       Action whose inputs are being considered
 * \param[in,out] wrapper      Input to check; type/state may be modified
 *
 * \return TRUE if the input should be dumped, FALSE otherwise
 */
static gboolean
check_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper)
{
    int type = wrapper->type;

    if (wrapper->state == pe_link_dumped) {
        return TRUE;

    } else if (wrapper->state == pe_link_dup) {
        return FALSE;
    }

    /* Strip ordering aspects that don't affect whether the input matters */
    type &= ~pe_order_implies_first_printed;
    type &= ~pe_order_implies_then_printed;
    type &= ~pe_order_optional;

    if (is_not_set(type, pe_order_preserve)
        && action->rsc && action->rsc->fillers
        && wrapper->action->rsc && wrapper->action->node
        && wrapper->action->node->details->remote_rsc
        && (wrapper->action->node->details->remote_rsc->container == action->rsc)) {
        /* This prevents user-defined ordering constraints between resources
         * running in a guest node and the resource that defines that node.
         */
        crm_warn("Invalid ordering constraint between %s and %s",
                 wrapper->action->rsc->id, action->rsc->id);
        wrapper->type = pe_order_none;
        return FALSE;
    }

    if (last_action == wrapper->action->id) {
        /* Same input action listed twice in a row; mark as duplicate */
        crm_trace("Input (%d) %s duplicated for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        wrapper->state = pe_link_dup;
        return FALSE;

    } else if (wrapper->type == pe_order_none) {
        crm_trace("Input (%d) %s suppressed for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE
               && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) {
        crm_trace("Input (%d) %s optional (ordering) for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE
               && is_set(type, pe_order_one_or_more)) {
        crm_trace("Input (%d) %s optional (one-or-more) for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_pseudo)
               && (wrapper->type & pe_order_stonith_stop)) {
        crm_trace("Input (%d) %s suppressed for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if ((wrapper->type & pe_order_implies_first_migratable) && (is_set(wrapper->action->flags, pe_action_runnable) == FALSE)) {
        return FALSE;

    } else if ((wrapper->type & pe_order_apply_first_non_migratable)
               && (is_set(wrapper->action->flags, pe_action_migrate_runnable))) {
        return FALSE;

    } else if ((wrapper->type == pe_order_optional)
               && crm_ends_with(wrapper->action->uuid, "_stop_0")
               && is_set(wrapper->action->flags, pe_action_migrate_runnable)) {
        /* for optional only ordering, ordering is not preserved for
         * a stop action that is actually involved with a migration. */
        return FALSE;

    } else if (wrapper->type == pe_order_load) {
        crm_trace("check load filter %s.%s -> %s.%s",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid,
                  action->node ? action->node->details->uname : "");

        if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) {
            /* Remove the orders like the following if not relevant:
             *     "load_stopped_node2" -> "rscA_migrate_to node1"
             * which were created also from: pengine/native.c: MigrateRsc()
             *     order_actions(other, then, other_w->type);
             */

            /* For migrate_to ops, we care about where it has been
             * allocated to, not where the action will be executed
             */
            if (wrapper->action->node == NULL || action->rsc->allocated_to == NULL
                || wrapper->action->node->details != action->rsc->allocated_to->details) {
                /* Check if the actions are for the same node, ignore otherwise */
                crm_trace("load filter - migrate");
                wrapper->type = pe_order_none;
                return FALSE;
            }

        } else if (wrapper->action->node == NULL || action->node == NULL
                   || wrapper->action->node->details != action->node->details) {
            /* Check if the actions are for the same node, ignore otherwise */
            crm_trace("load filter - node");
            wrapper->type = pe_order_none;
            return FALSE;

        } else if (is_set(wrapper->action->flags, pe_action_optional)) {
            /* Check if the pre-req is optional, ignore if so */
            crm_trace("load filter - optional");
            wrapper->type = pe_order_none;
            return FALSE;
        }

    } else if (wrapper->type == pe_order_anti_colocation) {
        crm_trace("check anti-colocation filter %s.%s -> %s.%s",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "",
                  action->uuid,
                  action->node ? action->node->details->uname : "");

        if (wrapper->action->node && action->node
            && wrapper->action->node->details != action->node->details) {
            /* Check if the actions are for the same node, ignore otherwise */
            crm_trace("anti-colocation filter - node");
            wrapper->type = pe_order_none;
            return FALSE;

        } else if (is_set(wrapper->action->flags, pe_action_optional)) {
            /* Check if the pre-req is optional, ignore if so */
            crm_trace("anti-colocation filter - optional");
            wrapper->type = pe_order_none;
            return FALSE;
        }

    } else if (wrapper->action->rsc
               && wrapper->action->rsc != action->rsc
               && is_set(wrapper->action->rsc->flags, pe_rsc_failed)
               && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed)
               && crm_ends_with(wrapper->action->uuid, "_stop_0")
               && action->rsc && pe_rsc_is_clone(action->rsc)) {
        crm_warn("Ignoring requirement that %s complete before %s:"
                 " unmanaged failed resources cannot prevent clone shutdown",
                 wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(wrapper->action->flags, pe_action_dumped)
               || should_dump_action(wrapper->action)) {
        crm_trace("Input (%d) %s should be dumped for %s", wrapper->action->id,
                  wrapper->action->uuid, action->uuid);
        goto dump;

#if 0
    } else if (is_set(wrapper->action->flags, pe_action_runnable)
               && is_set(wrapper->action->flags, pe_action_pseudo)
               && wrapper->action->rsc->variant != pe_native) {
        crm_crit("Input (%d) %s should be dumped for %s",
                 wrapper->action->id, wrapper->action->uuid, action->uuid);
        goto dump;
#endif

    } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE
               && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) {
        crm_trace("Input (%d) %s optional for %s", wrapper->action->id,
                  wrapper->action->uuid, action->uuid);
        crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x",
                  wrapper->action->id, wrapper->action->uuid, wrapper->action->node,
                  is_set(wrapper->action->flags, pe_action_pseudo),
                  is_set(wrapper->action->flags, pe_action_runnable),
                  is_set(wrapper->action->flags, pe_action_optional),
                  is_set(wrapper->action->flags, pe_action_print_always), wrapper->type);
        return FALSE;
    }

  dump:
    return TRUE;
}
/* Recursively check whether following the 'wrapper' input of 'action'
 * backwards can reach 'init_action' again, i.e. whether the ordering
 * would introduce a loop in the graph. The pe_action_tracking flag marks
 * actions on the current recursion path. */
static gboolean
graph_has_loop(action_t * init_action, action_t * action, action_wrapper_t * wrapper)
{
    GListPtr iter = NULL;
    gboolean found = FALSE;

    if (is_set(wrapper->action->flags, pe_action_tracking)) {
        /* Already on the current path: stop revisiting */
        crm_trace("Breaking tracking loop: %s.%s -> %s.%s (0x%.6x)",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "",
                  action->uuid,
                  action->node ? action->node->details->uname : "",
                  wrapper->type);
        return FALSE;
    }

    /* Inputs that would never be dumped cannot form a real loop */
    if (check_dump_input(-1, action, wrapper) == FALSE) {
        return FALSE;
    }

    /* If there's any order like:
     * "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1"
     * rscA is being migrated from node1 to node2,
     * while rscB is being migrated from node2 to node1.
     * There will be potential graph loop.
     * Break the order "load_stopped_node2" -> "rscA_migrate_to node1".
     */

    crm_trace("Checking graph loop: %s.%s -> %s.%s (0x%.6x)",
              wrapper->action->uuid,
              wrapper->action->node ? wrapper->action->node->details->uname : "",
              action->uuid,
              action->node ? action->node->details->uname : "",
              wrapper->type);

    if (wrapper->action == init_action) {
        crm_debug("Found graph loop: %s.%s ->...-> %s.%s",
                  action->uuid,
                  action->node ? action->node->details->uname : "",
                  init_action->uuid,
                  init_action->node ? init_action->node->details->uname : "");
        return TRUE;
    }

    set_bit(wrapper->action->flags, pe_action_tracking);
    for (iter = wrapper->action->actions_before;
         (iter != NULL) && (found == FALSE); iter = iter->next) {

        found = graph_has_loop(init_action, wrapper->action,
                               (action_wrapper_t *) iter->data);
    }
    pe_clear_action_bit(wrapper->action, pe_action_tracking);
    return found;
}
/* Decide whether the ordering edge in 'wrapper' should be emitted as an
 * input of 'action' in the transition graph.  Resets wrapper->state to
 * pe_link_not_dumped before checking.  As a side effect, a pe_order_load
 * edge into a migrate_to that would create a graph loop is downgraded to
 * pe_order_none.  Returns TRUE if the caller should dump the input.
 */
static gboolean
should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper)
{
    wrapper->state = pe_link_not_dumped;

    if (check_dump_input(last_action, action, wrapper) == FALSE) {
        return FALSE;
    }

    /* Load-balancing edges into a migration need a loop check (see
     * graph_has_loop); cross-migrations can otherwise create cycles.
     */
    if (wrapper->type == pe_order_load
        && action->rsc
        && safe_str_eq(action->task, RSC_MIGRATE)) {

        crm_trace("Checking graph loop - load migrate: %s.%s -> %s.%s",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "",
                  action->uuid,
                  action->node ? action->node->details->uname : "");

        if (graph_has_loop(action, action, wrapper)) {
            /* Drop orderings like:
             *   "load_stopped_node2" -> "rscA_migrate_to node1"
             * (created in pengine/native.c: MigrateRsc() via
             *  order_actions(other, then, other_w->type))
             * when they would introduce a graph loop.
             */
            crm_debug("Breaking graph loop - load migrate: %s.%s -> %s.%s",
                      wrapper->action->uuid,
                      wrapper->action->node ? wrapper->action->node->details->uname : "",
                      action->uuid,
                      action->node ? action->node->details->uname : "");
            wrapper->type = pe_order_none;
            return FALSE;
        }
    }

    crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s",
              wrapper->action->id,
              wrapper->action->uuid,
              wrapper->action->node,
              is_set(wrapper->action->flags, pe_action_pseudo),
              is_set(wrapper->action->flags, pe_action_runnable),
              is_set(wrapper->action->flags, pe_action_optional),
              is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid);
    return TRUE;
}
/* Emit 'action' into the transition graph as a new <synapse>: one
 * <action_set> holding the action itself and an <inputs> list holding every
 * ordering prerequisite that should_dump_input() approves.  Marks the action
 * with pe_action_dumped and each emitted input with pe_link_dumped.
 */
void
graph_element_from_action(action_t * action, pe_working_set_t * data_set)
{
    int priority = 0;
    int last_input_id = -1;
    GListPtr iter = NULL;
    xmlNode *syn = NULL;
    xmlNode *set = NULL;
    xmlNode *in = NULL;
    xmlNode *xml_action = NULL;

    if (should_dump_action(action) == FALSE) {
        return;
    }
    set_bit(action->flags, pe_action_dumped);

    syn = create_xml_node(data_set->graph, "synapse");
    set = create_xml_node(syn, "action_set");
    in = create_xml_node(syn, "inputs");

    crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
    data_set->num_synapse++;

    /* Synapse priority is the larger of the resource's and the action's own
     * priority; only positive priorities are recorded in the XML.
     */
    if (action->rsc != NULL) {
        priority = action->rsc->priority;
    }
    if (action->priority > priority) {
        priority = action->priority;
    }
    if (priority > 0) {
        crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, priority);
    }

    xml_action = action2xml(action, FALSE, data_set);
    add_node_nocopy(set, crm_element_name(xml_action), xml_action);

    /* Sort by action id so duplicate inputs collapse via the last_input_id
     * check in should_dump_input()/check_dump_input().
     */
    action->actions_before = g_list_sort(action->actions_before, sort_action_id);

    for (iter = action->actions_before; iter != NULL; iter = iter->next) {
        action_wrapper_t *wrapper = (action_wrapper_t *) iter->data;
        xmlNode *trigger = NULL;

        if (should_dump_input(last_input_id, action, wrapper) == FALSE) {
            continue;
        }
        wrapper->state = pe_link_dumped;

        /* Sorted input: ids must be strictly increasing */
        CRM_CHECK(last_input_id < wrapper->action->id,;
            );
        last_input_id = wrapper->action->id;

        trigger = create_xml_node(in, "trigger");
        xml_action = action2xml(wrapper->action, TRUE, data_set);
        add_node_nocopy(trigger, crm_element_name(xml_action), xml_action);
    }
}

File Metadata

Mime Type
text/x-diff
Expires
Sat, Jan 25, 11:35 AM (1 d, 12 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1322390
Default Alt Text
(121 KB)

Event Timeline