diff --git a/cts/scheduler/exp/594.exp b/cts/scheduler/exp/594.exp
index 9dacf33d08..cef251ad32 100644
--- a/cts/scheduler/exp/594.exp
+++ b/cts/scheduler/exp/594.exp
@@ -1,219 +1,219 @@
diff --git a/cts/scheduler/exp/829.exp b/cts/scheduler/exp/829.exp
index 3eb01f3fed..5d277e1bce 100644
--- a/cts/scheduler/exp/829.exp
+++ b/cts/scheduler/exp/829.exp
@@ -1,269 +1,269 @@
diff --git a/cts/scheduler/exp/bug-5186-partial-migrate.exp b/cts/scheduler/exp/bug-5186-partial-migrate.exp
index ec5ed5e9ec..ffacf34d8d 100644
--- a/cts/scheduler/exp/bug-5186-partial-migrate.exp
+++ b/cts/scheduler/exp/bug-5186-partial-migrate.exp
@@ -1,364 +1,364 @@
diff --git a/cts/scheduler/exp/bug-cl-5247.exp b/cts/scheduler/exp/bug-cl-5247.exp
index 68cbf7bbea..565f8c1120 100644
--- a/cts/scheduler/exp/bug-cl-5247.exp
+++ b/cts/scheduler/exp/bug-cl-5247.exp
@@ -1,560 +1,560 @@
diff --git a/cts/scheduler/exp/bug-lf-2508.exp b/cts/scheduler/exp/bug-lf-2508.exp
index c73379b979..9cc9e447b2 100644
--- a/cts/scheduler/exp/bug-lf-2508.exp
+++ b/cts/scheduler/exp/bug-lf-2508.exp
@@ -1,498 +1,498 @@
diff --git a/cts/scheduler/exp/bug-lf-2551.exp b/cts/scheduler/exp/bug-lf-2551.exp
index c08d486d95..c4e82bb891 100644
--- a/cts/scheduler/exp/bug-lf-2551.exp
+++ b/cts/scheduler/exp/bug-lf-2551.exp
@@ -1,588 +1,588 @@
diff --git a/cts/scheduler/exp/bundle-order-fencing.exp b/cts/scheduler/exp/bundle-order-fencing.exp
index c39583cead..678cd9db17 100644
--- a/cts/scheduler/exp/bundle-order-fencing.exp
+++ b/cts/scheduler/exp/bundle-order-fencing.exp
@@ -1,1868 +1,1868 @@
diff --git a/cts/scheduler/exp/clone-requires-quorum-recovery.exp b/cts/scheduler/exp/clone-requires-quorum-recovery.exp
index ddf22dccc4..1a5be0a1a7 100644
--- a/cts/scheduler/exp/clone-requires-quorum-recovery.exp
+++ b/cts/scheduler/exp/clone-requires-quorum-recovery.exp
@@ -1,101 +1,101 @@
diff --git a/cts/scheduler/exp/clone-requires-quorum.exp b/cts/scheduler/exp/clone-requires-quorum.exp
index 9f532fd23f..182ec48cb0 100644
--- a/cts/scheduler/exp/clone-requires-quorum.exp
+++ b/cts/scheduler/exp/clone-requires-quorum.exp
@@ -1,48 +1,48 @@
diff --git a/cts/scheduler/exp/dc-fence-ordering.exp b/cts/scheduler/exp/dc-fence-ordering.exp
index 207cef59ba..b89efa91b4 100644
--- a/cts/scheduler/exp/dc-fence-ordering.exp
+++ b/cts/scheduler/exp/dc-fence-ordering.exp
@@ -1,259 +1,259 @@
diff --git a/cts/scheduler/exp/guest-node-cleanup.exp b/cts/scheduler/exp/guest-node-cleanup.exp
index 688de2c315..72c40db7c3 100644
--- a/cts/scheduler/exp/guest-node-cleanup.exp
+++ b/cts/scheduler/exp/guest-node-cleanup.exp
@@ -1,289 +1,289 @@
diff --git a/cts/scheduler/exp/guest-node-host-dies.exp b/cts/scheduler/exp/guest-node-host-dies.exp
index 35a7ffce1d..5c15cf3244 100644
--- a/cts/scheduler/exp/guest-node-host-dies.exp
+++ b/cts/scheduler/exp/guest-node-host-dies.exp
@@ -1,653 +1,653 @@
diff --git a/cts/scheduler/exp/interleave-pseudo-stop.exp b/cts/scheduler/exp/interleave-pseudo-stop.exp
index bf985cf24f..d4a5076c2a 100644
--- a/cts/scheduler/exp/interleave-pseudo-stop.exp
+++ b/cts/scheduler/exp/interleave-pseudo-stop.exp
@@ -1,522 +1,522 @@
diff --git a/cts/scheduler/exp/migrate-fencing.exp b/cts/scheduler/exp/migrate-fencing.exp
index f79dea69c0..a586284f2f 100644
--- a/cts/scheduler/exp/migrate-fencing.exp
+++ b/cts/scheduler/exp/migrate-fencing.exp
@@ -1,634 +1,634 @@
diff --git a/cts/scheduler/exp/multiply-active-stonith.exp b/cts/scheduler/exp/multiply-active-stonith.exp
index 508edbe13a..7f4410f916 100644
--- a/cts/scheduler/exp/multiply-active-stonith.exp
+++ b/cts/scheduler/exp/multiply-active-stonith.exp
@@ -1,34 +1,34 @@
diff --git a/cts/scheduler/exp/on_fail_demote4.exp b/cts/scheduler/exp/on_fail_demote4.exp
index fbdd0f3592..f19ba2b6e3 100644
--- a/cts/scheduler/exp/on_fail_demote4.exp
+++ b/cts/scheduler/exp/on_fail_demote4.exp
@@ -1,1818 +1,1818 @@
diff --git a/cts/scheduler/exp/order-expired-failure.exp b/cts/scheduler/exp/order-expired-failure.exp
index 5b1b059770..4b604cbc14 100644
--- a/cts/scheduler/exp/order-expired-failure.exp
+++ b/cts/scheduler/exp/order-expired-failure.exp
@@ -1,144 +1,144 @@
diff --git a/cts/scheduler/exp/per-op-failcount.exp b/cts/scheduler/exp/per-op-failcount.exp
index ad63436873..ff9c5666f4 100644
--- a/cts/scheduler/exp/per-op-failcount.exp
+++ b/cts/scheduler/exp/per-op-failcount.exp
@@ -1,75 +1,75 @@
diff --git a/cts/scheduler/exp/priority-fencing-delay.exp b/cts/scheduler/exp/priority-fencing-delay.exp
index 828e359871..a815e660f4 100644
--- a/cts/scheduler/exp/priority-fencing-delay.exp
+++ b/cts/scheduler/exp/priority-fencing-delay.exp
@@ -1,570 +1,570 @@
diff --git a/cts/scheduler/exp/promoted-7.exp b/cts/scheduler/exp/promoted-7.exp
index f9724adbf8..773defeb46 100644
--- a/cts/scheduler/exp/promoted-7.exp
+++ b/cts/scheduler/exp/promoted-7.exp
@@ -1,643 +1,643 @@
diff --git a/cts/scheduler/exp/promoted-8.exp b/cts/scheduler/exp/promoted-8.exp
index eefd8506db..e5a19191ea 100644
--- a/cts/scheduler/exp/promoted-8.exp
+++ b/cts/scheduler/exp/promoted-8.exp
@@ -1,732 +1,732 @@
diff --git a/cts/scheduler/exp/rec-node-13.exp b/cts/scheduler/exp/rec-node-13.exp
index 5b5d2aa329..83825b599b 100644
--- a/cts/scheduler/exp/rec-node-13.exp
+++ b/cts/scheduler/exp/rec-node-13.exp
@@ -1,55 +1,55 @@
diff --git a/cts/scheduler/exp/rec-node-15.exp b/cts/scheduler/exp/rec-node-15.exp
index 2477e3ff5b..3484f85ed4 100644
--- a/cts/scheduler/exp/rec-node-15.exp
+++ b/cts/scheduler/exp/rec-node-15.exp
@@ -1,451 +1,451 @@
diff --git a/cts/scheduler/exp/remote-connection-unrecoverable.exp b/cts/scheduler/exp/remote-connection-unrecoverable.exp
index 41357742b0..878cf07574 100644
--- a/cts/scheduler/exp/remote-connection-unrecoverable.exp
+++ b/cts/scheduler/exp/remote-connection-unrecoverable.exp
@@ -1,223 +1,223 @@
diff --git a/cts/scheduler/exp/remote-fence-before-reconnect.exp b/cts/scheduler/exp/remote-fence-before-reconnect.exp
index 26ed3d0791..85e3f37d47 100644
--- a/cts/scheduler/exp/remote-fence-before-reconnect.exp
+++ b/cts/scheduler/exp/remote-fence-before-reconnect.exp
@@ -1,66 +1,66 @@
diff --git a/cts/scheduler/exp/remote-fence-unclean-3.exp b/cts/scheduler/exp/remote-fence-unclean-3.exp
index 66ed145dc5..fe80be2b05 100644
--- a/cts/scheduler/exp/remote-fence-unclean-3.exp
+++ b/cts/scheduler/exp/remote-fence-unclean-3.exp
@@ -1,249 +1,249 @@
diff --git a/cts/scheduler/exp/remote-fence-unclean.exp b/cts/scheduler/exp/remote-fence-unclean.exp
index 9645ed4e1d..7c62b30d34 100644
--- a/cts/scheduler/exp/remote-fence-unclean.exp
+++ b/cts/scheduler/exp/remote-fence-unclean.exp
@@ -1,175 +1,175 @@
diff --git a/cts/scheduler/exp/remote-fence-unclean2.exp b/cts/scheduler/exp/remote-fence-unclean2.exp
index 6e73444249..226b27aacf 100644
--- a/cts/scheduler/exp/remote-fence-unclean2.exp
+++ b/cts/scheduler/exp/remote-fence-unclean2.exp
@@ -1,25 +1,25 @@
diff --git a/cts/scheduler/exp/remote-recover-all.exp b/cts/scheduler/exp/remote-recover-all.exp
index e69bec4199..24dd495ae4 100644
--- a/cts/scheduler/exp/remote-recover-all.exp
+++ b/cts/scheduler/exp/remote-recover-all.exp
@@ -1,814 +1,814 @@
diff --git a/cts/scheduler/exp/remote-recover-connection.exp b/cts/scheduler/exp/remote-recover-connection.exp
index e208355b92..efe8153cb5 100644
--- a/cts/scheduler/exp/remote-recover-connection.exp
+++ b/cts/scheduler/exp/remote-recover-connection.exp
@@ -1,554 +1,554 @@
diff --git a/cts/scheduler/exp/remote-recover-fail.exp b/cts/scheduler/exp/remote-recover-fail.exp
index 3a1d39c8ae..9626f4dd1f 100644
--- a/cts/scheduler/exp/remote-recover-fail.exp
+++ b/cts/scheduler/exp/remote-recover-fail.exp
@@ -1,187 +1,187 @@
diff --git a/cts/scheduler/exp/remote-recover-no-resources.exp b/cts/scheduler/exp/remote-recover-no-resources.exp
index fdbaa035c3..f6f26fa464 100644
--- a/cts/scheduler/exp/remote-recover-no-resources.exp
+++ b/cts/scheduler/exp/remote-recover-no-resources.exp
@@ -1,698 +1,698 @@
diff --git a/cts/scheduler/exp/remote-recover-unknown.exp b/cts/scheduler/exp/remote-recover-unknown.exp
index 8009880cc9..16670f1562 100644
--- a/cts/scheduler/exp/remote-recover-unknown.exp
+++ b/cts/scheduler/exp/remote-recover-unknown.exp
@@ -1,728 +1,728 @@
diff --git a/cts/scheduler/exp/remote-recovery.exp b/cts/scheduler/exp/remote-recovery.exp
index e208355b92..efe8153cb5 100644
--- a/cts/scheduler/exp/remote-recovery.exp
+++ b/cts/scheduler/exp/remote-recovery.exp
@@ -1,554 +1,554 @@
diff --git a/cts/scheduler/exp/remote-unclean2.exp b/cts/scheduler/exp/remote-unclean2.exp
index a3f9819c30..0c1eb21e5a 100644
--- a/cts/scheduler/exp/remote-unclean2.exp
+++ b/cts/scheduler/exp/remote-unclean2.exp
@@ -1,54 +1,54 @@
diff --git a/cts/scheduler/exp/start-then-stop-with-unfence.exp b/cts/scheduler/exp/start-then-stop-with-unfence.exp
index c659279bb9..247be5c7f3 100644
--- a/cts/scheduler/exp/start-then-stop-with-unfence.exp
+++ b/cts/scheduler/exp/start-then-stop-with-unfence.exp
@@ -1,159 +1,159 @@
diff --git a/cts/scheduler/exp/stonith-0.exp b/cts/scheduler/exp/stonith-0.exp
index 9463b906cb..fe68c4ba45 100644
--- a/cts/scheduler/exp/stonith-0.exp
+++ b/cts/scheduler/exp/stonith-0.exp
@@ -1,410 +1,410 @@
diff --git a/cts/scheduler/exp/stonith-1.exp b/cts/scheduler/exp/stonith-1.exp
index 5460a4cd25..8941919e50 100644
--- a/cts/scheduler/exp/stonith-1.exp
+++ b/cts/scheduler/exp/stonith-1.exp
@@ -1,558 +1,558 @@
diff --git a/cts/scheduler/exp/stonith-2.exp b/cts/scheduler/exp/stonith-2.exp
index 2bf7888e1c..f215829e11 100644
--- a/cts/scheduler/exp/stonith-2.exp
+++ b/cts/scheduler/exp/stonith-2.exp
@@ -1,39 +1,39 @@
diff --git a/cts/scheduler/exp/stop-failure-no-quorum.exp b/cts/scheduler/exp/stop-failure-no-quorum.exp
index f3e509f548..ef1df3e0dc 100644
--- a/cts/scheduler/exp/stop-failure-no-quorum.exp
+++ b/cts/scheduler/exp/stop-failure-no-quorum.exp
@@ -1,55 +1,55 @@
diff --git a/cts/scheduler/exp/stop-failure-with-fencing.exp b/cts/scheduler/exp/stop-failure-with-fencing.exp
index ed72131c6d..f8acf54fba 100644
--- a/cts/scheduler/exp/stop-failure-with-fencing.exp
+++ b/cts/scheduler/exp/stop-failure-with-fencing.exp
@@ -1,64 +1,64 @@
diff --git a/cts/scheduler/exp/unfence-device.exp b/cts/scheduler/exp/unfence-device.exp
index 3839818a71..171701bfea 100644
--- a/cts/scheduler/exp/unfence-device.exp
+++ b/cts/scheduler/exp/unfence-device.exp
@@ -1,100 +1,100 @@
diff --git a/cts/scheduler/exp/whitebox-imply-stop-on-fence.exp b/cts/scheduler/exp/whitebox-imply-stop-on-fence.exp
index a1a200ecd8..4f7afea782 100644
--- a/cts/scheduler/exp/whitebox-imply-stop-on-fence.exp
+++ b/cts/scheduler/exp/whitebox-imply-stop-on-fence.exp
@@ -1,570 +1,570 @@
diff --git a/cts/scheduler/exp/whitebox-ms-ordering.exp b/cts/scheduler/exp/whitebox-ms-ordering.exp
index 56079567e7..927d7b43ed 100644
--- a/cts/scheduler/exp/whitebox-ms-ordering.exp
+++ b/cts/scheduler/exp/whitebox-ms-ordering.exp
@@ -1,582 +1,582 @@
diff --git a/cts/scheduler/exp/year-2038.exp b/cts/scheduler/exp/year-2038.exp
index 5b1ce5b947..2332fe819c 100644
--- a/cts/scheduler/exp/year-2038.exp
+++ b/cts/scheduler/exp/year-2038.exp
@@ -1,144 +1,144 @@
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index 73f1472c1e..8106af937d 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -1,1116 +1,1105 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include
#include
#include
#include

#include "libpacemaker_private.h"

// Convenience macros for logging action properties
#define action_type_str(flags) \
    (pcmk_is_set((flags), pcmk__action_pseudo)? "pseudo-action" : "action")

#define action_optional_str(flags) \
    (pcmk_is_set((flags), pcmk__action_optional)? "optional" : "required")

#define action_runnable_str(flags) \
    (pcmk_is_set((flags), pcmk__action_runnable)? "runnable" : "unrunnable")

#define action_node_str(a) \
    (((a)->node == NULL)? "no node" : (a)->node->priv->name)

/*!
 * \internal
 * \brief Add an XML node tag for a specified ID
 *
 * \param[in]     id   Node UUID to add
 * \param[in,out] xml  Parent XML tag to add to
 */
static xmlNode *
add_node_to_xml_by_id(const char *id, xmlNode *xml)
{
    xmlNode *node_xml;

    node_xml = pcmk__xe_create(xml, PCMK_XE_NODE);
    crm_xml_add(node_xml, PCMK_XA_ID, id);

    return node_xml;
}

/*!
 * \internal
 * \brief Add an XML node tag for a specified node
 *
 * \param[in]     node  Node to add
 * \param[in,out] xml   XML to add node to
 */
static void
add_node_to_xml(const pcmk_node_t *node, void *xml)
{
    add_node_to_xml_by_id(node->priv->id, (xmlNode *) xml);
}

/*!
 * \internal
 * \brief Count (optionally add to XML) nodes needing maintenance state update
 *
 * \param[in,out] xml        Parent XML tag to add to, if any
 * \param[in]     scheduler  Scheduler data
 *
 * \return Count of nodes added
 * \note Only Pacemaker Remote nodes are considered currently
 */
static int
add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
{
    xmlNode *maintenance = NULL;
    int count = 0;

    if (xml != NULL) {
        maintenance = pcmk__xe_create(xml, PCMK__XE_MAINTENANCE);
    }
    for (const GList *iter = scheduler->nodes;
         iter != NULL; iter = iter->next) {
        const pcmk_node_t *node = iter->data;

        if (!pcmk__is_pacemaker_remote_node(node)) {
            continue;
        }
        if ((node->details->maintenance
             && !pcmk_is_set(node->priv->flags, pcmk__node_remote_maint))
            || (!node->details->maintenance
                && pcmk_is_set(node->priv->flags, pcmk__node_remote_maint))) {

            if (maintenance != NULL) {
                crm_xml_add(add_node_to_xml_by_id(node->priv->id, maintenance),
                            PCMK__XA_NODE_IN_MAINTENANCE,
                            (node->details->maintenance? "1" : "0"));
            }
            count++;
        }
    }
    crm_trace("%s %d nodes in need of maintenance mode update in state",
              ((maintenance == NULL)? "Counted" : "Added"), count);
    return count;
}
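The two-branch condition above is just an exclusive-or between the CIB's idea of maintenance mode and the state last pushed to the remote daemon. A minimal sketch, not part of the patch (names are hypothetical; the XML shape is assumed from the PCMK__XE_MAINTENANCE and PCMK__XA_NODE_IN_MAINTENANCE constants):

/* Assumed output shape, e.g.:
 *
 *   <maintenance>
 *     <node id="remote1" node_in_maintenance="1"/>
 *   </maintenance>
 */
#include <stdbool.h>

// Equivalent of the two-way test in add_maintenance_nodes(): an update is
// needed exactly when the two views of maintenance mode disagree
bool
needs_maintenance_update(bool cib_maintenance, bool remote_daemon_maintenance)
{
    return cib_maintenance != remote_daemon_maintenance;
}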
/*!
 * \internal
 * \brief Add pseudo action with nodes needing maintenance state update
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
add_maintenance_update(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *action = NULL;

    if (add_maintenance_nodes(NULL, scheduler) != 0) {
        action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
        pcmk__set_action_flags(action, pcmk__action_always_in_graph);
    }
}

/*!
 * \internal
 * \brief Add XML with nodes that an action is expected to bring down
 *
 * If a specified action is expected to bring any nodes down, add an XML block
 * with their UUIDs. When a node is lost, this allows the controller to
 * determine whether it was expected.
 *
 * \param[in,out] xml     Parent XML tag to add to
 * \param[in]     action  Action to check for downed nodes
 */
static void
add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
{
    CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
              return);

    if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {

        /* Shutdown makes the action's node down */
        xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
        add_node_to_xml_by_id(action->node->priv->id, downed);

    } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
                            pcmk__str_none)) {

        /* Fencing makes the action's node and any hosted guest nodes down */
        const char *fence = g_hash_table_lookup(action->meta,
                                                PCMK__META_STONITH_ACTION);

        if (pcmk__is_fencing_action(fence)) {
            xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
            add_node_to_xml_by_id(action->node->priv->id, downed);
            pe_foreach_guest_node(action->node->priv->scheduler,
                                  action->node, add_node_to_xml, downed);
        }

    } else if ((action->rsc != NULL)
               && pcmk_is_set(action->rsc->flags,
                              pcmk__rsc_is_remote_connection)
               && pcmk__str_eq(action->task, PCMK_ACTION_STOP,
                               pcmk__str_none)) {

        /* Stopping a remote connection resource makes the connected node down,
         * unless it's part of a migration
         */
        GList *iter;
        pcmk_action_t *input;
        bool migrating = false;

        for (iter = action->actions_before;
             iter != NULL; iter = iter->next) {
            input = ((pcmk__related_action_t *) iter->data)->action;
            if ((input->rsc != NULL)
                && pcmk__str_eq(action->rsc->id, input->rsc->id,
                                pcmk__str_none)
                && pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
                                pcmk__str_none)) {
                migrating = true;
                break;
            }
        }
        if (!migrating) {
            xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
            add_node_to_xml_by_id(action->rsc->id, downed);
        }
    }
}

/*!
 * \internal
 * \brief Create a transition graph operation key for a clone action
 *
 * \param[in] action       Clone action
 * \param[in] interval_ms  Action interval in milliseconds
 *
 * \return Newly allocated string with transition graph operation key
 */
static char *
clone_op_key(const pcmk_action_t *action, guint interval_ms)
{
    if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
        const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
        const char *n_task = g_hash_table_lookup(action->meta,
                                                 "notify_operation");

        return pcmk__notify_key(action->rsc->priv->history_id,
                                n_type, n_task);
    }
    return pcmk__op_key(action->rsc->priv->history_id,
                        pcmk__s(action->cancel_task, action->task),
                        interval_ms);
}
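clone_op_key() only chooses between two key shapes. A stand-alone sketch, not part of the patch (the example_* helpers are hypothetical stand-ins, and the exact formats of pcmk__op_key()/pcmk__notify_key() are assumed from the usual Pacemaker operation-key conventions):

#include <glib.h>
#include <stdio.h>

// Hypothetical stand-in for pcmk__op_key(): "<rsc>_<task>_<interval_ms>"
static char *
example_op_key(const char *rsc, const char *task, guint interval_ms)
{
    return g_strdup_printf("%s_%s_%u", rsc, task, interval_ms);
}

// Hypothetical stand-in for pcmk__notify_key(): "<rsc>_<type>_notify_<task>_0"
static char *
example_notify_key(const char *rsc, const char *type, const char *task)
{
    return g_strdup_printf("%s_%s_notify_%s_0", rsc, type, task);
}

int
main(void)
{
    char *k1 = example_op_key("myclone:0", "monitor", 10000);
    char *k2 = example_notify_key("myclone:0", "pre", "start");

    printf("%s\n%s\n", k1, k2); // myclone:0_monitor_10000
                                // myclone:0_pre_notify_start_0
    g_free(k1);
    g_free(k2);
    return 0;
}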
/*!
 * \internal
 * \brief Add node details to transition graph action XML
 *
 * \param[in]     action  Scheduled action
 * \param[in,out] xml     Transition graph action XML for \p action
 */
static void
add_node_details(const pcmk_action_t *action, xmlNode *xml)
{
    pcmk_node_t *router_node = pcmk__connection_host_for_action(action);

    crm_xml_add(xml, PCMK__META_ON_NODE, action->node->priv->name);
    crm_xml_add(xml, PCMK__META_ON_NODE_UUID, action->node->priv->id);
    if (router_node != NULL) {
        crm_xml_add(xml, PCMK__XA_ROUTER_NODE, router_node->priv->name);
    }
}

/*!
 * \internal
 * \brief Add resource details to transition graph action XML
 *
 * \param[in]     action      Scheduled action
 * \param[in,out] action_xml  Transition graph action XML for \p action
 */
static void
add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
{
    xmlNode *rsc_xml = NULL;
    const char *attr_list[] = {
        PCMK_XA_CLASS,
        PCMK_XA_PROVIDER,
        PCMK_XA_TYPE,
    };

    /* If a resource is locked to a node via PCMK_OPT_SHUTDOWN_LOCK, mark its
     * actions so the controller can preserve the lock when the action
     * completes.
     */
    if (pcmk__action_locks_rsc_to_node(action)) {
        crm_xml_add_ll(action_xml, PCMK_OPT_SHUTDOWN_LOCK,
                       (long long) action->rsc->priv->lock_time);
    }

    // List affected resource
    rsc_xml = pcmk__xe_create(action_xml,
                              (const char *) action->rsc->priv->xml->name);
    if (pcmk_is_set(action->rsc->flags, pcmk__rsc_removed)
        && (action->rsc->priv->history_id != NULL)) {
        /* Use the numbered instance name here, because if there is more
         * than one instance on a node, we need to make sure the command
         * goes to the right one.
         *
         * This is important even for anonymous clones, because the clone's
         * unique meta-attribute might have just been toggled from on to
         * off.
         */
        crm_debug("Using orphan clone name %s instead of history ID %s",
                  action->rsc->id, action->rsc->priv->history_id);
        crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->priv->history_id);
        crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);

    } else if (!pcmk_is_set(action->rsc->flags, pcmk__rsc_unique)) {
        const char *xml_id = pcmk__xe_id(action->rsc->priv->xml);

        crm_debug("Using anonymous clone name %s for %s (aka %s)",
                  xml_id, action->rsc->id, action->rsc->priv->history_id);

        /* ID is what we'd like client to use
         * LONG_ID is what they might know it as instead
         *
         * LONG_ID is only strictly needed /here/ during the
         * transition period until all nodes in the cluster
         * are running the new software /and/ have rebooted
         * once (meaning that they've only ever spoken to a DC
         * supporting this feature).
         *
         * If anyone toggles the unique flag to 'on', the
         * 'instance free' name will correspond to an orphan
         * and fall into the clause above instead
         */
        crm_xml_add(rsc_xml, PCMK_XA_ID, xml_id);
        if ((action->rsc->priv->history_id != NULL)
            && !pcmk__str_eq(xml_id, action->rsc->priv->history_id,
                             pcmk__str_none)) {
            crm_xml_add(rsc_xml, PCMK__XA_LONG_ID,
                        action->rsc->priv->history_id);
        } else {
            crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
        }

    } else {
        pcmk__assert(action->rsc->priv->history_id == NULL);
        crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->id);
    }

    for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
        crm_xml_add(rsc_xml, attr_list[lpc],
                    g_hash_table_lookup(action->rsc->priv->meta,
                                        attr_list[lpc]));
    }
}
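Since the id/long-id split above is the subtle part, an illustrative sketch, not part of the patch (resource names are hypothetical; the element name comes from the resource's own XML, and class/provider/type are copied by the attr_list loop):

/* Assumed output shapes:
 *
 * Anonymous clone instance: <primitive id="myclone" long-id="myclone:0"
 *                                      class="ocf" provider="heartbeat"
 *                                      type="Dummy"/>
 * Orphaned clone instance:  id carries the history ID and long-id the
 *                           numbered name, so the command reaches the right
 *                           instance on a node hosting several.
 * Unique resource:          <primitive id="myrsc" class="ocf"
 *                                      provider="heartbeat" type="Dummy"/>
 */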
/*!
 * \internal
 * \brief Add action attributes to transition graph action XML
 *
 * \param[in,out] action      Scheduled action
 * \param[in,out] action_xml  Transition graph action XML for \p action
 */
static void
add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
{
    xmlNode *args_xml = NULL;
    pcmk_resource_t *rsc = action->rsc;

    /* We create free-standing XML to start, so we can sort the attributes
     * before adding it to action_xml, which keeps the scheduler regression
     * test graphs comparable.
     */
    args_xml = pcmk__xe_create(action_xml, PCMK__XE_ATTRIBUTES);

    crm_xml_add(args_xml, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
    g_hash_table_foreach(action->extra, hash2field, args_xml);

    if ((rsc != NULL) && (action->node != NULL)) {
        // Get the resource instance attributes, evaluated properly for node
        GHashTable *params = pe_rsc_params(rsc, action->node,
                                           rsc->priv->scheduler);

        pcmk__substitute_remote_addr(rsc, params);

        g_hash_table_foreach(params, hash2smartfield, args_xml);

    } else if ((rsc != NULL)
               && (rsc->priv->variant <= pcmk__rsc_variant_primitive)) {
        GHashTable *params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler);

        g_hash_table_foreach(params, hash2smartfield, args_xml);
    }

    g_hash_table_foreach(action->meta, hash2metafield, args_xml);
    if (rsc != NULL) {
        pcmk_resource_t *parent = rsc;

        while (parent != NULL) {
            parent->priv->cmds->add_graph_meta(parent, args_xml);
            parent = parent->priv->parent;
        }

        pcmk__add_guest_meta_to_xml(args_xml, action);
-
-    } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
-                            pcmk__str_none)
-               && (action->node != NULL)) {
-        /* Pass the node's attributes as meta-attributes.
-         *
-         * @TODO: Determine whether it is still necessary to do this. It was
-         * added in 33d99707, probably for the libfence-based implementation in
-         * c9a90bd, which is no longer used.
-         */
-        g_hash_table_foreach(action->node->priv->attrs, hash2metafield,
-                             args_xml);
    }

    pcmk__xe_sort_attrs(args_xml);
}
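This hunk is the one behavioral change in the file: fencing actions stop copying the fence target's node attributes into the action's meta-attributes, which is why every .exp graph above changed. A sketch of what the removed g_hash_table_foreach() traversal did, not part of the patch, with a hypothetical stand-in for hash2metafield (the CRM_meta_ prefix here is an assumption for illustration):

#include <glib.h>
#include <stdio.h>

/* Hypothetical stand-in with a GHFunc-compatible signature: the removed code
 * walked the node's attribute table and emitted each entry onto the
 * <attributes> block, name-mangled as a meta-attribute */
static void
example_hash2metafield(gpointer key, gpointer value, gpointer user_data)
{
    // The real helper adds to an xmlNode; print instead to keep this small
    printf("CRM_meta_%s=\"%s\"\n", (const char *) key, (const char *) value);
}

int
main(void)
{
    GHashTable *attrs = g_hash_table_new(g_str_hash, g_str_equal);

    g_hash_table_insert(attrs, (gpointer) "site", (gpointer) "paris");
    g_hash_table_foreach(attrs, example_hash2metafield, NULL);
    g_hash_table_destroy(attrs);
    return 0;
}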
/*!
 * \internal
 * \brief Create the transition graph XML for a scheduled action
 *
 * \param[in,out] parent        Parent XML element to add action to
 * \param[in,out] action        Scheduled action
 * \param[in]     skip_details  If false, add action details as sub-elements
 * \param[in]     scheduler     Scheduler data
 */
static void
create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
                    const pcmk_scheduler_t *scheduler)
{
    bool needs_node_info = true;
    bool needs_maintenance_info = false;
    xmlNode *action_xml = NULL;

    if ((action == NULL) || (scheduler == NULL)) {
        return;
    }

    // Create the top-level element based on task

    if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
        /* All fences need node info; guest node fences are pseudo-events */
        if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
            action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
        } else {
            action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
        }

    } else if (pcmk__str_any_of(action->task,
                                PCMK_ACTION_DO_SHUTDOWN,
                                PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
        action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);

    } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
                            pcmk__str_none)) {
        // CIB-only clean-up for shutdown locks
        action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
        crm_xml_add(action_xml, PCMK__XA_MODE, PCMK__VALUE_CIB);

    } else if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
        if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
                         pcmk__str_none)) {
            needs_maintenance_info = true;
        }
        action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
        needs_node_info = false;

    } else {
        action_xml = pcmk__xe_create(parent, PCMK__XE_RSC_OP);
    }

    crm_xml_add_int(action_xml, PCMK_XA_ID, action->id);
    crm_xml_add(action_xml, PCMK_XA_OPERATION, action->task);

    if ((action->rsc != NULL) && (action->rsc->priv->history_id != NULL)) {
        char *clone_key = NULL;
        guint interval_ms;

        if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0,
                                  &interval_ms) != pcmk_rc_ok) {
            interval_ms = 0;
        }
        clone_key = clone_op_key(action, interval_ms);
        crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, clone_key);
        crm_xml_add(action_xml, "internal_" PCMK__XA_OPERATION_KEY,
                    action->uuid);
        free(clone_key);
    } else {
        crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, action->uuid);
    }

    if (needs_node_info && (action->node != NULL)) {
        add_node_details(action, action_xml);
        pcmk__insert_dup(action->meta, PCMK__META_ON_NODE,
                         action->node->priv->name);
        pcmk__insert_dup(action->meta, PCMK__META_ON_NODE_UUID,
                         action->node->priv->id);
    }

    if (skip_details) {
        return;
    }

    if ((action->rsc != NULL)
        && !pcmk_is_set(action->flags, pcmk__action_pseudo)) {

        // This is a real resource action, so add resource details
        add_resource_details(action, action_xml);
    }

    /* List any attributes in effect */
    add_action_attributes(action, action_xml);

    /* List any nodes this action is expected to make down */
    if (needs_node_info && (action->node != NULL)) {
        add_downed_nodes(action_xml, action);
    }

    if (needs_maintenance_info) {
        add_maintenance_nodes(action_xml, scheduler);
    }
}
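Putting the helpers together, a scheduled resource action serializes into something like the following. An illustrative sketch only; element and attribute spellings are assumed from the PCMK__XE_*/PCMK__XA_* constants, and all values are made up:

/* Assumed output shape for a non-pseudo resource action:
 *
 *   <rsc_op id="7" operation="start" operation_key="myrsc_start_0"
 *           on_node="node1" on_node_uuid="1">
 *     <primitive id="myrsc" class="ocf" provider="heartbeat" type="Dummy"/>
 *     <attributes CRM_meta_on_node="node1" CRM_meta_timeout="20000"
 *                 crm_feature_set="3.19.0"/>
 *   </rsc_op>
 */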
/*!
 * \internal
 * \brief Check whether an action should be added to the transition graph
 *
 * \param[in,out] action  Action to check
 *
 * \return true if action should be added to graph, otherwise false
 */
static bool
should_add_action_to_graph(pcmk_action_t *action)
{
    if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
        crm_trace("Ignoring action %s (%d): unrunnable",
                  action->uuid, action->id);
        return false;
    }

    if (pcmk_is_set(action->flags, pcmk__action_optional)
        && !pcmk_is_set(action->flags, pcmk__action_always_in_graph)) {
        crm_trace("Ignoring action %s (%d): optional",
                  action->uuid, action->id);
        return false;
    }

    /* Actions for unmanaged resources should be excluded from the graph,
     * with the exception of monitors and cancellation of recurring monitors.
     */
    if ((action->rsc != NULL)
        && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed)
        && !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
        const char *interval_ms_s;

        /* A cancellation of a recurring monitor will get here because the task
         * is cancel rather than monitor, but the interval can still be used to
         * recognize it. The interval has been normalized to milliseconds by
         * this point, so a string comparison is sufficient.
         */
        interval_ms_s = g_hash_table_lookup(action->meta, PCMK_META_INTERVAL);
        if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
            crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
                      action->uuid, action->id, action->rsc->id);
            return false;
        }
    }

    /* Always add pseudo-actions, fence actions, and shutdown actions (already
     * determined to be required and runnable by this point)
     */
    if (pcmk_is_set(action->flags, pcmk__action_pseudo)
        || pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
                                PCMK_ACTION_DO_SHUTDOWN, NULL)) {
        return true;
    }

    if (action->node == NULL) {
        pcmk__sched_err(action->scheduler,
                        "Skipping action %s (%d) "
                        "because it was not assigned to a node (bug?)",
                        action->uuid, action->id);
        pcmk__log_action("Unassigned", action, false);
        return false;
    }

    if (pcmk_is_set(action->flags, pcmk__action_on_dc)) {
        crm_trace("Action %s (%d) should be dumped: "
                  "can run on DC instead of %s",
                  action->uuid, action->id, pcmk__node_name(action->node));

    } else if (pcmk__is_guest_or_bundle_node(action->node)
               && !pcmk_is_set(action->node->priv->flags,
                               pcmk__node_remote_reset)) {
        crm_trace("Action %s (%d) should be dumped: "
                  "assuming will be runnable on guest %s",
                  action->uuid, action->id, pcmk__node_name(action->node));

    } else if (!action->node->details->online) {
        pcmk__sched_err(action->scheduler,
                        "Skipping action %s (%d) "
                        "because it was scheduled for offline node (bug?)",
                        action->uuid, action->id);
        pcmk__log_action("Offline node", action, false);
        return false;

    } else if (action->node->details->unclean) {
        pcmk__sched_err(action->scheduler,
                        "Skipping action %s (%d) "
                        "because it was scheduled for unclean node (bug?)",
                        action->uuid, action->id);
        pcmk__log_action("Unclean node", action, false);
        return false;
    }
    return true;
}

/*!
 * \internal
 * \brief Check whether an ordering's flags can change an action
 *
 * \param[in] ordering  Ordering to check
 *
 * \return true if ordering has flags that can change an action, false
 *         otherwise
 */
static bool
ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
    return pcmk_any_flags_set(ordering->flags,
                              ~(pcmk__ar_then_implies_first_graphed
                                |pcmk__ar_first_implies_then_graphed
                                |pcmk__ar_ordered));
}
/*!
 * \internal
 * \brief Check whether an action input should be in the transition graph
 *
 * \param[in]     action  Action to check
 * \param[in,out] input   Action input to check
 *
 * \return true if input should be in graph, false otherwise
 * \note This function may not only check an input, but disable it under
 *       certain circumstances (load or anti-colocation orderings that are
 *       not needed).
 */
static bool
should_add_input_to_graph(const pcmk_action_t *action,
                          pcmk__related_action_t *input)
{
    if (input->graphed) {
        return true;
    }

    if (input->flags == pcmk__ar_none) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "ordering disabled",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
               && !ordering_can_change_actions(input)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "optional and input unrunnable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
               && pcmk_is_set(input->flags, pcmk__ar_min_runnable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "minimum number of instances required but input unrunnable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (pcmk_is_set(input->flags, pcmk__ar_unmigratable_then_blocks)
               && !pcmk_is_set(input->action->flags, pcmk__action_runnable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "input blocked if 'then' unmigratable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (pcmk_is_set(input->flags, pcmk__ar_if_first_unmigratable)
               && pcmk_is_set(input->action->flags, pcmk__action_migratable)) {
        crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
                  "only if input is unmigratable, but it is migratable",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if ((input->flags == pcmk__ar_ordered)
               && pcmk_is_set(input->action->flags, pcmk__action_migratable)
               && pcmk__ends_with(input->action->uuid, "_stop_0")) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "optional but stop in migration",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;

    } else if (input->flags == pcmk__ar_if_on_same_node_or_target) {
        pcmk_node_t *input_node = input->action->node;

        if ((action->rsc != NULL)
            && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
                            pcmk__str_none)) {

            pcmk_node_t *assigned = action->rsc->priv->assigned_node;

            /* For load_stopped -> migrate_to orderings, we care about where
             * the resource has been assigned, not where migrate_to will be
             * executed.
             */
            if (!pcmk__same_node(input_node, assigned)) {
                crm_trace("Ignoring %s (%d) input %s (%d): "
                          "migration target %s is not same as input node %s",
                          action->uuid, action->id,
                          input->action->uuid, input->action->id,
                          (assigned? assigned->priv->name : ""),
                          (input_node? input_node->priv->name : ""));
                input->flags = pcmk__ar_none;
                return false;
            }

        } else if (!pcmk__same_node(input_node, action->node)) {
            crm_trace("Ignoring %s (%d) input %s (%d): "
                      "not on same node (%s vs %s)",
                      action->uuid, action->id,
                      input->action->uuid, input->action->id,
                      (action->node? action->node->priv->name : ""),
                      (input_node? input_node->priv->name : ""));
            input->flags = pcmk__ar_none;
            return false;

        } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) {
            crm_trace("Ignoring %s (%d) input %s (%d): "
                      "ordering optional",
                      action->uuid, action->id,
                      input->action->uuid, input->action->id);
            input->flags = pcmk__ar_none;
            return false;
        }

    } else if (input->flags == pcmk__ar_if_required_on_same_node) {
        if (input->action->node && action->node
            && !pcmk__same_node(input->action->node, action->node)) {
            crm_trace("Ignoring %s (%d) input %s (%d): "
                      "not on same node (%s vs %s)",
                      action->uuid, action->id,
                      input->action->uuid, input->action->id,
                      pcmk__node_name(action->node),
                      pcmk__node_name(input->action->node));
            input->flags = pcmk__ar_none;
            return false;

        } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) {
            crm_trace("Ignoring %s (%d) input %s (%d): optional",
                      action->uuid, action->id,
                      input->action->uuid, input->action->id);
            input->flags = pcmk__ar_none;
            return false;
        }

    } else if (input->action->rsc
               && input->action->rsc != action->rsc
               && pcmk_is_set(input->action->rsc->flags, pcmk__rsc_failed)
               && !pcmk_is_set(input->action->rsc->flags, pcmk__rsc_managed)
               && pcmk__ends_with(input->action->uuid, "_stop_0")
               && pcmk__is_clone(action->rsc)) {
        crm_warn("Ignoring requirement that %s complete before %s:"
                 " unmanaged failed resources cannot prevent clone shutdown",
                 input->action->uuid, action->uuid);
        return false;

    } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)
               && !pcmk_any_flags_set(input->action->flags,
                                      pcmk__action_always_in_graph
                                      |pcmk__action_added_to_graph)
               && !should_add_action_to_graph(input->action)) {
        crm_trace("Ignoring %s (%d) input %s (%d): "
                  "input optional",
                  action->uuid, action->id,
                  input->action->uuid, input->action->id);
        return false;
    }

    crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x",
              action->uuid, action->id,
              action_type_str(input->action->flags),
              input->action->uuid, input->action->id,
              action_node_str(input->action),
              action_runnable_str(input->action->flags),
              action_optional_str(input->action->flags), input->flags);
    return true;
}

/*!
 * \internal
 * \brief Check whether an ordering creates an ordering loop
 *
 * \param[in]     init_action  "First" action in ordering
 * \param[in]     action       Callers should always set this the same as
 *                             \p init_action (this function may use a
 *                             different value for recursive calls)
 * \param[in,out] input        Action wrapper for "then" action in ordering
 *
 * \return true if the ordering creates a loop, otherwise false
 */
bool
pcmk__graph_has_loop(const pcmk_action_t *init_action,
                     const pcmk_action_t *action,
                     pcmk__related_action_t *input)
{
    bool has_loop = false;

    if (pcmk_is_set(input->action->flags, pcmk__action_detect_loop)) {
        crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
                  input->action->uuid,
                  input->action->node? input->action->node->priv->name : "",
                  action->uuid,
                  action->node? action->node->priv->name : "",
                  input->flags);
        return false;
    }

    // Don't need to check inputs that won't be used
    if (!should_add_input_to_graph(action, input)) {
        return false;
    }

    if (input->action == init_action) {
        crm_debug("Input loop found in %s@%s ->...-> %s@%s",
                  action->uuid,
                  action->node? action->node->priv->name : "",
                  init_action->uuid,
                  init_action->node? init_action->node->priv->name : "");
        return true;
    }

    pcmk__set_action_flags(input->action, pcmk__action_detect_loop);

    crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x) "
              "for graph loop with %s@%s ",
              action->uuid,
              action->node? action->node->priv->name : "",
              input->action->uuid,
              input->action->node? input->action->node->priv->name : "",
              input->flags,
              init_action->uuid,
              init_action->node? init_action->node->priv->name : "");

    // Recursively check input itself for loops
    for (GList *iter = input->action->actions_before;
         iter != NULL; iter = iter->next) {

        if (pcmk__graph_has_loop(init_action, input->action,
                                 (pcmk__related_action_t *) iter->data)) {
            // Recursive call already logged a debug message
            has_loop = true;
            break;
        }
    }

    pcmk__clear_action_flags(input->action, pcmk__action_detect_loop);

    if (!has_loop) {
        crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
                  input->action->uuid,
                  input->action->node? input->action->node->priv->name : "",
                  action->uuid,
                  action->node? action->node->priv->name : "",
                  input->flags);
    }
    return has_loop;
}

/*!
 * \internal
 * \brief Create a synapse XML element for a transition graph
 *
 * \param[in]     action     Action that synapse is for
 * \param[in,out] scheduler  Scheduler data containing graph
 *
 * \return Newly added XML element for new graph synapse
 */
static xmlNode *
create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
{
    int synapse_priority = 0;
    xmlNode *syn = pcmk__xe_create(scheduler->priv->graph, "synapse");

    crm_xml_add_int(syn, PCMK_XA_ID, scheduler->priv->synapse_count++);

    if (action->rsc != NULL) {
        synapse_priority = action->rsc->priv->priority;
    }
    if (action->priority > synapse_priority) {
        synapse_priority = action->priority;
    }
    if (synapse_priority > 0) {
        crm_xml_add_int(syn, PCMK__XA_PRIORITY, synapse_priority);
    }
    return syn;
}
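For reference, a sketch of the synapse structure this function starts and add_action_to_graph() below fills in. IDs, operations, and the priority value are made up; the element names are the literal "synapse", "action_set", "inputs", and "trigger" strings used in this file:

/* Assumed overall shape of one synapse in the transition graph:
 *
 *   <synapse id="3" priority="1000">
 *     <action_set>
 *       <rsc_op id="7" operation="start" operation_key="myrsc_start_0" .../>
 *     </action_set>
 *     <inputs>
 *       <trigger>
 *         <rsc_op id="6" operation="stop" operation_key="myrsc_stop_0" .../>
 *       </trigger>
 *     </inputs>
 *   </synapse>
 */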
"" : action->node->priv->name)); syn = create_graph_synapse(action, scheduler); set = pcmk__xe_create(syn, "action_set"); in = pcmk__xe_create(syn, "inputs"); create_graph_action(set, action, false, scheduler); for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { pcmk__related_action_t *input = lpc->data; if (should_add_input_to_graph(action, input)) { xmlNode *input_xml = pcmk__xe_create(in, "trigger"); input->graphed = true; create_graph_action(input_xml, input->action, true, scheduler); } } } static int transition_id = 0; /*! * \internal * \brief Log a message after calculating a transition * * \param[in] scheduler Scheduler data * \param[in] filename Where transition input is stored */ void pcmk__log_transition_summary(const pcmk_scheduler_t *scheduler, const char *filename) { if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_error) || pcmk__config_has_error) { crm_err("Calculated transition %d (with errors)%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } else if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_warning) || pcmk__config_has_warning) { crm_warn("Calculated transition %d (with warnings)%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } else { crm_notice("Calculated transition %d%s%s", transition_id, (filename == NULL)? "" : ", saving inputs in ", (filename == NULL)? "" : filename); } if (pcmk__config_has_error) { crm_notice("Configuration errors found during scheduler processing," " please run \"crm_verify -L\" to identify issues"); } } /*! * \internal * \brief Add a resource's actions to the transition graph * * \param[in,out] rsc Resource whose actions should be added */ void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc) { GList *iter = NULL; pcmk__assert(rsc != NULL); pcmk__rsc_trace(rsc, "Adding actions for %s to graph", rsc->id); // First add the resource's own actions g_list_foreach(rsc->priv->actions, add_action_to_graph, rsc->priv->scheduler); // Then recursively add its children's actions (appropriate to variant) for (iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data; child_rsc->priv->cmds->add_actions_to_graph(child_rsc); } } /*! 
/*!
 * \internal
 * \brief Create a transition graph with all cluster actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__create_graph(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;
    const char *value = NULL;
    long long limit = 0LL;
    GHashTable *config_hash = scheduler->priv->options;
    int rc = pcmk_rc_ok;

    transition_id++;
    crm_trace("Creating transition graph %d", transition_id);

    scheduler->priv->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH);

    value = pcmk__cluster_option(config_hash, PCMK_OPT_CLUSTER_DELAY);
    crm_xml_add(scheduler->priv->graph, PCMK_OPT_CLUSTER_DELAY, value);

    value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
    crm_xml_add(scheduler->priv->graph, PCMK_OPT_STONITH_TIMEOUT, value);

    crm_xml_add(scheduler->priv->graph, "failed-stop-offset", "INFINITY");

    if (pcmk_is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) {
        crm_xml_add(scheduler->priv->graph, "failed-start-offset", "INFINITY");
    } else {
        crm_xml_add(scheduler->priv->graph, "failed-start-offset", "1");
    }

    value = pcmk__cluster_option(config_hash, PCMK_OPT_BATCH_LIMIT);
    crm_xml_add(scheduler->priv->graph, PCMK_OPT_BATCH_LIMIT, value);

    crm_xml_add_int(scheduler->priv->graph, "transition_id", transition_id);

    value = pcmk__cluster_option(config_hash, PCMK_OPT_MIGRATION_LIMIT);
    rc = pcmk__scan_ll(value, &limit, 0LL);
    if (rc != pcmk_rc_ok) {
        crm_warn("Ignoring invalid value '%s' for " PCMK_OPT_MIGRATION_LIMIT
                 ": %s", value, pcmk_rc_str(rc));
    } else if (limit > 0) {
        crm_xml_add(scheduler->priv->graph, PCMK_OPT_MIGRATION_LIMIT, value);
    }

    if (scheduler->priv->recheck_by > 0) {
        char *recheck_epoch = NULL;

        recheck_epoch = crm_strdup_printf("%llu", (unsigned long long)
                                          scheduler->priv->recheck_by);
        crm_xml_add(scheduler->priv->graph, "recheck-by", recheck_epoch);
        free(recheck_epoch);
    }

    /* The following code will de-duplicate action inputs, so nothing past this
     * should rely on the action input type flags retaining their original
     * values.
     */

    // Add resource actions to graph
    for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        pcmk__rsc_trace(rsc, "Processing actions for %s", rsc->id);
        rsc->priv->cmds->add_actions_to_graph(rsc);
    }

    // Add pseudo-action for list of nodes with maintenance state update
    add_maintenance_update(scheduler);

    // Add non-resource (node) actions
    for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) {
        pcmk_action_t *action = (pcmk_action_t *) iter->data;

        if ((action->rsc != NULL)
            && (action->node != NULL)
            && action->node->details->shutdown
            && !pcmk_is_set(action->rsc->flags, pcmk__rsc_maintenance)
            && !pcmk_any_flags_set(action->flags,
                                   pcmk__action_optional
                                   |pcmk__action_runnable)
            && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {

            /* Eventually we should just ignore the 'fence' case, but for now
             * it's the best way to detect (in CTS) when CIB resource updates
             * are being lost.
             */
            if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate)
                || (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
                const bool managed = pcmk_is_set(action->rsc->flags,
                                                 pcmk__rsc_managed);
                const bool failed = pcmk_is_set(action->rsc->flags,
                                                pcmk__rsc_failed);

                crm_crit("Cannot %s %s because of %s:%s%s (%s)",
                         action->node->details->unclean? "fence" : "shut down",
                         pcmk__node_name(action->node), action->rsc->id,
                         (managed? " blocked" : " unmanaged"),
                         (failed? " failed" : ""), action->uuid);
            }
        }

        add_action_to_graph((gpointer) action, (gpointer) scheduler);
    }

    crm_log_xml_trace(scheduler->priv->graph, "graph");
}