diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index 77171e4808..e3ecaa823b 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -1,546 +1,552 @@
 /*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_PENGINE_PE_TYPES__H
 #  define PCMK__CRM_PENGINE_PE_TYPES__H
 
 
 #  include <stdbool.h>              // bool
 #  include <sys/types.h>            // time_t
 #  include <libxml/tree.h>          // xmlNode
 #  include <glib.h>                 // gboolean, guint, GList, GHashTable
 #  include <crm/common/iso8601.h>
 #  include <crm/pengine/common.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /*!
  * \file
  * \brief Data types for cluster status
  * \ingroup pengine
  */
 
 typedef struct pe_node_s pe_node_t;
 typedef struct pe_action_s pe_action_t;
 typedef struct pe_resource_s pe_resource_t;
 typedef struct pe_working_set_s pe_working_set_t;
 
 enum pe_obj_types {
     pe_unknown = -1,
     pe_native = 0,
     pe_group = 1,
     pe_clone = 2,
     pe_container = 3,
 };
 
 typedef struct resource_object_functions_s {
     gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
     pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
                                 const pe_node_t *node, int flags);
     /* parameter result must be freed */
     char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
                         pe_working_set_t*);
     //! \deprecated will be removed in a future release
     void (*print) (pe_resource_t*, const char*, long, void*);
     gboolean (*active) (pe_resource_t*, gboolean);
     enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
     pe_node_t *(*location) (const pe_resource_t*, GList**, int);
     void (*free) (pe_resource_t*);
     void (*count) (pe_resource_t*);
     gboolean (*is_filtered) (pe_resource_t*, GList *, gboolean);
 } resource_object_functions_t;
 
 typedef struct resource_alloc_functions_s resource_alloc_functions_t;
 
 enum pe_quorum_policy {
     no_quorum_freeze,
     no_quorum_stop,
     no_quorum_ignore,
     no_quorum_suicide,
     no_quorum_demote
 };
 
 enum node_type {
     node_ping,
     node_member,
     node_remote
 };
 
 //! \deprecated will be removed in a future release
 enum pe_restart {
     pe_restart_restart, //! \deprecated will be removed in a future release
     pe_restart_ignore   //! \deprecated will be removed in a future release
 };
 
 //! Determine behavior of pe_find_resource_with_flags()
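+//! (For example, pe_find_resource_with_flags(data_set->resources, "myrsc",
+//! pe_find_renamed|pe_find_anon) would match "myrsc" by resource ID, LRM
+//! history ID, or anonymous clone base name.)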
 enum pe_find {
     pe_find_renamed  = 0x001, //!< match resource ID or LRM history ID
     pe_find_anon     = 0x002, //!< match base name of anonymous clone instances
     pe_find_clone    = 0x004, //!< match only clone instances
     pe_find_current  = 0x008, //!< match resource active on specified node
     pe_find_inactive = 0x010, //!< match resource not running anywhere
     pe_find_any      = 0x020, //!< match base name of any clone instance
 };
 
 // @TODO Make these an enum
 
 #  define pe_flag_have_quorum           0x00000001ULL
 #  define pe_flag_symmetric_cluster     0x00000002ULL
 #  define pe_flag_maintenance_mode      0x00000008ULL
 
 #  define pe_flag_stonith_enabled       0x00000010ULL
 #  define pe_flag_have_stonith_resource 0x00000020ULL
 #  define pe_flag_enable_unfencing      0x00000040ULL
 #  define pe_flag_concurrent_fencing    0x00000080ULL
 
 #  define pe_flag_stop_rsc_orphans      0x00000100ULL
 #  define pe_flag_stop_action_orphans   0x00000200ULL
 #  define pe_flag_stop_everything       0x00000400ULL
 
 #  define pe_flag_start_failure_fatal   0x00001000ULL
 
 //! \deprecated
 #  define pe_flag_remove_after_stop     0x00002000ULL
 
 #  define pe_flag_startup_fencing       0x00004000ULL
 #  define pe_flag_shutdown_lock         0x00008000ULL
 
 #  define pe_flag_startup_probes        0x00010000ULL
 #  define pe_flag_have_status           0x00020000ULL
 #  define pe_flag_have_remote_nodes     0x00040000ULL
 
 #  define pe_flag_quick_location        0x00100000ULL
 #  define pe_flag_sanitized             0x00200000ULL
 
 //! \deprecated
 #  define pe_flag_stdout                0x00400000ULL
 
 //! Don't count total, disabled and blocked resource instances
 #  define pe_flag_no_counts             0x00800000ULL
 
 /*! Skip deprecated code that is kept solely for backward API compatibility.
  * (Internal code should always set this.)
  */
 #  define pe_flag_no_compat             0x01000000ULL
 
 #  define pe_flag_show_scores           0x02000000ULL
 #  define pe_flag_show_utilization      0x04000000ULL
 
+/*!
+ * When scheduling, only unpack the CIB (including constraints), calculate
+ * as much cluster status as possible, and apply node health.
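+ *
+ * (For example, crm_verify sets this when the CIB to check has no status
+ * section; see tools/crm_verify.c.)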
+ */
+#  define pe_flag_check_config          0x08000000ULL
+
 struct pe_working_set_s {
     xmlNode *input;
     crm_time_t *now;
 
     /* options extracted from the input */
     char *dc_uuid;
     pe_node_t *dc_node;
     const char *stonith_action;
     const char *placement_strategy;
 
     unsigned long long flags;
 
     int stonith_timeout;
     enum pe_quorum_policy no_quorum_policy;
 
     GHashTable *config_hash;
     GHashTable *tickets;
 
     // Actions for which there can be only one (e.g. fence nodeX)
     GHashTable *singletons;
 
     GList *nodes;
     GList *resources;
     GList *placement_constraints;
     GList *ordering_constraints;
     GList *colocation_constraints;
     GList *ticket_constraints;
 
     GList *actions;
     xmlNode *failed;
     xmlNode *op_defaults;
     xmlNode *rsc_defaults;
 
     /* stats */
     int num_synapse;
     int max_valid_nodes;    //! Deprecated (will be removed in a future release)
     int order_id;
     int action_id;
 
     /* final output */
     xmlNode *graph;
 
     GHashTable *template_rsc_sets;
     const char *localhost;
     GHashTable *tags;
 
     int blocked_resources;
     int disabled_resources;
 
     GList *param_check; // History entries that need to be checked
     GList *stop_needed; // Containers that need stop actions
     time_t recheck_by;  // Hint to controller to re-run scheduler by this time
     int ninstances;     // Total number of resource instances
     guint shutdown_lock; // How long (seconds) to lock resources to shutdown node
     int priority_fencing_delay; // Priority fencing delay
 
     void *priv;
 };
 
 enum pe_check_parameters {
     /* Clear fail count if parameters changed for un-expired start or monitor
      * last_failure.
      */
     pe_check_last_failure,
 
     /* Clear fail count if parameters changed for start, monitor, promote, or
      * migrate_from actions for active resources.
      */
     pe_check_active,
 };
 
 struct pe_node_shared_s {
     const char *id;
     const char *uname;
     enum node_type type;
 
     /* @TODO convert these flags into a bitfield */
     gboolean online;
     gboolean standby;
     gboolean standby_onfail;
     gboolean pending;
     gboolean unclean;
     gboolean unseen;
     gboolean shutdown;
     gboolean expected_up;
     gboolean is_dc;
     gboolean maintenance;
     gboolean rsc_discovery_enabled;
     gboolean remote_requires_reset;
     gboolean remote_was_fenced;
     gboolean remote_maintenance; /* what the remote-rsc is thinking */
     gboolean unpacked;
 
     int num_resources;
     pe_resource_t *remote_rsc;
     GList *running_rsc;       /* pe_resource_t* */
     GList *allocated_rsc;     /* pe_resource_t* */
 
     GHashTable *attrs;          /* char* => char* */
     GHashTable *utilization;
     GHashTable *digest_cache;   //!< cache of calculated resource digests
     int priority; // calculated based on the priority of resources running on the node
 };
 
 struct pe_node_s {
     int weight;
     gboolean fixed;
     int count;
     struct pe_node_shared_s *details;
     int rsc_discover_mode;
 };
 
 #  define pe_rsc_orphan                     0x00000001ULL
 #  define pe_rsc_managed                    0x00000002ULL
 #  define pe_rsc_block                      0x00000004ULL
 #  define pe_rsc_orphan_container_filler    0x00000008ULL
 
 #  define pe_rsc_notify                     0x00000010ULL
 #  define pe_rsc_unique                     0x00000020ULL
 #  define pe_rsc_fence_device               0x00000040ULL
 #  define pe_rsc_promotable                 0x00000080ULL
 
 #  define pe_rsc_provisional                0x00000100ULL
 #  define pe_rsc_allocating                 0x00000200ULL
 #  define pe_rsc_merging                    0x00000400ULL
 
 #  define pe_rsc_stop                       0x00001000ULL
 #  define pe_rsc_reload                     0x00002000ULL
 #  define pe_rsc_allow_remote_remotes       0x00004000ULL
 #  define pe_rsc_critical                   0x00008000ULL
 
 #  define pe_rsc_failed                     0x00010000ULL
 #  define pe_rsc_runnable                   0x00040000ULL
 #  define pe_rsc_start_pending              0x00080000ULL
 
 #  define pe_rsc_starting                   0x00100000ULL
 #  define pe_rsc_stopping                   0x00200000ULL
 #  define pe_rsc_allow_migrate              0x00800000ULL
 
 #  define pe_rsc_failure_ignored            0x01000000ULL
 #  define pe_rsc_maintenance                0x04000000ULL
 #  define pe_rsc_is_container               0x08000000ULL
 
 #  define pe_rsc_needs_quorum               0x10000000ULL
 #  define pe_rsc_needs_fencing              0x20000000ULL
 #  define pe_rsc_needs_unfencing            0x40000000ULL
 
 enum pe_graph_flags {
     pe_graph_none = 0x00000,
     pe_graph_updated_first = 0x00001,
     pe_graph_updated_then = 0x00002,
     pe_graph_disable = 0x00004,
 };
 
 /* *INDENT-OFF* */
 enum pe_action_flags {
     pe_action_pseudo = 0x00001,
     pe_action_runnable = 0x00002,
     pe_action_optional = 0x00004,
     pe_action_print_always = 0x00008,
 
     pe_action_have_node_attrs = 0x00010,
     pe_action_implied_by_stonith = 0x00040,
     pe_action_migrate_runnable =   0x00080,
 
     pe_action_dumped = 0x00100,
     pe_action_processed = 0x00200,
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
     pe_action_clear = 0x00400, //! \deprecated Unused
 #endif
     pe_action_dangle = 0x00800,
 
     /* This action requires one or more of its dependencies to be runnable.
      * We use this to clear the runnable flag before checking dependencies.
      */
     pe_action_requires_any = 0x01000,
 
     pe_action_reschedule = 0x02000,
     pe_action_tracking = 0x04000,
     pe_action_dedup = 0x08000, //! Internal state tracking when creating graph
 
     pe_action_dc = 0x10000,         //! Action may run on DC instead of target
 };
 /* *INDENT-ON* */
 
 struct pe_resource_s {
     char *id;
     char *clone_name;
     xmlNode *xml;
     xmlNode *orig_xml;
     xmlNode *ops_xml;
 
     pe_working_set_t *cluster;
     pe_resource_t *parent;
 
     enum pe_obj_types variant;
     void *variant_opaque;
     resource_object_functions_t *fns;
     resource_alloc_functions_t *cmds;
 
     enum rsc_recovery_type recovery_type;
 
     enum pe_restart restart_type; //!< \deprecated will be removed in future release
 
     int priority;
     int stickiness;
     int sort_index;
     int failure_timeout;
     int migration_threshold;
     guint remote_reconnect_ms;
     char *pending_task;
 
     unsigned long long flags;
 
     // @TODO merge these into flags
     gboolean is_remote_node;
     gboolean exclusive_discover;
 
     //!@{
     //! This field should be treated as internal to Pacemaker
     GList *rsc_cons_lhs;      // List of pcmk__colocation_t*
     GList *rsc_cons;          // List of pcmk__colocation_t*
     GList *rsc_location;      // List of pe__location_t*
     GList *actions;           // List of pe_action_t*
     GList *rsc_tickets;       // List of rsc_ticket*
     //!@}
 
     pe_node_t *allocated_to;
     pe_node_t *partial_migration_target;
     pe_node_t *partial_migration_source;
     GList *running_on;        /* pe_node_t*   */
     GHashTable *known_on;       /* pe_node_t*   */
     GHashTable *allowed_nodes;  /* pe_node_t*   */
 
     enum rsc_role_e role;
     enum rsc_role_e next_role;
 
     GHashTable *meta;
     GHashTable *parameters; //! \deprecated Use pe_rsc_params() instead
     GHashTable *utilization;
 
     GList *children;          /* pe_resource_t*   */
     GList *dangling_migrations;       /* pe_node_t*       */
 
     pe_resource_t *container;
     GList *fillers;
 
     pe_node_t *pending_node;    // Node on which pending_task is happening
     pe_node_t *lock_node;       // Resource is shutdown-locked to this node
     time_t lock_time;           // When shutdown lock started
 
     /* Resource parameters may have node-attribute-based rules, which means the
      * values can vary by node. This table is a cache of parameter name/value
      * tables for each node (as needed). Use pe_rsc_params() to get the table
      * for a given node.
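+     *
+     * A minimal lookup sketch (hypothetical parameter name "port"):
+     *   GHashTable *params = pe_rsc_params(rsc, node, data_set);
+     *   const char *port = g_hash_table_lookup(params, "port");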
      */
     GHashTable *parameter_cache; // Key = node name, value = parameters table
 #if ENABLE_VERSIONED_ATTRS
     xmlNode *versioned_parameters;
 #endif
 };
 
 #if ENABLE_VERSIONED_ATTRS
 // Used as action->action_details if action->rsc is not NULL
 typedef struct pe_rsc_action_details_s {
     xmlNode *versioned_parameters;
     xmlNode *versioned_meta;
 } pe_rsc_action_details_t;
 #endif
 
 struct pe_action_s {
     int id;
     int priority;
 
     pe_resource_t *rsc;
     pe_node_t *node;
     xmlNode *op_entry;
 
     char *task;
     char *uuid;
     char *cancel_task;
     char *reason;
 
     enum pe_action_flags flags;
     enum rsc_start_requirement needs;
     enum action_fail_response on_fail;
     enum rsc_role_e fail_role;
 
     GHashTable *meta;
     GHashTable *extra;
 
     /*
      * These two variables are associated with the constraint logic that
      * requires one or more actions to be runnable before this action may
      * execute.
      *
      * They are used with features such as 'clone-min', which requires at
      * minimum X clone instances to be running before an ordering dependency
      * can run. Another option that uses this is 'require-all=false' in
      * ordering constraints, which requires only one instance of a resource
      * to start before allowing dependencies to start -- essentially,
      * require-all=false is the same as clone-min=1.
      */

     /* Current number of known runnable actions in the before list */
     int runnable_before;
     /* Number of runnable "before" actions required for this action to be
      * considered runnable */
     int required_runnable_before;
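+
+    /* For example, with clone-min=2, an action gated by this logic would need
+     * at least two of its "before" actions to be runnable (that is,
+     * required_runnable_before == 2) before it is considered runnable. */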
 
     GList *actions_before;    /* pe_action_wrapper_t* */
     GList *actions_after;     /* pe_action_wrapper_t* */
 
     /* Some of the above fields could be moved to the details,
      * except for API backward compatibility.
      */
     void *action_details; // varies by type of action
 };
 
 typedef struct pe_ticket_s {
     char *id;
     gboolean granted;
     time_t last_granted;
     gboolean standby;
     GHashTable *state;
 } pe_ticket_t;
 
 typedef struct pe_tag_s {
     char *id;
     GList *refs;
 } pe_tag_t;
 
 //! Internal tracking for transition graph creation
 enum pe_link_state {
     pe_link_not_dumped, //! Internal tracking for transition graph creation
     pe_link_dumped,     //! Internal tracking for transition graph creation
     pe_link_dup,        //! \deprecated No longer used by Pacemaker
 };
 
 enum pe_discover_e {
     pe_discover_always = 0,
     pe_discover_never,
     pe_discover_exclusive,
 };
 
 /* *INDENT-OFF* */
 enum pe_ordering {
     pe_order_none                  = 0x0,       /* deleted */
     pe_order_optional              = 0x1,       /* pure ordering, nothing implied */
     pe_order_apply_first_non_migratable = 0x2,  /* Only apply this constraint's ordering if first is not migratable. */
 
     pe_order_implies_first         = 0x10,      /* If 'then' is required, ensure 'first' is too */
     pe_order_implies_then          = 0x20,      /* If 'first' is required, ensure 'then' is too */
     pe_order_promoted_implies_first = 0x40,     /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */
 
     /* first requires then to be both runnable and migrate runnable. */
     pe_order_implies_first_migratable  = 0x80,
 
     pe_order_runnable_left         = 0x100,     /* 'then' requires 'first' to be runnable */
 
     pe_order_pseudo_left           = 0x200,     /* 'then' can only be pseudo if 'first' is runnable */
     pe_order_implies_then_on_node  = 0x400,     /* If 'first' is required on 'nodeX',
                                                  * ensure instances of 'then' on 'nodeX' are too.
                                                  * Only really useful if 'then' is a clone and 'first' is not
                                                  */
     pe_order_probe                 = 0x800,     /* If 'first->rsc' is
                                                  *  - running but about to stop, ignore the constraint
                                                  *  - otherwise, behave as runnable_left
                                                  */
 
     pe_order_restart               = 0x1000,    /* 'then' is runnable if 'first' is optional or runnable */
     pe_order_stonith_stop          = 0x2000,    //!< \deprecated Will be removed in future release
     pe_order_serialize_only        = 0x4000,    /* serialize */
     pe_order_same_node             = 0x8000,    /* applies only if 'first' and 'then' are on same node */
 
     pe_order_implies_first_printed = 0x10000,   /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
     pe_order_implies_then_printed  = 0x20000,   /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
 
     pe_order_asymmetrical          = 0x100000,  /* Indicates asymmetrical one way ordering constraint. */
     pe_order_load                  = 0x200000,  /* Only relevant if... */
     pe_order_one_or_more           = 0x400000,  /* 'then' is runnable only if one or more of its dependencies are too */
     pe_order_anti_colocation       = 0x800000,
 
     pe_order_preserve              = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
     pe_order_then_cancels_first    = 0x2000000, // if 'then' becomes required, 'first' becomes optional
     pe_order_trace                 = 0x4000000, /* test marker */
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
     // \deprecated Use pe_order_promoted_implies_first instead
     pe_order_implies_first_master  = pe_order_promoted_implies_first,
 #endif
 };
 /* *INDENT-ON* */
 
 typedef struct pe_action_wrapper_s {
     enum pe_ordering type;
     enum pe_link_state state;
     pe_action_t *action;
 } pe_action_wrapper_t;
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
 #include <crm/pengine/pe_types_compat.h>
 #endif
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_PENGINE_PE_TYPES__H
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index b6ec19d6e6..4e73f6a841 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,803 +1,806 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 CRM_TRACE_INIT_DATA(pacemaker);
 
 /*!
  * \internal
  * \brief Do deferred action checks after allocation
  *
  * When unpacking the resource history, the scheduler checks for resource
  * configurations that have changed since an action was run. However, at that
  * time, bundles using the REMOTE_CONTAINER_HACK don't have their final
  * parameter information, so instead they add a deferred check to a list. This
  * function processes one entry in that list.
  *
  * \param[in] rsc       Resource that action history is for
  * \param[in] node      Node that action history is for
  * \param[in] rsc_op    Action history entry
  * \param[in] check     Type of deferred check to do
  * \param[in] data_set  Working set for cluster
  */
 static void
 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
              enum pe_check_parameters check, pe_working_set_t *data_set)
 {
     const char *reason = NULL;
     op_digest_cache_t *digest_data = NULL;
 
     switch (check) {
         case pe_check_active:
             if (pcmk__check_action_config(rsc, node, rsc_op)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
                 reason = "action definition changed";
             }
             break;
 
         case pe_check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s has "
                               "no digest to compare",
                               rsc->id, ID(rsc_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
     if (reason != NULL) {
         pe__clear_failcount(rsc, node, reason, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has failcount clearing scheduled on a node
  *
  * \param[in] node  Node to check
  * \param[in] rsc   Resource to check
  *
  * \return true if \p rsc has failcount clearing scheduled on \p node,
  *         otherwise false
  */
 static bool
 failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
 {
     GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
 
     if (list != NULL) {
         g_list_free(list);
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Ban a resource from a node if it reached its failure threshold there
  *
  * \param[in] rsc       Resource to check failure threshold for
  * \param[in] node      Node to check \p rsc on
  */
 static void
 check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
 {
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                        node);
         return;
 
     } else if (failcount_clear_action_exists(node, rsc)) {
         /* Don't force the resource away from this node due to a failcount
          * that's going to be cleared.
          *
          * @TODO Failcount clearing can be scheduled in
          * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
          * stage5() via check_params(). This runs well before then, so it cannot
          * detect those, meaning we might check the migration threshold when we
          * shouldn't. Worst case, we stop or move the resource, then move it
          * back in the next transition.
          */
         return;
 
     } else {
         pe_resource_t *failed = NULL;
 
         if (pcmk__threshold_reached(rsc, node, &failed)) {
             resource_location(failed, node, -INFINITY, "__fail_limit__",
                               rsc->cluster);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply stickiness to a resource if appropriate
  *
  * \param[in] rsc       Resource to check for stickiness
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
 
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
         return;
     }
 
     /* A resource is sticky if it is managed, has stickiness configured, and is
      * active on a single node.
      */
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
         || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
         return;
     }
 
     node = rsc->running_on->data;
 
     /* In a symmetric cluster, stickiness can always be used. In an
      * asymmetric cluster, we have to check whether the resource is still
      * allowed on the node, so we don't keep the resource somewhere it is no
      * longer explicitly enabled.
      */
     if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
         && (pe_hash_table_lookup(rsc->allowed_nodes,
                                  node->details->id) == NULL)) {
         pe_rsc_debug(rsc,
                      "Ignoring %s stickiness because the cluster is "
                      "asymmetric and node %s is not explicitly allowed",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
                  rsc->id, rsc->stickiness, node->details->uname);
     resource_location(rsc, node, rsc->stickiness, "stickiness",
                       rsc->cluster);
 }
 
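+/*!
+ * \internal
+ * \brief Calculate cluster status, apply node health, and unpack constraints
+ *
+ * \param[in] data_set  Cluster working set
+ *
+ * \return TRUE (FALSE if data_set has no input CIB)
+ */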
 gboolean
 stage0(pe_working_set_t * data_set)
 {
     if (data_set->input == NULL) {
         return FALSE;
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
         crm_trace("Calculating status");
         cluster_status(data_set);
     }
 
     pcmk__set_allocation_methods(data_set);
     pcmk__apply_node_health(data_set);
     pcmk__unpack_constraints(data_set);
 
     return TRUE;
 }
 
 static void
 rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
 {
     pe_resource_t *top = uber_parent(rsc);
     pe_node_t *match;
 
     if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
         return;
     }
 
     g_list_foreach(rsc->children, (GFunc) rsc_discover_filter, node);
 
     match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match && match->rsc_discover_mode != pe_discover_exclusive) {
         match->weight = -INFINITY;
     }
 }
 
 static time_t
 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
     time_t result = 0;
 
     if (shutdown) {
         long long result_ll;
 
         if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
             result = (time_t) result_ll;
         }
     }
     return result? result : get_effective_time(data_set);
 }
 
 static void
 apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *class;
 
     // Only primitives and (uncloned) groups may be locked
     if (rsc->variant == pe_group) {
         g_list_foreach(rsc->children, (GFunc) apply_shutdown_lock, data_set);
     } else if (rsc->variant != pe_native) {
         return;
     }
 
     // Fence devices and remote connections can't be locked
     class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pe__resource_is_remote_conn(rsc, data_set)) {
         return;
     }
 
     if (rsc->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->running_on != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pe_rsc_info(rsc,
                         "Cancelling shutdown lock because %s is already active",
                         rsc->id);
             pe__clear_resource_history(rsc, rsc->lock_node, data_set);
             rsc->lock_node = NULL;
             rsc->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *node = rsc->running_on->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                              rsc->id, node->details->uname);
             } else {
                 rsc->lock_node = node;
                 rsc->lock_time = shutdown_time(node, data_set);
             }
         }
     }
 
     if (rsc->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (data_set->shutdown_lock > 0) {
         time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
 
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                     rsc->id, rsc->lock_node->details->uname,
                     (long long) lock_expiration);
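+        // Hint the controller to re-run the scheduler just after the lock expires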
         pe__update_recheck_time(++lock_expiration, data_set);
     } else {
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                     rsc->id, rsc->lock_node->details->uname);
     }
 
     // If resource is locked to one node, ban it from all other nodes
     for (GList *item = data_set->nodes; item != NULL; item = item->next) {
         pe_node_t *node = item->data;
 
         if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
             resource_location(rsc, node, -CRM_SCORE_INFINITY,
                               XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Stage 2 of cluster status: apply node-specific criteria
  *
  * Count known nodes, and apply location constraints, stickiness, and exclusive
  * resource discovery.
  */
 gboolean
 stage2(pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         g_list_foreach(data_set->resources, (GFunc) apply_shutdown_lock, data_set);
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
         // @COMPAT API backward compatibility
         for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (node && (node->weight >= 0) && node->details->online
                 && (node->details->type != node_ping)) {
                 data_set->max_valid_nodes++;
             }
         }
         crm_trace("Online node count: %d", data_set->max_valid_nodes);
     }
 
     pcmk__apply_locations(data_set);
     g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         GList *gIter2 = NULL;
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         gIter2 = data_set->resources;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
             check_failure_threshold(rsc, node);
             rsc_discover_filter(rsc, node);
         }
     }
 
     return TRUE;
 }
 
 static void
 allocate_resources(pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Allocate remote connection resources first (which will also allocate
          * any colocation dependencies). If the connection is migrating, always
          * prefer the partial migration target.
          */
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
             if (rsc->is_remote_node == FALSE) {
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                          rsc->id);
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
 
     /* now do the rest of the resources */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         if (rsc->is_remote_node == TRUE) {
             continue;
         }
         pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                      crm_element_name(rsc->xml), rsc->id);
         rsc->cmds->allocate(rsc, NULL, data_set);
     }
 }
 
 // Clear fail counts for an orphaned resource on all online nodes
 static void
 cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     GList *gIter = NULL;
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (node->details->online
             && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                 data_set)) {
 
             pe_action_t *clear_op = NULL;
 
             clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                            data_set);
 
             /* We can't use order_action_then_stop() here because its
              * pe_order_preserve breaks things
              */
             pcmk__new_ordering(clear_op->rsc, NULL, clear_op,
                                rsc, stop_key(rsc), NULL,
                                pe_order_optional, data_set);
         }
     }
 }
 
 gboolean
 stage5(pe_working_set_t * data_set)
 {
     pcmk__output_t *out = data_set->priv;
     GList *gIter = NULL;
 
     if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
         pcmk__sort_resources(data_set);
     }
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
             out->message(out, "node-capacity", node, "Original");
         }
     }
 
     crm_trace("Allocating services");
     /* Take the (next) highest-priority resource, allocate it, and create its actions */
 
     allocate_resources(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
             out->message(out, "node-capacity", node, "Remaining");
         }
     }
 
     // Process deferred action checks
     pe__foreach_param_check(data_set, check_params);
     pe__free_param_checks(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Calculating needed probes");
         pcmk__schedule_probes(data_set);
     }
 
     crm_trace("Handle orphans");
     if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
             /* There's no need to recurse into rsc->children because those
              * should just be unallocated clone instances.
              */
             if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
                 cleanup_orphans(rsc, data_set);
             }
         }
     }
 
     crm_trace("Creating actions");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 
     crm_trace("Creating done");
     return TRUE;
 }
 
 static gboolean
 is_managed(const pe_resource_t * rsc)
 {
     GList *gIter = rsc->children;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return TRUE;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (is_managed(child_rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static gboolean
 any_managed_resources(pe_working_set_t * data_set)
 {
     GList *gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         if (is_managed(rsc)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*
  * Create dependencies for stonith and shutdown operations
  */
 gboolean
 stage6(pe_working_set_t * data_set)
 {
     pe_action_t *dc_down = NULL;
     pe_action_t *stonith_op = NULL;
     gboolean integrity_lost = FALSE;
     gboolean need_stonith = TRUE;
     GList *gIter;
     GList *stonith_ops = NULL;
     GList *shutdown_ops = NULL;
 
     /* Remote ordering constraints need to happen prior to calculating fencing
      * because it is one more place we can mark nodes as needing fencing.
      */
     pcmk__order_remote_connection_actions(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     if (any_managed_resources(data_set) == FALSE) {
         crm_notice("Delaying fencing operations until there are resources to manage");
         need_stonith = FALSE;
     }
 
     /* Check each node for stonith/shutdown */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (pe__is_guest_node(node)) {
             if (node->details->remote_requires_reset && need_stonith
                 && pe_can_fence(data_set, node)) {
                 pcmk__fence_guest(node, data_set);
             }
             continue;
         }
 
         stonith_op = NULL;
 
         if (node->details->unclean
             && need_stonith && pe_can_fence(data_set, node)) {
 
             stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
             pe_warn("Scheduling Node %s for STONITH", node->details->uname);
 
             pcmk__order_vs_fence(stonith_op, data_set);
 
             if (node->details->is_dc) {
                 // Remember if the DC is being fenced
                 dc_down = stonith_op;
 
             } else {
 
                 if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
                     && (stonith_ops != NULL)) {
                     /* Concurrent fencing is disabled, so order each non-DC
                      * fencing in a chain. If there is any DC fencing or
                      * shutdown, it will be ordered after the last action in the
                      * chain later.
                      */
                     order_actions((pe_action_t *) stonith_ops->data,
                                   stonith_op, pe_order_optional);
                 }
 
                 // Remember all non-DC fencing actions in a separate list
                 stonith_ops = g_list_prepend(stonith_ops, stonith_op);
             }
 
         } else if (node->details->online && node->details->shutdown &&
                 /* TODO define what a shutdown op means for a remote node.
                  * For now we do not send shutdown operations for remote nodes, but
                  * if we can come up with a good use for this in the future, we will. */
                     pe__is_guest_or_remote_node(node) == FALSE) {
 
             pe_action_t *down_op = pcmk__new_shutdown_action(node, data_set);
 
             if (node->details->is_dc) {
                 // Remember if the DC is being shut down
                 dc_down = down_op;
             } else {
                 // Remember non-DC shutdowns for later ordering
                 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
             }
         }
 
         if (node->details->unclean && stonith_op == NULL) {
             integrity_lost = TRUE;
             pe_warn("Node %s is unclean!", node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
             pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
 
         } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
             crm_notice("Cannot fence unclean nodes until quorum is"
                        " attained (or no-quorum-policy is set to ignore)");
         }
     }
 
     if (dc_down != NULL) {
         /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
          * DC elections. However, we don't want to order non-DC shutdowns before
          * a DC *fencing*, because even though we don't want a node that's
          * shutting down to become DC, the DC fencing could be ordered before a
          * clone stop that's also ordered before the shutdowns, thus leading to
          * a graph loop.
          */
         if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
             for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
                 pe_action_t *node_stop = (pe_action_t *) gIter->data;
 
                 crm_debug("Ordering shutdown on %s before %s on DC %s",
                           node_stop->node->details->uname,
                           dc_down->task, dc_down->node->details->uname);
 
                 order_actions(node_stop, dc_down, pe_order_optional);
             }
         }
 
         // Order any non-DC fencing before any DC fencing or shutdown
 
         if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
             /* With concurrent fencing, order each non-DC fencing action
              * separately before any DC fencing or shutdown.
              */
             for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
                 order_actions((pe_action_t *) gIter->data, dc_down,
                               pe_order_optional);
             }
         } else if (stonith_ops) {
             /* Without concurrent fencing, the non-DC fencing actions are
              * already ordered relative to each other, so we just need to order
              * the DC fencing after the last action in the chain (which is the
              * first item in the list).
              */
             order_actions((pe_action_t *) stonith_ops->data, dc_down,
                           pe_order_optional);
         }
     }
     g_list_free(stonith_ops);
     g_list_free(shutdown_ops);
     return TRUE;
 }
 
 static void
 log_resource_details(pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
     GList *all = NULL;
 
     /* We need a list of nodes that we are allowed to output information for.
      * This is necessary because out->message for all the resource-related
      * messages expects such a list, due to the `crm_mon --node=` feature.  Here,
      * we just make it a list of all the nodes.
      */
     all = g_list_prepend(all, (gpointer) "*");
 
     for (GList *item = data_set->resources; item != NULL; item = item->next) {
         pe_resource_t *rsc = (pe_resource_t *) item->data;
 
         // Log all resources except inactive orphans
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
             || (rsc->role != RSC_ROLE_STOPPED)) {
             out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
         }
     }
 
     g_list_free(all);
 }
 
 static void
 log_all_actions(pe_working_set_t *data_set)
 {
     /* This only ever outputs to the log, so ignore whatever output object was
      * previously set and just log instead.
      */
     pcmk__output_t *prev_out = data_set->priv;
     pcmk__output_t *out = pcmk__new_logger();
 
     if (out == NULL) {
         return;
     }
 
     pcmk__output_set_log_level(out, LOG_NOTICE);
     data_set->priv = out;
 
     out->begin_list(out, NULL, NULL, "Actions");
     pcmk__output_actions(data_set);
     out->end_list(out);
     out->finish(out, CRM_EX_OK, true, NULL);
     pcmk__output_free(out);
 
     data_set->priv = prev_out;
 }
 
 /*!
  * \internal
  * \brief Run the scheduler for a given CIB
  *
  * \param[in,out] data_set  Cluster working set
  * \param[in]     xml_input CIB XML to use as scheduler input
  *
  * \return Transition graph XML (may be NULL if a full run was not done)
  */
 xmlNode *
 pcmk__schedule_actions(pe_working_set_t *data_set, xmlNode *xml_input)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(xml_input || pcmk_is_set(data_set->flags, pe_flag_have_status));
 
     if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
         set_working_set_defaults(data_set);
         data_set->input = xml_input;
 
     } else {
         crm_trace("Already have status - reusing");
     }
 
     crm_trace("Calculate cluster status");
     stage0(data_set);
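+    // If only a configuration check was requested, we are done after stage0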
+    if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
+        return data_set->graph;
+    }
     if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
          pcmk__is_daemon) {
         log_resource_details(data_set);
     }
 
     crm_trace("Applying location constraints");
     stage2(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
         return NULL;
     }
 
     pcmk__create_internal_constraints(data_set);
     pcmk__handle_rsc_config_changes(data_set);
 
     crm_trace("Allocate resources");
     stage5(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     stage6(data_set);
 
     pcmk__apply_orderings(data_set);
     log_all_actions(data_set);
 
     crm_trace("Create transition graph");
     pcmk__create_graph(data_set);
 
     crm_trace("=#=#=#=#= Summary =#=#=#=#=");
     crm_trace("\t========= Set %d (Un-runnable) =========", -1);
     if (get_crm_log_level() == LOG_TRACE) {
         gIter = data_set->actions;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_action_t *action = (pe_action_t *) gIter->data;
 
             if (!pcmk_any_flags_set(action->flags,
                                     pe_action_optional
                                     |pe_action_runnable
                                     |pe_action_pseudo)) {
                 pcmk__log_action("\t", action, true);
             }
         }
     }
 
     return data_set->graph;
 }
diff --git a/tools/crm_verify.c b/tools/crm_verify.c
index 881fa203d5..b20370ae0b 100644
--- a/tools/crm_verify.c
+++ b/tools/crm_verify.c
@@ -1,283 +1,283 @@
 /*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/output_internal.h>
 
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <glib.h>
 
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 #include <crm/msg_xml.h>
 #include <crm/cib.h>
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 const char *SUMMARY = "Check a Pacemaker configuration for errors\n\n"
                       "Check the well-formedness of a complete Pacemaker XML configuration,\n"
                       "its conformance to the configured schema, and the presence of common\n"
                       "misconfigurations. Problems reported as errors must be fixed before the\n"
                       "cluster will work properly. It is left to the administrator to decide\n"
                       "whether to fix problems reported as warnings.";
 
 struct {
     char *cib_save;
     gboolean use_live_cib;
     char *xml_file;
     gboolean xml_stdin;
     char *xml_string;
 } options;
 
-extern gboolean stage0(pe_working_set_t * data_set);
-
 static GOptionEntry data_entries[] = {
     { "live-check", 'L', 0, G_OPTION_ARG_NONE,
       &options.use_live_cib, "Check the configuration used by the running cluster",
       NULL },
     { "xml-file", 'x', 0, G_OPTION_ARG_FILENAME,
       &options.xml_file, "Check the configuration in the named file",
       "FILE" },
     { "xml-pipe", 'p', 0, G_OPTION_ARG_NONE,
       &options.xml_stdin, "Check the configuration piped in via stdin",
       NULL },
     { "xml-text", 'X', 0, G_OPTION_ARG_STRING,
       &options.xml_string, "Check the configuration in the supplied string",
       "XML" },
 
     { NULL }
 };
 
 static GOptionEntry addl_entries[] = {
     { "save-xml", 'S', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME,
       &options.cib_save, "Save verified XML to named file (most useful with -L)",
       "FILE" },
 
     { NULL }
 };
 
 static pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     const char *description = "Examples:\n\n"
                               "Check the consistency of the configuration in the running cluster:\n\n"
                               "\tcrm_verify --live-check\n\n"
                               "Check the consistency of the configuration in a given file and "
                               "produce verbose output:\n\n"
                               "\tcrm_verify --xml-file file.xml --verbose\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     pcmk__add_arg_group(context, "data", "Data sources:",
                         "Show data options", data_entries);
     pcmk__add_arg_group(context, "additional", "Additional options:",
                         "Show additional options", addl_entries);
 
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     xmlNode *cib_object = NULL;
     xmlNode *status = NULL;
 
     pe_working_set_t *data_set = NULL;
     const char *xml_tag = NULL;
 
     int rc = pcmk_rc_ok;
     crm_exit_t exit_code = CRM_EX_OK;
 
     GError *error = NULL;
 
     pcmk__output_t *out = NULL;
 
     GOptionGroup *output_group = NULL;
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     gchar **processed_args = pcmk__cmdline_preproc(argv, "xSX");
     GOptionContext *context = build_arg_context(args, &output_group);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     pcmk__cli_init_logging("crm_verify", args->verbosity);
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     pcmk__register_lib_messages(out);
 
     crm_info("=#=#=#=#= Getting XML =#=#=#=#=");
 
     if (options.use_live_cib) {
         crm_info("Reading XML from: live cluster");
         rc = cib__signon_query(NULL, &cib_object);
 
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc, "CIB query failed: %s", pcmk_rc_str(rc));
             goto done;
         }
 
     } else if (options.xml_file != NULL) {
         cib_object = filename2xml(options.xml_file);
         if (cib_object == NULL) {
             rc = ENODATA;
             g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input file: %s", options.xml_file);
             goto done;
         }
 
     } else if (options.xml_string != NULL) {
         cib_object = string2xml(options.xml_string);
         if (cib_object == NULL) {
             rc = ENODATA;
             g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input string: %s", options.xml_string);
             goto done;
         }
     } else if (options.xml_stdin) {
         cib_object = stdin2xml();
         if (cib_object == NULL) {
             rc = ENODATA;
             g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input from STDIN.");
             goto done;
         }
 
     } else {
         rc = ENODATA;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "No configuration source specified.  Use --help for usage information.");
         goto done;
     }
 
     xml_tag = crm_element_name(cib_object);
     if (!pcmk__str_eq(xml_tag, XML_TAG_CIB, pcmk__str_casei)) {
         rc = EBADMSG;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     "This tool can only check complete configurations (i.e. those starting with <cib>).");
         goto done;
     }
 
     if (options.cib_save != NULL) {
         write_xml_file(cib_object, options.cib_save, FALSE);
     }
 
     status = pcmk_find_cib_element(cib_object, XML_CIB_TAG_STATUS);
     if (status == NULL) {
         create_xml_node(cib_object, XML_CIB_TAG_STATUS);
     }
 
     if (validate_xml(cib_object, NULL, FALSE) == FALSE) {
         pcmk__config_err("CIB did not pass schema validation");
         free_xml(cib_object);
         cib_object = NULL;
 
     } else if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) {
         crm_config_error = TRUE;
         free_xml(cib_object);
         cib_object = NULL;
         out->err(out, "The cluster will NOT be able to use this configuration.\n"
                  "Please manually update the configuration to conform to the %s syntax.",
                  xml_latest_schema());
     }
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         rc = errno;
         crm_perror(LOG_CRIT, "Unable to allocate working set");
         goto done;
     }
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     data_set->priv = out;
 
-    if (cib_object == NULL) {
-    } else if (status != NULL || options.use_live_cib) {
-        /* live queries will always have a status section and can do a full simulation */
+    /* Process the configuration to set crm_config_error/crm_config_warning.
+     *
+     * @TODO Some parts of the configuration are unpacked only when needed (for
+     * example, action configuration), so we aren't necessarily checking those.
+     */
+    if (cib_object != NULL) {
+        if ((status == NULL) && !options.use_live_cib) {
+            // No status available, so do minimal checks
+            pe__set_working_set_flags(data_set, pe_flag_check_config);
+        }
         pcmk__schedule_actions(data_set, cib_object);
-
-    } else {
-        data_set->now = crm_time_new(NULL);
-        data_set->input = cib_object;
-        stage0(data_set);
     }
     pe_free_working_set(data_set);
 
     if (crm_config_error) {
         rc = pcmk_rc_schema_validation;
 
         if (args->verbosity > 0) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Errors found during check: config not valid");
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Errors found during check: config not valid\n-V may provide more details");
         }
 
     } else if (crm_config_warning) {
         rc = pcmk_rc_schema_validation;
 
         if (args->verbosity > 0) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Warnings found during check: config may not be valid");
         } else {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         "Warnings found during check: config may not be valid\n-V may provide more details");
         }
     }
 
   done:
     g_strfreev(processed_args);
     pcmk__free_arg_context(context);
     free(options.cib_save);
     free(options.xml_file);
     free(options.xml_string);
 
     if (exit_code == CRM_EX_OK) {
         exit_code = pcmk_rc2exitc(rc);
     }
 
     pcmk__output_and_clear_error(error, NULL);
 
     if (out != NULL) {
         out->finish(out, exit_code, true, NULL);
         pcmk__output_free(out);
     }
 
     crm_exit(exit_code);
 }