diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index af6956afc0..c48ff511a9 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,444 +1,441 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 
 #  include <stdbool.h>
 #  include <stdint.h>
 #  include <string.h>
 #  include <crm/common/xml.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/options_internal.h>
 #  include <crm/common/output_internal.h>
 #  include <crm/common/scheduler_internal.h>
 
 const char *pe__resource_description(const pcmk_resource_t *rsc,
                                      uint32_t show_opts);
 
 bool pe__clone_is_ordered(const pcmk_resource_t *clone);
 int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag);
 bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags);
 
 bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags);
 pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group);
 
 const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc,
                                               bool include_bundle);
 
 int pe__clone_max(const pcmk_resource_t *clone);
 int pe__clone_node_max(const pcmk_resource_t *clone);
 int pe__clone_promoted_max(const pcmk_resource_t *clone);
 int pe__clone_promoted_node_max(const pcmk_resource_t *clone);
 void pe__create_clone_notifications(pcmk_resource_t *clone);
 void pe__free_clone_notification_data(pcmk_resource_t *clone);
 void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
                                        pcmk_action_t *start,
                                        pcmk_action_t *started,
                                        pcmk_action_t *stop,
                                        pcmk_action_t *stopped);
 
 pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task,
                                          bool optional, bool runnable);
 
 void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone,
                                       bool any_promoting, bool any_demoting);
 
 bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node);
 
 char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
                        const char *name, pcmk_scheduler_t *scheduler);
 pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list,
                              int current);
 void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
                         pcmk_scheduler_t *scheduler, gboolean failed);
 
 gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
 gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
 gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
 gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
 
 pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id,
                                  const pcmk_node_t *node, int flags);
 
 gboolean native_active(pcmk_resource_t *rsc, gboolean all);
 gboolean group_active(pcmk_resource_t *rsc, gboolean all);
 gboolean clone_active(pcmk_resource_t *rsc, gboolean all);
 gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all);
 
 //! \deprecated This function will be removed in a future release
 void native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
                   void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
                                   const pcmk_node_t *node, uint32_t show_opts,
                                   const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                              const char *tag_name, size_t pairs_count, ...);
 char *pe__node_display_name(pcmk_node_t *node, bool print_detail);
 
 
 // Clone notifications (pe_notif.c)
 void pe__order_notifs_after_fencing(const pcmk_action_t *action,
                                     pcmk_resource_t *rsc,
                                     pcmk_action_t *stonith_op);
 
 
 // Resource output methods
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_default(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_default(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 
 void native_free(pcmk_resource_t *rsc);
 void group_free(pcmk_resource_t *rsc);
 void clone_free(pcmk_resource_t *rsc);
 void pe__free_bundle(pcmk_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc,
                                       gboolean current);
 enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc,
                                      gboolean current);
 enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc,
                                      gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pcmk_resource_t *rsc);
 void pe__count_bundle(pcmk_resource_t *rsc);
 
 void common_free(pcmk_resource_t *rsc);
 
 pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node);
 time_t get_effective_time(pcmk_scheduler_t *scheduler);
 
 /* Failure handling utilities (from failcounts.c) */
 
 int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
                      time_t *last_failure, uint32_t flags,
                      const xmlNode *xml_op);
 
 pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc,
                                    const pcmk_node_t *node, const char *reason,
                                    pcmk_scheduler_t *scheduler);
 
 /* Functions for finding/counting a resource's active nodes */
 
 bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
                            pcmk_node_t **active, unsigned int *count_all,
                            unsigned int *count_clean);
 
 pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc,
                                     unsigned int *count);
 
 /* Binary like operators for lists of nodes */
 GHashTable *pe__node_list2table(const GList *list);
 
 pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler);
 gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
                        uint32_t flags);
 
 void pe__show_node_scores_as(const char *file, const char *function,
                              int line, bool to_log, const pcmk_resource_t *rsc,
                              const char *comment, GHashTable *nodes,
                              pcmk_scheduler_t *scheduler);
 
 #define pe__show_node_scores(level, rsc, text, nodes, scheduler)    \
         pe__show_node_scores_as(__FILE__, __func__, __LINE__,      \
                                 (level), (rsc), (text), (nodes), (scheduler))
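 
 /* Illustrative usage sketch: the first macro argument is passed as to_log,
  * so a caller would typically write e.g.
  *
  *     pe__show_node_scores(true, rsc, "Before assignment",
  *                          rsc->allowed_nodes, rsc->cluster);
  *
  * to dump a resource's allowed-node scores to the log (false presumably
  * routes them to regular output instead).
  */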
 
 GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc,
                                      const pcmk_node_t *node,
                                      const char *action_name, guint interval_ms,
                                      const xmlNode *action_config);
 GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
                                            GHashTable *node_attrs,
                                            pcmk_scheduler_t *data_set);
 xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc,
                                   const char *action_name, guint interval_ms,
                                   bool include_disabled);
 
 enum rsc_start_requirement pcmk__action_requires(const pcmk_resource_t *rsc,
                                                  const char *action_name);
 
 enum action_fail_response pcmk__parse_on_fail(const pcmk_resource_t *rsc,
                                               const char *action_name,
                                               guint interval_ms,
                                               const char *value);
 
 enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc,
                                          const char *action_name,
                                          enum action_fail_response on_fail,
                                          GHashTable *meta);
 
 pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task,
                              const pcmk_node_t *on_node, gboolean optional,
                              pcmk_scheduler_t *scheduler);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \
 		optional, rsc->cluster);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), PCMK_ACTION_STOP, node,		\
 		optional, rsc->cluster);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), PCMK_ACTION_START, node,           \
 		optional, rsc->cluster)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node,	\
 		optional, rsc->cluster)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \
 		optional, rsc->cluster)
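 
 /* As an illustration, stop_action(rsc, node, FALSE) expands to
  * custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
  *               PCMK_ACTION_STOP, node, FALSE, rsc->cluster),
  * i.e. it schedules a mandatory stop of rsc on node; the other *_action
  * macros follow the same pattern for their respective tasks.
  */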
 
-extern int pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
-                                     pcmk_scheduler_t *scheduler);
-
 pcmk_action_t *find_first_action(const GList *input, const char *uuid,
                                  const char *task, const pcmk_node_t *on_node);
 
 enum action_tasks get_complex_task(const pcmk_resource_t *rsc,
                                    const char *name);
 
 GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pcmk_node_t *on_node);
 GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pcmk_action_t *action);
 
 void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
                        const char *tag, pcmk_scheduler_t *scheduler);
 
 extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
                            bool same_node_default);
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role);
 void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role,
                        const char *why);
 
 pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc,
                                      const char *sub_id);
 
 extern void destroy_ticket(gpointer data);
 pcmk_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(const pcmk_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return false;
 }
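 
 /* For example, given a clone instance with id "myclone:2" (a hypothetical
  * name), clone_strip() returns "myclone", clone_zero() returns "myclone:0",
  * and pe_base_name_end() points at the last character of "myclone", so
  * pe_base_name_eq(rsc, "myclone") is true for any instance of that clone.
  */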
 
 int pe__target_rc_from_xml(const xmlNode *xml_op);
 
 gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
 bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any);
 
 pcmk__op_digest_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
                                          guint *interval_ms,
                                          const pcmk_node_t *node,
                                          const xmlNode *xml_op,
                                          GHashTable *overrides,
                                          bool calc_secure,
                                          pcmk_scheduler_t *scheduler);
 
 void pe__free_digests(gpointer ptr);
 
 pcmk__op_digest_t *rsc_action_digest_cmp(pcmk_resource_t *rsc,
                                          const xmlNode *xml_op,
                                          pcmk_node_t *node,
                                          pcmk_scheduler_t *scheduler);
 
 pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
                            const char *reason, bool priority_delay,
                            pcmk_scheduler_t *scheduler);
 void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node,
                        const char *reason, pcmk_action_t *dependency,
                        pcmk_scheduler_t *scheduler);
 
 char *pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag);
 void pe_action_set_reason(pcmk_action_t *action, const char *reason,
                           bool overwrite);
 void pe__add_action_expected_result(pcmk_action_t *action, int expected_result);
 
 void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler,
                                      uint64_t flag);
 
 gboolean add_tag_ref(GHashTable *tags, const char *tag_name,
                      const char *obj_ref);
 
 //! \deprecated This function will be removed in a future release
 void print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
                       void *print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
 void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
                    const char *reason, bool priority_delay);
 
 pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                             const char *score, pcmk_scheduler_t *scheduler);
 
 //! \deprecated This function will be removed in a future release
 void common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
                   const pcmk_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
                            const char *name, const pcmk_node_t *node,
                            unsigned int options);
 int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
                            const char *name, const pcmk_node_t *node,
                            unsigned int options);
 
 GList *pe__bundle_containers(const pcmk_resource_t *bundle);
 
 int pe__bundle_max(const pcmk_resource_t *rsc);
 bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
                                  const pcmk_node_t *node);
 pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc);
 const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance);
 pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle);
 void pe__foreach_bundle_replica(pcmk_resource_t *bundle,
                                 bool (*fn)(pcmk__bundle_replica_t *, void *),
                                 void *user_data);
 void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
                                       bool (*fn)(const pcmk__bundle_replica_t *,
                                                  void *),
                                       void *user_data);
 pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle,
                                          const pcmk_node_t *node);
 bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml,
                                        const char *field);
 bool pe__is_universal_clone(const pcmk_resource_t *rsc,
                             const pcmk_scheduler_t *scheduler);
 void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
                          pcmk_node_t *node, enum pcmk__check_parameters,
                          pcmk_scheduler_t *scheduler);
 void pe__foreach_param_check(pcmk_scheduler_t *scheduler,
                              void (*cb)(pcmk_resource_t*, pcmk_node_t*,
                                         const xmlNode*,
                                         enum pcmk__check_parameters));
 void pe__free_param_checks(pcmk_scheduler_t *scheduler);
 
 bool pe__shutdown_requested(const pcmk_node_t *node);
 void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
                              const char *reason);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  *
  * \param[in,out] out  Output object to register messages with
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                                 const pe_rule_eval_data_t *rule_data,
                                 GHashTable *hash, const char *always_first,
                                 gboolean overwrite,
                                 pcmk_scheduler_t *scheduler);
 
 bool pe__resource_is_disabled(const pcmk_resource_t *rsc);
 void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node);
 
 GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
 GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
 bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc,
                      const char *tag);
 bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node,
                        const char *tag);
 
 bool pe__rsc_running_on_only(const pcmk_resource_t *rsc,
                              const pcmk_node_t *node);
 bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list);
 GList *pe__filter_rsc_list(GList *rscs, GList *filter);
 GList *pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s);
 GList *pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s);
 
 bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node);
 
 gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 
 xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name);
 
 const char *pe__clone_child_id(const pcmk_resource_t *rsc);
 
 int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health);
 int pe__node_health(pcmk_node_t *node);
 
 static inline enum pcmk__health_strategy
 pe__health_strategy(pcmk_scheduler_t *scheduler)
 {
     const char *strategy = pcmk__cluster_option(scheduler->config_hash,
                                                 PCMK_OPT_NODE_HEALTH_STRATEGY);
 
     return pcmk__parse_health_strategy(strategy);
 }
 
 static inline int
 pe__health_score(const char *option, pcmk_scheduler_t *scheduler)
 {
     const char *value = pcmk__cluster_option(scheduler->config_hash, option);
 
     return char2score(value);
 }
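 
 /* Illustrative sketch of how these helpers combine (the option name
  * PCMK_OPT_NODE_HEALTH_GREEN and the enum value pcmk__health_strategy_none
  * are assumed here for the example):
  *
  *     if (pe__health_strategy(scheduler) != pcmk__health_strategy_none) {
  *         int green = pe__health_score(PCMK_OPT_NODE_HEALTH_GREEN, scheduler);
  *         // combine with pe__node_health(node) when weighing nodes
  *     }
  */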
 
 #endif
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index 5d482fd991..308841502f 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,1877 +1,1828 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/scheduler_internal.h>
 #include <crm/pengine/internal.h>
 #include <crm/common/xml_internal.h>
 #include "pe_status_private.h"
 
 static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
                              guint interval_ms);
 
 static void
 add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
 {
     if (scheduler->singletons == NULL) {
         scheduler->singletons = pcmk__strkey_table(NULL, NULL);
     }
     g_hash_table_insert(scheduler->singletons, action->uuid, action);
 }
 
 static pcmk_action_t *
 lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
 {
     if (scheduler->singletons == NULL) {
         return NULL;
     }
     return g_hash_table_lookup(scheduler->singletons, action_uuid);
 }
 
 /*!
  * \internal
  * \brief Find an existing action that matches arguments
  *
  * \param[in] key        Action key to match
  * \param[in] rsc        Resource to match (if any)
  * \param[in] node       Node to match (if any)
  * \param[in] scheduler  Scheduler data
  *
  * \return Existing action that matches arguments (or NULL if none)
  */
 static pcmk_action_t *
 find_existing_action(const char *key, const pcmk_resource_t *rsc,
                      const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
 {
     GList *matches = NULL;
     pcmk_action_t *action = NULL;
 
     /* When rsc is NULL, it would be quicker to check scheduler->singletons,
      * but checking all scheduler->actions takes the node into account.
      */
     matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
                            key, node);
     if (matches == NULL) {
         return NULL;
     }
     CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
 
     action = matches->data;
     g_list_free(matches);
     return action;
 }
 
 /*!
  * \internal
  * \brief Find the XML configuration corresponding to a specific action key
  *
  * \param[in] rsc               Resource to find action configuration for
  * \param[in] action_name       Action name to search for
  * \param[in] interval_ms       Action interval (in milliseconds) to search for
  * \param[in] include_disabled  If false, do not return disabled actions
  *
  * \return XML configuration of desired action if any, otherwise NULL
  */
 static xmlNode *
 find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
                          guint interval_ms, bool include_disabled)
 {
     for (xmlNode *operation = pcmk__xe_first_child(rsc->ops_xml, PCMK_XE_OP,
                                                    NULL, NULL);
          operation != NULL; operation = pcmk__xe_next_same(operation)) {
 
         bool enabled = false;
         const char *config_name = NULL;
         const char *interval_spec = NULL;
         guint tmp_ms = 0U;
 
         // @TODO This does not consider meta-attributes, rules, defaults, etc.
         if (!include_disabled
             && (pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
                                        &enabled) == pcmk_rc_ok) && !enabled) {
             continue;
         }
 
         interval_spec = crm_element_value(operation, PCMK_META_INTERVAL);
         pcmk_parse_interval_spec(interval_spec, &tmp_ms);
         if (tmp_ms != interval_ms) {
             continue;
         }
 
         config_name = crm_element_value(operation, PCMK_XA_NAME);
         if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
             return operation;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Find the XML configuration of a resource action
  *
  * \param[in] rsc               Resource to find action configuration for
  * \param[in] action_name       Action name to search for
  * \param[in] interval_ms       Action interval (in milliseconds) to search for
  * \param[in] include_disabled  If false, do not return disabled actions
  *
  * \return XML configuration of desired action if any, otherwise NULL
  */
 xmlNode *
 pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
                          guint interval_ms, bool include_disabled)
 {
     xmlNode *action_config = NULL;
 
     // Try requested action first
     action_config = find_exact_action_config(rsc, action_name, interval_ms,
                                              include_disabled);
 
     // For migrate_to and migrate_from actions, retry with "migrate"
     // @TODO This should be either documented or deprecated
     if ((action_config == NULL)
         && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
         action_config = find_exact_action_config(rsc, "migrate", 0,
                                                  include_disabled);
     }
 
     return action_config;
 }
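 
 /* For example, if a resource's operations define only
  * <op id="migrate" name="migrate" timeout="90s"/> (a hypothetical snippet),
  * then pcmk__find_action_config(rsc, PCMK_ACTION_MIGRATE_TO, 0, false) finds
  * no exact match and falls back to returning that legacy "migrate" entry.
  */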
 
 /*!
  * \internal
  * \brief Create a new action object
  *
  * \param[in]     key        Action key
  * \param[in]     task       Action name
  * \param[in,out] rsc        Resource that action is for (if any)
  * \param[in]     node       Node that action is on (if any)
  * \param[in]     optional   Whether action should be considered optional
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Newly allocated action
  * \note This function takes ownership of \p key. It is the caller's
  *       responsibility to free the return value with pe_free_action().
  */
 static pcmk_action_t *
 new_action(char *key, const char *task, pcmk_resource_t *rsc,
            const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
 {
     pcmk_action_t *action = pcmk__assert_alloc(1, sizeof(pcmk_action_t));
 
     action->rsc = rsc;
     action->task = pcmk__str_copy(task);
     action->uuid = key;
 
     if (node) {
         action->node = pe__copy_node(node);
     }
 
     if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
         // Resource history deletion for a node can be done on the DC
         pcmk__set_action_flags(action, pcmk_action_on_dc);
     }
 
     pcmk__set_action_flags(action, pcmk_action_runnable);
     if (optional) {
         pcmk__set_action_flags(action, pcmk_action_optional);
     } else {
         pcmk__clear_action_flags(action, pcmk_action_optional);
     }
 
     if (rsc == NULL) {
         action->meta = pcmk__strkey_table(free, free);
     } else {
         guint interval_ms = 0;
 
         parse_op_key(key, NULL, NULL, &interval_ms);
         action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
                                                     true);
 
         /* If the given key is for one of the many notification pseudo-actions
          * (pre_notify_promote, etc.), the actual action name is "notify"
          */
         if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
             action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
                                                         0, true);
         }
 
         unpack_operation(action, action->op_entry, interval_ms);
     }
 
     pcmk__rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
                     (optional? "optional" : "required"),
                     scheduler->action_id, key, task,
                     ((rsc == NULL)? "no resource" : rsc->id),
                     pcmk__node_name(node));
     action->id = scheduler->action_id++;
 
     scheduler->actions = g_list_prepend(scheduler->actions, action);
     if (rsc == NULL) {
         add_singleton(scheduler, action);
     } else {
         rsc->actions = g_list_prepend(rsc->actions, action);
     }
     return action;
 }
 
 /*!
  * \internal
  * \brief Unpack a resource's action-specific instance parameters
  *
  * \param[in]     action_xml  XML of action's configuration in CIB (if any)
  * \param[in,out] node_attrs  Table of node attributes (for rule evaluation)
  * \param[in,out] scheduler   Scheduler data (for rule evaluation)
  *
  * \return Newly allocated hash table of action-specific instance parameters
  */
 GHashTable *
 pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
                                GHashTable *node_attrs,
                                pcmk_scheduler_t *scheduler)
 {
     GHashTable *params = pcmk__strkey_table(free, free);
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = node_attrs,
         .now = scheduler->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     pe__unpack_dataset_nvpairs(action_xml, PCMK_XE_INSTANCE_ATTRIBUTES,
                                &rule_data, params, NULL,
                                FALSE, scheduler);
     return params;
 }
 
 /*!
  * \internal
  * \brief Update an action's optional flag
  *
  * \param[in,out] action    Action to update
  * \param[in]     optional  Requested optional status
  */
 static void
 update_action_optional(pcmk_action_t *action, gboolean optional)
 {
     // Force a non-recurring action to be optional if its resource is unmanaged
     if ((action->rsc != NULL) && (action->node != NULL)
         && !pcmk_is_set(action->flags, pcmk_action_pseudo)
         && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
         && (g_hash_table_lookup(action->meta, PCMK_META_INTERVAL) == NULL)) {
             pcmk__rsc_debug(action->rsc,
                             "%s on %s is optional (%s is unmanaged)",
                             action->uuid, pcmk__node_name(action->node),
                             action->rsc->id);
             pcmk__set_action_flags(action, pcmk_action_optional);
             // We shouldn't clear runnable here because ... something
 
     // Otherwise require the action if requested
     } else if (!optional) {
         pcmk__clear_action_flags(action, pcmk_action_optional);
     }
 }
 
 static enum pe_quorum_policy
 effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
 {
     enum pe_quorum_policy policy = scheduler->no_quorum_policy;
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
         policy = pcmk_no_quorum_ignore;
 
     } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
         switch (rsc->role) {
             case pcmk_role_promoted:
             case pcmk_role_unpromoted:
                 if (rsc->next_role > pcmk_role_unpromoted) {
                     pe__set_next_role(rsc, pcmk_role_unpromoted,
                                       PCMK_OPT_NO_QUORUM_POLICY "=demote");
                 }
                 policy = pcmk_no_quorum_ignore;
                 break;
             default:
                 policy = pcmk_no_quorum_stop;
                 break;
         }
     }
     return policy;
 }
 
 /*!
  * \internal
  * \brief Update a resource action's runnable flag
  *
  * \param[in,out] action     Action to update
  * \param[in,out] scheduler  Scheduler data
  *
  * \note This may also schedule fencing if a stop is unrunnable.
  */
 static void
 update_resource_action_runnable(pcmk_action_t *action,
                                 pcmk_scheduler_t *scheduler)
 {
     if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
         return;
     }
 
     if (action->node == NULL) {
         pcmk__rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
                         action->uuid);
         pcmk__clear_action_flags(action, pcmk_action_runnable);
 
     } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
                && !(action->node->details->online)
                && (!pcmk__is_guest_or_bundle_node(action->node)
                    || action->node->details->remote_requires_reset)) {
         pcmk__clear_action_flags(action, pcmk_action_runnable);
         do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
                    action->uuid, pcmk__node_name(action->node));
         if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
             && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
             && !(action->node->details->unclean)) {
             pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
         }
 
     } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
                && action->node->details->pending) {
         pcmk__clear_action_flags(action, pcmk_action_runnable);
         do_crm_log(LOG_WARNING,
                    "Action %s on %s is unrunnable (node is pending)",
                    action->uuid, pcmk__node_name(action->node));
 
     } else if (action->needs == pcmk_requires_nothing) {
         pe_action_set_reason(action, NULL, TRUE);
         if (pcmk__is_guest_or_bundle_node(action->node)
             && !pe_can_fence(scheduler, action->node)) {
             /* An action that requires nothing usually does not require any
              * fencing in order to be runnable. However, there is an exception:
              * such an action cannot be completed if it is on a guest node whose
              * host is unclean and cannot be fenced.
              */
             pcmk__rsc_debug(action->rsc,
                             "%s on %s is unrunnable "
                             "(node's host cannot be fenced)",
                             action->uuid, pcmk__node_name(action->node));
             pcmk__clear_action_flags(action, pcmk_action_runnable);
         } else {
             pcmk__rsc_trace(action->rsc,
                             "%s on %s does not require fencing or quorum",
                             action->uuid, pcmk__node_name(action->node));
             pcmk__set_action_flags(action, pcmk_action_runnable);
         }
 
     } else {
         switch (effective_quorum_policy(action->rsc, scheduler)) {
             case pcmk_no_quorum_stop:
                 pcmk__rsc_debug(action->rsc,
                                 "%s on %s is unrunnable (no quorum)",
                                 action->uuid, pcmk__node_name(action->node));
                 pcmk__clear_action_flags(action, pcmk_action_runnable);
                 pe_action_set_reason(action, "no quorum", true);
                 break;
 
             case pcmk_no_quorum_freeze:
                 if (!action->rsc->fns->active(action->rsc, TRUE)
                     || (action->rsc->next_role > action->rsc->role)) {
                     pcmk__rsc_debug(action->rsc,
                                     "%s on %s is unrunnable (no quorum)",
                                     action->uuid,
                                     pcmk__node_name(action->node));
                     pcmk__clear_action_flags(action, pcmk_action_runnable);
                     pe_action_set_reason(action, "quorum freeze", true);
                 }
                 break;
 
             default:
                 //pe_action_set_reason(action, NULL, TRUE);
                 pcmk__set_action_flags(action, pcmk_action_runnable);
                 break;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update a resource object's flags for a new action on it
  *
  * \param[in,out] rsc     Resource that action is for (if any)
  * \param[in]     action  New action
  */
 static void
 update_resource_flags_for_action(pcmk_resource_t *rsc,
                                  const pcmk_action_t *action)
 {
     /* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
      * within Pacemaker, and will eventually be removed
      */
     if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
         pcmk__set_rsc_flags(rsc, pcmk_rsc_stopping);
 
     } else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
         if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
             pcmk__set_rsc_flags(rsc, pcmk_rsc_starting);
         } else {
             pcmk__clear_rsc_flags(rsc, pcmk_rsc_starting);
         }
     }
 }
 
 static bool
 valid_stop_on_fail(const char *value)
 {
     return !pcmk__strcase_any_of(value,
                                  PCMK_VALUE_STANDBY, PCMK_VALUE_DEMOTE,
                                  PCMK_VALUE_STOP, NULL);
 }
 
 /*!
  * \internal
  * \brief Validate (and possibly reset) resource action's on_fail meta-attribute
  *
  * \param[in]     rsc            Resource that action is for
  * \param[in]     action_name    Action name
  * \param[in]     action_config  Action configuration XML from CIB (if any)
  * \param[in,out] meta           Table of action meta-attributes
  */
 static void
 validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
                  const xmlNode *action_config, GHashTable *meta)
 {
     const char *name = NULL;
     const char *role = NULL;
     const char *interval_spec = NULL;
     const char *value = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
     guint interval_ms = 0U;
 
     // Stop actions can only use certain on-fail values
     if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
         && !valid_stop_on_fail(value)) {
 
         pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s stop "
                          "action to default value because '%s' is not "
                          "allowed for stop", rsc->id, value);
         g_hash_table_remove(meta, PCMK_META_ON_FAIL);
         return;
     }
 
     /* Demote actions default on-fail to the on-fail value for the first
      * recurring monitor for the promoted role (if any).
      */
     if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
         && (value == NULL)) {
 
         /* @TODO This does not consider promote options set in a meta-attribute
          * block (which may have rules that need to be evaluated) rather than
          * XML properties.
          */
         for (xmlNode *operation = pcmk__xe_first_child(rsc->ops_xml, PCMK_XE_OP,
                                                        NULL, NULL);
              operation != NULL; operation = pcmk__xe_next_same(operation)) {
 
             bool enabled = false;
             const char *promote_on_fail = NULL;
 
             /* We only care about explicit on-fail (if promote uses default, so
              * can demote)
              */
             promote_on_fail = crm_element_value(operation, PCMK_META_ON_FAIL);
             if (promote_on_fail == NULL) {
                 continue;
             }
 
             // We only care about recurring monitors for the promoted role
             name = crm_element_value(operation, PCMK_XA_NAME);
             role = crm_element_value(operation, PCMK_XA_ROLE);
             if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
                 || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
                                          PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
                 continue;
             }
             interval_spec = crm_element_value(operation, PCMK_META_INTERVAL);
             pcmk_parse_interval_spec(interval_spec, &interval_ms);
             if (interval_ms == 0U) {
                 continue;
             }
 
             // We only care about enabled monitors
             if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
                                         &enabled) == pcmk_rc_ok) && !enabled) {
                 continue;
             }
 
             /* Demote actions can't default to
              * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
              */
             if (pcmk__str_eq(promote_on_fail, PCMK_VALUE_DEMOTE,
                              pcmk__str_casei)) {
                 continue;
             }
 
             // Use value from first applicable promote action found
             pcmk__insert_dup(meta, PCMK_META_ON_FAIL, promote_on_fail);
         }
         return;
     }
 
     if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
         && !pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
 
         pcmk__insert_dup(meta, PCMK_META_ON_FAIL, PCMK_VALUE_IGNORE);
         return;
     }
 
     // PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE is allowed only for certain actions
     if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
         name = crm_element_value(action_config, PCMK_XA_NAME);
         role = crm_element_value(action_config, PCMK_XA_ROLE);
         interval_spec = crm_element_value(action_config, PCMK_META_INTERVAL);
         pcmk_parse_interval_spec(interval_spec, &interval_ms);
 
         if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
             && ((interval_ms == 0U)
                 || !pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
                 || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
                                          PCMK__ROLE_PROMOTED_LEGACY, NULL))) {
 
             pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s %s "
                              "action to default value because 'demote' is not "
                              "allowed for it", rsc->id, name);
             g_hash_table_remove(meta, PCMK_META_ON_FAIL);
             return;
         }
     }
 }
 
 static int
 unpack_timeout(const char *value)
 {
     long long timeout_ms = crm_get_msec(value);
 
     if (timeout_ms < 0) {
         timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
     }
     return (int) QB_MIN(timeout_ms, INT_MAX);
 }
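 
 /* For example, unpack_timeout("20s") yields 20000, while a missing or
  * unparseable value (crm_get_msec() is assumed to return a negative result
  * in that case) falls back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
  */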
 
 // true if value contains valid, non-NULL interval origin for recurring op
 static bool
 unpack_interval_origin(const char *value, const xmlNode *xml_obj,
                        guint interval_ms, const crm_time_t *now,
                        long long *start_delay)
 {
     long long result = 0;
     guint interval_sec = interval_ms / 1000;
     crm_time_t *origin = NULL;
 
     // Ignore unspecified values and non-recurring operations
     if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
         return false;
     }
 
     // Parse interval origin from text
     origin = crm_time_new(value);
     if (origin == NULL) {
         pcmk__config_err("Ignoring '" PCMK_META_INTERVAL_ORIGIN "' for "
                          "operation '%s' because '%s' is not valid",
                          pcmk__s(pcmk__xe_id(xml_obj), "(missing ID)"), value);
         return false;
     }
 
     // Get seconds since origin (negative if origin is in the future)
     result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
     crm_time_free(origin);
 
     // Calculate seconds from closest interval to now
     result = result % interval_sec;
 
     // Calculate seconds remaining until next interval
     result = ((result <= 0)? 0 : interval_sec) - result;
     crm_info("Calculated a start delay of %llds for operation '%s'",
              result, pcmk__s(pcmk__xe_id(xml_obj), "(unspecified)"));
 
     if (start_delay != NULL) {
         *start_delay = result * 1000; // milliseconds
     }
     return true;
 }
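 
 /* Worked example: with an interval origin of 14:00, a 30-minute interval,
  * and now at 14:50, seconds-since-origin is 3000, 3000 % 1800 = 1200, and
  * the delay is (1800 - 1200) * 1000 = 600000 ms, so the first run lines up
  * with the next origin-aligned boundary at 15:00.
  */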
 
 static int
 unpack_start_delay(const char *value, GHashTable *meta)
 {
     long long start_delay_ms = 0;
 
     if (value == NULL) {
         return 0;
     }
 
     start_delay_ms = crm_get_msec(value);
     start_delay_ms = QB_MIN(start_delay_ms, INT_MAX);
     if (start_delay_ms < 0) {
         start_delay_ms = 0;
     }
 
     if (meta != NULL) {
         g_hash_table_replace(meta, strdup(PCMK_META_START_DELAY),
                              pcmk__itoa(start_delay_ms));
     }
 
     return (int) start_delay_ms;
 }
 
 /*!
  * \internal
  * \brief Find a resource's most frequent recurring monitor
  *
  * \param[in] rsc  Resource to check
  *
  * \return Operation XML configured for most frequent recurring monitor for
  *         \p rsc (if any)
  */
 static xmlNode *
 most_frequent_monitor(const pcmk_resource_t *rsc)
 {
     guint min_interval_ms = G_MAXUINT;
     xmlNode *op = NULL;
 
     for (xmlNode *operation = pcmk__xe_first_child(rsc->ops_xml, PCMK_XE_OP,
                                                    NULL, NULL);
          operation != NULL; operation = pcmk__xe_next_same(operation)) {
 
         bool enabled = false;
         guint interval_ms = 0U;
         const char *interval_spec = crm_element_value(operation,
                                                       PCMK_META_INTERVAL);
 
         // We only care about enabled recurring monitors
         if (!pcmk__str_eq(crm_element_value(operation, PCMK_XA_NAME),
                           PCMK_ACTION_MONITOR, pcmk__str_none)) {
             continue;
         }
 
         pcmk_parse_interval_spec(interval_spec, &interval_ms);
         if (interval_ms == 0U) {
             continue;
         }
 
         // @TODO This does not consider meta-attributes, rules, defaults, etc.
         if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
                                     &enabled) == pcmk_rc_ok) && !enabled) {
             continue;
         }
 
         if (interval_ms < min_interval_ms) {
             min_interval_ms = interval_ms;
             op = operation;
         }
     }
     return op;
 }
 
 /*!
  * \internal
  * \brief Unpack action meta-attributes
  *
  * \param[in,out] rsc            Resource that action is for
  * \param[in]     node           Node that action is on
  * \param[in]     action_name    Action name
  * \param[in]     interval_ms    Action interval (in milliseconds)
  * \param[in]     action_config  Action XML configuration from CIB (if any)
  *
  * Unpack a resource action's meta-attributes (normalizing the interval,
  * timeout, and start delay values as integer milliseconds) from its CIB XML
  * configuration (including defaults).
  *
  * \return Newly allocated hash table with normalized action meta-attributes
  */
 GHashTable *
 pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
                          const char *action_name, guint interval_ms,
                          const xmlNode *action_config)
 {
     GHashTable *meta = NULL;
     const char *timeout_spec = NULL;
     const char *str = NULL;
 
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(rsc->xml, PCMK_XA_CLASS),
         .provider = crm_element_value(rsc->xml, PCMK_XA_PROVIDER),
         .agent = crm_element_value(rsc->xml, PCMK_XA_TYPE),
     };
 
     pe_op_eval_data_t op_rule_data = {
         .op_name = action_name,
         .interval = interval_ms,
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = (node == NULL)? NULL : node->details->attrs,
         .now = rsc->cluster->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = &op_rule_data,
     };
 
     meta = pcmk__strkey_table(free, free);
 
     // Cluster-wide <op_defaults> <meta_attributes>
     pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults,
                                PCMK_XE_META_ATTRIBUTES, &rule_data, meta, NULL,
                                FALSE, rsc->cluster);
 
     // Derive default timeout for probes from recurring monitor timeouts
     if (pcmk_is_probe(action_name, interval_ms)) {
         xmlNode *min_interval_mon = most_frequent_monitor(rsc);
 
         if (min_interval_mon != NULL) {
             /* @TODO This does not consider timeouts set in
              * PCMK_XE_META_ATTRIBUTES blocks (which may also have rules that
              * need to be evaluated).
              */
             timeout_spec = crm_element_value(min_interval_mon,
                                              PCMK_META_TIMEOUT);
             if (timeout_spec != NULL) {
                 pcmk__rsc_trace(rsc,
                                 "Setting default timeout for %s probe to "
                                 "most frequent monitor's timeout '%s'",
                                 rsc->id, timeout_spec);
                 pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec);
             }
         }
     }
 
     if (action_config != NULL) {
         // <op> <meta_attributes> take precedence over defaults
         pe__unpack_dataset_nvpairs(action_config, PCMK_XE_META_ATTRIBUTES,
                                    &rule_data, meta, NULL, TRUE, rsc->cluster);
 
         /* Anything set as an <op> XML property has highest precedence.
          * This ensures we use the name and interval from the <op> tag.
          * (See below for the only exception, fence device start/probe timeout.)
          */
         for (xmlAttrPtr attr = action_config->properties;
              attr != NULL; attr = attr->next) {
             pcmk__insert_dup(meta, (const char *) attr->name,
                              pcmk__xml_attr_value(attr));
         }
     }
 
     g_hash_table_remove(meta, PCMK_XA_ID);
 
     // Normalize interval to milliseconds
     if (interval_ms > 0) {
         g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_INTERVAL),
                             crm_strdup_printf("%u", interval_ms));
     } else {
         g_hash_table_remove(meta, PCMK_META_INTERVAL);
     }
 
     /* Timeout order of precedence (highest to lowest):
      *   1. pcmk_monitor_timeout resource parameter (only for starts and probes
      *      when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
      *      monitors via the executor instead)
      *   2. timeout configured in <op> (with <op timeout> taking precedence over
      *      <op> <meta_attributes>)
      *   3. timeout configured in <op_defaults> <meta_attributes>
      *   4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
      */
 
     // Check for pcmk_monitor_timeout
     if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
                     pcmk_ra_cap_fence_params)
         && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
             || pcmk_is_probe(action_name, interval_ms))) {
 
         GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
 
         timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
         if (timeout_spec != NULL) {
             pcmk__rsc_trace(rsc,
                             "Setting timeout for %s %s to "
                             "pcmk_monitor_timeout (%s)",
                             rsc->id, action_name, timeout_spec);
             pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec);
         }
     }
 
     // Normalize timeout to positive milliseconds
     timeout_spec = g_hash_table_lookup(meta, PCMK_META_TIMEOUT);
     g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_TIMEOUT),
                         pcmk__itoa(unpack_timeout(timeout_spec)));
 
     // Ensure on-fail has a valid value
     validate_on_fail(rsc, action_name, action_config, meta);
 
     // Normalize PCMK_META_START_DELAY
     str = g_hash_table_lookup(meta, PCMK_META_START_DELAY);
     if (str != NULL) {
         unpack_start_delay(str, meta);
     } else {
         long long start_delay = 0;
 
         str = g_hash_table_lookup(meta, PCMK_META_INTERVAL_ORIGIN);
         if (unpack_interval_origin(str, action_config, interval_ms,
                                    rsc->cluster->now, &start_delay)) {
             g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_START_DELAY),
                                 crm_strdup_printf("%lld", start_delay));
         }
     }
     return meta;
 }
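 
 /* For example, with <op name="monitor" interval="10s" timeout="90s"/> (a
  * hypothetical snippet) and an <op_defaults> timeout of 60s, the returned
  * table ends up with PCMK_META_INTERVAL "10000" and PCMK_META_TIMEOUT
  * "90000": the <op> attribute wins and both values are normalized to
  * milliseconds.
  */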
 
 /*!
  * \internal
  * \brief Determine an action's quorum and fencing dependency
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] action_name  Name of action being unpacked
  *
  * \return Quorum and fencing dependency appropriate to action
  */
 enum rsc_start_requirement
 pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
 {
     const char *value = NULL;
     enum rsc_start_requirement requires = pcmk_requires_nothing;
 
     CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
 
     if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
                               PCMK_ACTION_PROMOTE, NULL)) {
         value = "nothing (not start or promote)";
 
     } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
         requires = pcmk_requires_fencing;
         value = "fencing";
 
     } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_quorum)) {
         requires = pcmk_requires_quorum;
         value = "quorum";
 
     } else {
         value = "nothing";
     }
     pcmk__rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
     return requires;
 }
 
 /*!
  * \internal
  * \brief Parse action failure response from a user-provided string
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] action_name  Name of action
  * \param[in] interval_ms  Action interval (in milliseconds)
  * \param[in] value        User-provided configuration value for on-fail
  *
  * \return Action failure response parsed from \p value
  */
 enum action_fail_response
 pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
                     guint interval_ms, const char *value)
 {
     const char *desc = NULL;
     bool needs_remote_reset = false;
     enum action_fail_response on_fail = pcmk_on_fail_ignore;
 
     // There's no enum value for unknown or invalid, so assert
     CRM_ASSERT((rsc != NULL) && (action_name != NULL));
 
     if (value == NULL) {
         // Use default
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) {
         on_fail = pcmk_on_fail_block;
         desc = "block";
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE, pcmk__str_casei)) {
         if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
             on_fail = pcmk_on_fail_fence_node;
             desc = "node fencing";
         } else {
             pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for "
                              "%s of %s to 'stop' because 'fence' is not "
                              "valid when fencing is disabled",
                              action_name, rsc->id);
             on_fail = pcmk_on_fail_stop;
             desc = "stop resource";
         }
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_STANDBY, pcmk__str_casei)) {
         on_fail = pcmk_on_fail_standby_node;
         desc = "node standby";
 
     } else if (pcmk__strcase_any_of(value,
                                     PCMK_VALUE_IGNORE, PCMK_VALUE_NOTHING,
                                     NULL)) {
         desc = "ignore";
 
     } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
         on_fail = pcmk_on_fail_ban;
         desc = "force migration";
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_STOP, pcmk__str_casei)) {
         on_fail = pcmk_on_fail_stop;
         desc = "stop resource";
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) {
         on_fail = pcmk_on_fail_restart;
         desc = "restart (and possibly migrate)";
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART_CONTAINER,
                             pcmk__str_casei)) {
         if (rsc->container == NULL) {
             pcmk__rsc_debug(rsc,
                             "Using default " PCMK_META_ON_FAIL " for %s "
                             "of %s because it does not have a container",
                             action_name, rsc->id);
         } else {
             on_fail = pcmk_on_fail_restart_container;
             desc = "restart container (and possibly migrate)";
         }
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
         on_fail = pcmk_on_fail_demote;
         desc = "demote instance";
 
     } else {
         pcmk__config_err("Using default '" PCMK_META_ON_FAIL "' for "
                          "%s of %s because '%s' is not valid",
                          action_name, rsc->id, value);
     }
 
     /* Remote node connections are handled specially. Failures that result
      * in dropping an active connection must result in fencing. The only
      * failures that don't are probes and starts. The user can explicitly set
      * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start failures.
      */
     if (rsc->is_remote_node
         && pcmk__is_remote_node(pe_find_node(rsc->cluster->nodes, rsc->id))
         && !pcmk_is_probe(action_name, interval_ms)
         && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
         needs_remote_reset = true;
         if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
             desc = NULL; // Force default for unmanaged connections
         }
     }
 
     if (desc != NULL) {
         // Explicit value used, default not needed
 
     } else if (rsc->container != NULL) {
         on_fail = pcmk_on_fail_restart_container;
         desc = "restart container (and possibly migrate) (default)";
 
     } else if (needs_remote_reset) {
         if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
             if (pcmk_is_set(rsc->cluster->flags,
                             pcmk_sched_fencing_enabled)) {
                 desc = "fence remote node (default)";
             } else {
                 desc = "recover remote node connection (default)";
             }
             on_fail = pcmk_on_fail_reset_remote;
         } else {
             on_fail = pcmk_on_fail_stop;
             desc = "stop unmanaged remote node (enforcing default)";
         }
 
     } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
         if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
             on_fail = pcmk_on_fail_fence_node;
             desc = "resource fence (default)";
         } else {
             on_fail = pcmk_on_fail_block;
             desc = "resource block (default)";
         }
 
     } else {
         on_fail = pcmk_on_fail_restart;
         desc = "restart (and possibly migrate) (default)";
     }
 
     pcmk__rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
                     pcmk__readable_interval(interval_ms), action_name,
                     rsc->id, desc);
     return on_fail;
 }
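 
 /* Illustrative sketch (assuming rsc is unpacked and meta is a hypothetical
  * table of the action's meta-attributes): an explicit PCMK_META_ON_FAIL value
  * is parsed, while NULL selects the per-action default chosen above.
  *
  *     const char *cfg = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
  *     enum action_fail_response on_fail;
  *
  *     // "fence" maps to pcmk_on_fail_fence_node only if fencing is enabled
  *     on_fail = pcmk__parse_on_fail(rsc, PCMK_ACTION_MONITOR, 10000, cfg);
  *
  *     // NULL picks the default (restart for most actions, fence/block for stop)
  *     on_fail = pcmk__parse_on_fail(rsc, PCMK_ACTION_STOP, 0, NULL);
  */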
 
 /*!
  * \internal
  * \brief Determine a resource's role after failure of an action
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] action_name  Action name
  * \param[in] on_fail      Failure handling for action
  * \param[in] meta         Unpacked action meta-attributes
  *
  * \return Resource role that results from failure of action
  */
 enum rsc_role_e
 pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
                          enum action_fail_response on_fail, GHashTable *meta)
 {
     const char *value = NULL;
     enum rsc_role_e role = pcmk_role_unknown;
 
     // Set a special default for the role after failure in certain cases
     switch (on_fail) {
         case pcmk_on_fail_stop:
             role = pcmk_role_stopped;
             break;
 
         case pcmk_on_fail_reset_remote:
             if (rsc->remote_reconnect_ms != 0) {
                 role = pcmk_role_stopped;
             }
             break;
 
         default:
             break;
     }
 
     // @COMPAT Check for explicitly configured role (deprecated)
     value = g_hash_table_lookup(meta, PCMK__META_ROLE_AFTER_FAILURE);
     if (value != NULL) {
         pcmk__warn_once(pcmk__wo_role_after,
                         "Support for " PCMK__META_ROLE_AFTER_FAILURE " is "
                         "deprecated and will be removed in a future release");
         if (role == pcmk_role_unknown) {
             role = pcmk_parse_role(value);
             if (role == pcmk_role_unknown) {
                 pcmk__config_err("Ignoring invalid value %s for "
                                  PCMK__META_ROLE_AFTER_FAILURE,
                                  value);
             }
         }
     }
 
     if (role == pcmk_role_unknown) {
         // Use default
         if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
             role = pcmk_role_unpromoted;
         } else {
             role = pcmk_role_started;
         }
     }
     pcmk__rsc_trace(rsc, "Role after %s %s failure is: %s",
                     rsc->id, action_name, pcmk_role_text(role));
     return role;
 }
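 
 /* Illustrative sketch (meta being the action's unpacked meta-attributes, with
  * no deprecated role setting): the role after failure follows the handling
  * chosen by pcmk__parse_on_fail(); a stop-on-fail policy leaves the resource
  * stopped, while default restart handling leaves a failed start "started".
  *
  *     enum rsc_role_e fail_role;
  *
  *     fail_role = pcmk__role_after_failure(rsc, PCMK_ACTION_PROMOTE,
  *                                          pcmk_on_fail_stop, meta);    // stopped
  *     fail_role = pcmk__role_after_failure(rsc, PCMK_ACTION_START,
  *                                          pcmk_on_fail_restart, meta); // started
  */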
 
 /*!
  * \internal
  * \brief Unpack action configuration
  *
  * Unpack a resource action's meta-attributes (normalizing the interval,
  * timeout, and start delay values as integer milliseconds), requirements, and
  * failure policy from its CIB XML configuration (including defaults).
  *
  * \param[in,out] action       Resource action to unpack into
  * \param[in]     xml_obj      Action configuration XML (NULL for defaults only)
  * \param[in]     interval_ms  How frequently to perform the operation
  */
 static void
 unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
                  guint interval_ms)
 {
     const char *value = NULL;
 
     action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
                                             action->task, interval_ms, xml_obj);
     action->needs = pcmk__action_requires(action->rsc, action->task);
 
     value = g_hash_table_lookup(action->meta, PCMK_META_ON_FAIL);
     action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
                                           interval_ms, value);
 
     action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
                                                  action->on_fail, action->meta);
 }
 
 /*!
  * \brief Create or update an action object
  *
  * \param[in,out] rsc          Resource that action is for (if any)
  * \param[in,out] key          Action key (must be non-NULL)
  * \param[in]     task         Action name (must be non-NULL)
  * \param[in]     on_node      Node that action is on (if any)
  * \param[in]     optional     Whether action should be considered optional
  * \param[in,out] scheduler    Scheduler data
  *
  * \return Action object corresponding to arguments (guaranteed not to be
  *         \c NULL)
  * \note This function takes ownership of (and might free) \p key, and
  *       \p scheduler takes ownership of the returned action (the caller should
  *       not free it).
  */
 pcmk_action_t *
 custom_action(pcmk_resource_t *rsc, char *key, const char *task,
               const pcmk_node_t *on_node, gboolean optional,
               pcmk_scheduler_t *scheduler)
 {
     pcmk_action_t *action = NULL;
 
     CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
 
     action = find_existing_action(key, rsc, on_node, scheduler);
     if (action == NULL) {
         action = new_action(key, task, rsc, on_node, optional, scheduler);
     } else {
         free(key);
     }
 
     update_action_optional(action, optional);
 
     if (rsc != NULL) {
         /* An action can be initially created with a NULL node, and later have
          * the node added via find_existing_action() (above) -> find_actions().
          * That is why the extra parameters are unpacked here rather than in
          * new_action().
          */
         if ((action->node != NULL) && (action->op_entry != NULL)
             && !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
 
             GHashTable *attrs = action->node->details->attrs;
 
             if (action->extra != NULL) {
                 g_hash_table_destroy(action->extra);
             }
             action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
                                                            attrs, scheduler);
             pcmk__set_action_flags(action, pcmk_action_attrs_evaluated);
         }
 
         update_resource_action_runnable(action, scheduler);
         update_resource_flags_for_action(rsc, action);
     }
 
     if (action->extra == NULL) {
         action->extra = pcmk__strkey_table(free, free);
     }
 
     return action;
 }
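 
 /* Illustrative sketch: the usual calling pattern hands a freshly built key to
  * custom_action(), which takes ownership of it (and frees it if an equivalent
  * action already exists), so the caller never frees the key itself.
  *
  *     pcmk_action_t *stop = custom_action(rsc,
  *                                         pcmk__op_key(rsc->id,
  *                                                      PCMK_ACTION_STOP, 0),
  *                                         PCMK_ACTION_STOP, node, FALSE,
  *                                         rsc->cluster);
  */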
 
 pcmk_action_t *
 get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
 {
     pcmk_action_t *op = lookup_singleton(scheduler, name);
 
     if (op == NULL) {
         op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
         pcmk__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
     }
     return op;
 }
 
 static GList *
 find_unfencing_devices(GList *candidates, GList *matches)
 {
     for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
         pcmk_resource_t *candidate = gIter->data;
 
         if (candidate->children != NULL) {
             matches = find_unfencing_devices(candidate->children, matches);
 
         } else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
             continue;
 
         } else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
             matches = g_list_prepend(matches, candidate);
 
         } else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
                                                     PCMK_STONITH_PROVIDES),
                                 PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
             matches = g_list_prepend(matches, candidate);
         }
     }
     return matches;
 }
 
 static int
 node_priority_fencing_delay(const pcmk_node_t *node,
                             const pcmk_scheduler_t *scheduler)
 {
     int member_count = 0;
     int online_count = 0;
     int top_priority = 0;
     int lowest_priority = 0;
     GList *gIter = NULL;
 
     // PCMK_OPT_PRIORITY_FENCING_DELAY is disabled
     if (scheduler->priority_fencing_delay <= 0) {
         return 0;
     }
 
     /* No need to request a delay if the fencing target is not a normal cluster
      * member, for example if it's a remote node or a guest node. */
     if (node->details->type != pcmk_node_variant_cluster) {
         return 0;
     }
 
     // No need to request a delay if the fencing target is in our partition
     if (node->details->online) {
         return 0;
     }
 
     for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
         pcmk_node_t *n = gIter->data;
 
         if (n->details->type != pcmk_node_variant_cluster) {
             continue;
         }
 
         member_count ++;
 
         if (n->details->online) {
             online_count++;
         }
 
         if (member_count == 1
             || n->details->priority > top_priority) {
             top_priority = n->details->priority;
         }
 
         if (member_count == 1
             || n->details->priority < lowest_priority) {
             lowest_priority = n->details->priority;
         }
     }
 
     // No need to delay if we have more than half of the cluster members
     if (online_count > member_count / 2) {
         return 0;
     }
 
     /* All the nodes have equal priority. Any configured `pcmk_delay_base/max`
      * will be applied. */
     if (lowest_priority == top_priority) {
         return 0;
     }
 
     if (node->details->priority < top_priority) {
         return 0;
     }
 
     return scheduler->priority_fencing_delay;
 }
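 
 /* Illustrative example: in a two-node cluster with
  * PCMK_OPT_PRIORITY_FENCING_DELAY=10s, node1 at priority 2 and node2 at
  * priority 1, a split leaves each side seeing only half the members online,
  * so none of the early returns above apply: fencing of node1 (the
  * top-priority target) is delayed by 10s, while fencing of node2 gets no
  * extra delay, giving node1's side a head start in the fencing race.
  */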
 
 pcmk_action_t *
 pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
             const char *reason, bool priority_delay,
             pcmk_scheduler_t *scheduler)
 {
     char *op_key = NULL;
     pcmk_action_t *stonith_op = NULL;
 
     if (op == NULL) {
         op = scheduler->stonith_action;
     }
 
     op_key = crm_strdup_printf("%s-%s-%s",
                                PCMK_ACTION_STONITH, node->details->uname, op);
 
     stonith_op = lookup_singleton(scheduler, op_key);
     if (stonith_op == NULL) {
         stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
                                    TRUE, scheduler);
 
         pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE, node->details->uname);
         pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE_UUID,
                           node->details->id);
         pcmk__insert_meta(stonith_op, PCMK__META_STONITH_ACTION, op);
 
         if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
             /* Extra work to detect device changes
              */
             GString *digests_all = g_string_sized_new(1024);
             GString *digests_secure = g_string_sized_new(1024);
 
             GList *matches = find_unfencing_devices(scheduler->resources, NULL);
 
             for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
                 pcmk_resource_t *match = gIter->data;
                 const char *agent = g_hash_table_lookup(match->meta,
                                                         PCMK_XA_TYPE);
                 pcmk__op_digest_t *data = NULL;
 
                 data = pe__compare_fencing_digest(match, agent, node,
                                                   scheduler);
                 if (data->rc == pcmk__digest_mismatch) {
                     optional = FALSE;
                     crm_notice("Unfencing node %s because the definition of "
                                "%s changed", pcmk__node_name(node), match->id);
                     if (!pcmk__is_daemon && scheduler->priv != NULL) {
                         pcmk__output_t *out = scheduler->priv;
 
                         out->info(out,
                                   "notice: Unfencing node %s because the "
                                   "definition of %s changed",
                                   pcmk__node_name(node), match->id);
                     }
                 }
 
                 pcmk__g_strcat(digests_all,
                                match->id, ":", agent, ":",
                                data->digest_all_calc, ",", NULL);
                 pcmk__g_strcat(digests_secure,
                                match->id, ":", agent, ":",
                                data->digest_secure_calc, ",", NULL);
             }
             pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_ALL,
                              digests_all->str);
             g_string_free(digests_all, TRUE);
 
             pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_SECURE,
                              digests_secure->str);
             g_string_free(digests_secure, TRUE);
         }
 
     } else {
         free(op_key);
     }
 
     if (scheduler->priority_fencing_delay > 0
 
             /* This is a case where PCMK_OPT_PRIORITY_FENCING_DELAY applies,
              * so add the PCMK_OPT_PRIORITY_FENCING_DELAY field at least as
              * an indicator.
              */
         && (priority_delay
 
             /* The priority delay needs to be recalculated if this function has
              * been called by schedule_fencing_and_shutdowns() after node
              * priority has already been calculated by native_add_running().
              */
             || g_hash_table_lookup(stonith_op->meta,
                                    PCMK_OPT_PRIORITY_FENCING_DELAY) != NULL)) {
 
             /* Add PCMK_OPT_PRIORITY_FENCING_DELAY to the fencing op even if
              * it's 0 for the target node, so that it takes precedence over
              * any configured `pcmk_delay_base/max`.
              */
             char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
                                                                    scheduler));
 
             g_hash_table_insert(stonith_op->meta,
                                 strdup(PCMK_OPT_PRIORITY_FENCING_DELAY),
                                 delay_s);
     }
 
     if (optional == FALSE && pe_can_fence(scheduler, node)) {
         pcmk__clear_action_flags(stonith_op, pcmk_action_optional);
         pe_action_set_reason(stonith_op, reason, false);
 
     } else if (reason && stonith_op->reason == NULL) {
         stonith_op->reason = strdup(reason);
     }
 
     return stonith_op;
 }
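 
 /* Illustrative sketch: callers typically request the cluster's default fence
  * action (op == NULL) for an unclean node, with a reason describing why
  * fencing became required.
  *
  *     pcmk_action_t *fencing = pe_fence_op(node, NULL, false,
  *                                          "node is unclean", false,
  *                                          scheduler);
  */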
 
 void
 pe_free_action(pcmk_action_t *action)
 {
     if (action == NULL) {
         return;
     }
     g_list_free_full(action->actions_before, free);
     g_list_free_full(action->actions_after, free);
     if (action->extra) {
         g_hash_table_destroy(action->extra);
     }
     if (action->meta) {
         g_hash_table_destroy(action->meta);
     }
     free(action->cancel_task);
     free(action->reason);
     free(action->task);
     free(action->uuid);
     free(action->node);
     free(action);
 }
 
-int
-pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
-                          pcmk_scheduler_t *scheduler)
-{
-    xmlNode *child = NULL;
-    GHashTable *action_meta = NULL;
-    const char *timeout_spec = NULL;
-    long long timeout_ms = 0;
-
-    pe_rule_eval_data_t rule_data = {
-        .node_hash = NULL,
-        .now = scheduler->now,
-        .match_data = NULL,
-        .rsc_data = NULL,
-        .op_data = NULL
-    };
-
-    for (child = pcmk__xe_first_child(rsc->ops_xml, PCMK_XE_OP, NULL, NULL);
-         child != NULL; child = pcmk__xe_next_same(child)) {
-
-        if (pcmk__str_eq(action, crm_element_value(child, PCMK_XA_NAME),
-                pcmk__str_casei)) {
-            timeout_spec = crm_element_value(child, PCMK_META_TIMEOUT);
-            break;
-        }
-    }
-
-    if (timeout_spec == NULL && scheduler->op_defaults) {
-        action_meta = pcmk__strkey_table(free, free);
-        pe__unpack_dataset_nvpairs(scheduler->op_defaults,
-                                   PCMK_XE_META_ATTRIBUTES, &rule_data,
-                                   action_meta, NULL, FALSE, scheduler);
-        timeout_spec = g_hash_table_lookup(action_meta, PCMK_META_TIMEOUT);
-    }
-
-    // @TODO check meta-attributes
-    // @TODO maybe use min-interval monitor timeout as default for monitors
-
-    timeout_ms = crm_get_msec(timeout_spec);
-    if (timeout_ms < 0) {
-        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
-    }
-
-    if (action_meta != NULL) {
-        g_hash_table_destroy(action_meta);
-    }
-    return (int) QB_MIN(timeout_ms, INT_MAX);
-}
-
 enum action_tasks
 get_complex_task(const pcmk_resource_t *rsc, const char *name)
 {
     enum action_tasks task = pcmk_parse_action(name);
 
     if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
         switch (task) {
             case pcmk_action_stopped:
             case pcmk_action_started:
             case pcmk_action_demoted:
             case pcmk_action_promoted:
                 crm_trace("Folding %s back into its atomic counterpart for %s",
                           name, rsc->id);
                 --task;
                 break;
             default:
                 break;
         }
     }
     return task;
 }
 
 /*!
  * \internal
  * \brief Find first matching action in a list
  *
  * \param[in] input    List of actions to search
  * \param[in] uuid     If not NULL, action must have this UUID
  * \param[in] task     If not NULL, action must have this action name
  * \param[in] on_node  If not NULL, action must be on this node
  *
  * \return First action in list that matches criteria, or NULL if none
  */
 pcmk_action_t *
 find_first_action(const GList *input, const char *uuid, const char *task,
                   const pcmk_node_t *on_node)
 {
     CRM_CHECK(uuid || task, return NULL);
 
     for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
 
         if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
             continue;
 
         } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
             continue;
 
         } else if (on_node == NULL) {
             return action;
 
         } else if (action->node == NULL) {
             continue;
 
         } else if (pcmk__same_node(on_node, action->node)) {
             return action;
         }
     }
 
     return NULL;
 }
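 
 /* Illustrative sketch: find a resource's stop action on a particular node by
  * action name alone (passing NULL skips the UUID check).
  *
  *     pcmk_action_t *stop = find_first_action(rsc->actions, NULL,
  *                                             PCMK_ACTION_STOP, node);
  */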
 
 GList *
 find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
 {
     GList *gIter = input;
     GList *result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
 
         if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
             continue;
 
         } else if (on_node == NULL) {
             crm_trace("Action %s matches (ignoring node)", key);
             result = g_list_prepend(result, action);
 
         } else if (action->node == NULL) {
             crm_trace("Action %s matches (unallocated, assigning to %s)",
                       key, pcmk__node_name(on_node));
 
             action->node = pe__copy_node(on_node);
             result = g_list_prepend(result, action);
 
         } else if (pcmk__same_node(on_node, action->node)) {
             crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node));
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 GList *
 find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
 {
     GList *result = NULL;
 
     CRM_CHECK(key != NULL, return NULL);
 
     if (on_node == NULL) {
         return NULL;
     }
 
     for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
 
         if ((action->node != NULL)
             && pcmk__str_eq(key, action->uuid, pcmk__str_casei)
             && pcmk__str_eq(on_node->details->id, action->node->details->id,
                             pcmk__str_casei)) {
 
             crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node));
             result = g_list_prepend(result, action);
         }
     }
 
     return result;
 }
 
 /*!
  * \brief Find all actions of given type for a resource
  *
  * \param[in] rsc           Resource to search
  * \param[in] node          Find only actions scheduled on this node
  * \param[in] task          Action name to search for
  * \param[in] require_node  If TRUE, an action will not match if \p node or
  *                          the action's node is NULL
  *
  * \return List of actions found (or NULL if none)
  * \note If node is not NULL and require_node is FALSE, matching actions
  *       without a node will be assigned to node.
  */
 GList *
 pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
                      const char *task, bool require_node)
 {
     GList *result = NULL;
     char *key = pcmk__op_key(rsc->id, task, 0);
 
     if (require_node) {
         result = find_actions_exact(rsc->actions, key, node);
     } else {
         result = find_actions(rsc->actions, key, node);
     }
     free(key);
     return result;
 }
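 
 /* Illustrative sketch: collect a resource's stop actions on one node; the
  * returned list belongs to the caller, but the actions themselves do not.
  *
  *     GList *stops = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, false);
  *
  *     // ... examine the matching actions ...
  *     g_list_free(stops);
  */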
 
 /*!
  * \internal
  * \brief Create an action reason string based on the action itself
  *
  * \param[in] action  Action to create reason string for
  * \param[in] flag    Action flag that was cleared
  *
  * \return Newly allocated string suitable for use as action reason
  * \note It is the caller's responsibility to free() the result.
  */
 char *
 pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
 {
     const char *change = NULL;
 
     switch (flag) {
         case pcmk_action_runnable:
             change = "unrunnable";
             break;
         case pcmk_action_migratable:
             change = "unmigrateable";
             break;
         case pcmk_action_optional:
             change = "required";
             break;
         default:
             // Bug: caller passed unsupported flag
             CRM_CHECK(change != NULL, change = "");
             break;
     }
     return crm_strdup_printf("%s%s%s %s", change,
                              (action->rsc == NULL)? "" : " ",
                              (action->rsc == NULL)? "" : action->rsc->id,
                              action->task);
 }
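 
 /* Illustrative sketch (then_action being a hypothetical dependent action):
  * build a reason such as "unrunnable <rsc> start" after clearing the runnable
  * flag, and attach it to the action that is affected by that change.
  *
  *     char *reason = pe__action2reason(action, pcmk_action_runnable);
  *
  *     pe_action_set_reason(then_action, reason, true);
  *     free(reason); // pe_action_set_reason() copies the string
  */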
 
 void pe_action_set_reason(pcmk_action_t *action, const char *reason,
                           bool overwrite)
 {
     if (action->reason != NULL && overwrite) {
         pcmk__rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
                         action->uuid, action->reason,
                         pcmk__s(reason, "(none)"));
     } else if (action->reason == NULL) {
         pcmk__rsc_trace(action->rsc, "Set %s reason to '%s'",
                         action->uuid, pcmk__s(reason, "(none)"));
     } else {
         // crm_assert(action->reason != NULL && !overwrite);
         return;
     }
 
     pcmk__str_update(&action->reason, reason);
 }
 
 /*!
  * \internal
  * \brief Create an action to clear a resource's history from CIB
  *
  * \param[in,out] rsc       Resource to clear
  * \param[in]     node      Node to clear history on
  */
 void
 pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     CRM_ASSERT((rsc != NULL) && (node != NULL));
 
     custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
                   PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->cluster);
 }
 
 #define sort_return(an_int, why) do {					\
 	free(a_uuid);						\
 	free(b_uuid);						\
 	crm_trace("%s (%d) %c %s (%d) : %s",				\
 		  a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',	\
 		  b_xml_id, b_call_id, why);				\
 	return an_int;							\
     } while(0)
 
 int
 pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
                 bool same_node_default)
 {
     int a_call_id = -1;
     int b_call_id = -1;
 
     char *a_uuid = NULL;
     char *b_uuid = NULL;
 
     const char *a_xml_id = crm_element_value(xml_a, PCMK_XA_ID);
     const char *b_xml_id = crm_element_value(xml_b, PCMK_XA_ID);
 
     const char *a_node = crm_element_value(xml_a, PCMK__META_ON_NODE);
     const char *b_node = crm_element_value(xml_b, PCMK__META_ON_NODE);
     bool same_node = true;
 
     /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via
      * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c).
      *
      * If any of the PCMK__XE_LRM_RSC_OP entries lacks the on_node attribute,
      * we need to be told explicitly whether the two operations are on the
      * same node.
      */
     if (a_node == NULL || b_node == NULL) {
         same_node = same_node_default;
 
     } else {
         same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
     }
 
     if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
         /* We have duplicate PCMK__XE_LRM_RSC_OP entries in the status
          * section, which is unlikely to be a good thing. We can handle it
          * easily enough, but we need to get to the bottom of why it's
          * happening.
          */
         pcmk__config_err("Duplicate " PCMK__XE_LRM_RSC_OP " entries named %s",
                          a_xml_id);
         sort_return(0, "duplicate");
     }
 
     crm_element_value_int(xml_a, PCMK__XA_CALL_ID, &a_call_id);
     crm_element_value_int(xml_b, PCMK__XA_CALL_ID, &b_call_id);
 
     if (a_call_id == -1 && b_call_id == -1) {
         /* both are pending ops so it doesn't matter since
          *   stops are never pending
          */
         sort_return(0, "pending");
 
     } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
         sort_return(-1, "call id");
 
     } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
         sort_return(1, "call id");
 
     } else if (a_call_id >= 0 && b_call_id >= 0
                && (!same_node || a_call_id == b_call_id)) {
         /* The op and last_failed_op are the same. Order on
          * PCMK_XA_LAST_RC_CHANGE.
          */
         time_t last_a = -1;
         time_t last_b = -1;
 
         crm_element_value_epoch(xml_a, PCMK_XA_LAST_RC_CHANGE, &last_a);
         crm_element_value_epoch(xml_b, PCMK_XA_LAST_RC_CHANGE, &last_b);
 
         crm_trace("rc-change: %lld vs %lld",
                   (long long) last_a, (long long) last_b);
         if (last_a >= 0 && last_a < last_b) {
             sort_return(-1, "rc-change");
 
         } else if (last_b >= 0 && last_a > last_b) {
             sort_return(1, "rc-change");
         }
         sort_return(0, "rc-change");
 
     } else {
         /* One of the inputs is a pending operation.
          * Attempt to use PCMK__XA_TRANSITION_MAGIC to determine its age relative
          * to the other.
          */
 
         int a_id = -1;
         int b_id = -1;
 
         const char *a_magic = crm_element_value(xml_a,
                                                 PCMK__XA_TRANSITION_MAGIC);
         const char *b_magic = crm_element_value(xml_b,
                                                 PCMK__XA_TRANSITION_MAGIC);
 
         CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
         if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic a");
         }
         if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
                                      NULL)) {
             sort_return(0, "bad magic b");
         }
         /* try to determine the relative age of the operation...
          * some pending operations (e.g. a start) may have been superseded
          *   by a subsequent stop
          *
          * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
          */
         if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
             /*
              * Some of the logic in here may be redundant.
              *
              * If the UUID from the transition engine doesn't match, then one
              * of them had better be a pending operation. Pending operations
              * don't survive between elections and joins, because we query the
              * LRM directly.
              */
 
             if (b_call_id == -1) {
                 sort_return(-1, "transition + call");
 
             } else if (a_call_id == -1) {
                 sort_return(1, "transition + call");
             }
 
         } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
             sort_return(-1, "transition");
 
         } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
             sort_return(1, "transition");
         }
     }
 
     /* we should never end up here */
     CRM_CHECK(FALSE, sort_return(0, "default"));
 }
 
 gint
 sort_op_by_callid(gconstpointer a, gconstpointer b)
 {
     const xmlNode *xml_a = a;
     const xmlNode *xml_b = b;
 
     return pe__is_newer_op(xml_a, xml_b, true);
 }
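 
 /* Illustrative sketch (op_history being a hypothetical list of
  * PCMK__XE_LRM_RSC_OP entries for one resource on one node): sort the
  * operation history from the CIB status section, oldest first.
  *
  *     GList *sorted = g_list_sort(op_history, sort_op_by_callid);
  */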
 
 /*!
  * \internal
  * \brief Create a new pseudo-action for a resource
  *
  * \param[in,out] rsc       Resource to create action for
  * \param[in]     task      Action name
  * \param[in]     optional  Whether action should be considered optional
  * \param[in]     runnable  Whether action should be considered runnable
  *
  * \return New action object corresponding to arguments
  */
 pcmk_action_t *
 pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
                           bool runnable)
 {
     pcmk_action_t *action = NULL;
 
     CRM_ASSERT((rsc != NULL) && (task != NULL));
 
     action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
                            optional, rsc->cluster);
     pcmk__set_action_flags(action, pcmk_action_pseudo);
     if (runnable) {
         pcmk__set_action_flags(action, pcmk_action_runnable);
     }
     return action;
 }
 
 /*!
  * \internal
  * \brief Add the expected result to an action
  *
  * \param[in,out] action           Action to add expected result to
  * \param[in]     expected_result  Expected result to add
  *
  * \note This is more efficient than calling pcmk__insert_meta().
  */
 void
 pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
 {
     CRM_ASSERT((action != NULL) && (action->meta != NULL));
 
     g_hash_table_insert(action->meta, pcmk__str_copy(PCMK__META_OP_TARGET_RC),
                         pcmk__itoa(expected_result));
 }
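 
 /* Illustrative sketch (probe being a hypothetical probe action): record that
  * the action is expected to report the resource as not running, without going
  * through the generic meta-attribute helpers.
  *
  *     pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING);
  */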
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index c96e812013..dd9a3c5335 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2120 +1,2121 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm_resource.h>
 #include <crm/lrmd_internal.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/ipc_attrd_internal.h>
 #include <crm/common/lists_internal.h>
 #include <crm/common/output.h>
 #include <pacemaker-internal.h>
 
 #include <sys/param.h>
 #include <stdint.h>         // uint32_t
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/cib/internal.h>
 
 #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
 
 enum rsc_command {
     cmd_none = 0,           // No command option given (yet)
     cmd_ban,
     cmd_cleanup,
     cmd_clear,
     cmd_colocations,
     cmd_cts,
     cmd_delete,
     cmd_delete_param,
     cmd_digests,
     cmd_execute_agent,
     cmd_fail,
     cmd_get_param,
     cmd_get_property,
     cmd_list_active_ops,
     cmd_list_agents,
     cmd_list_all_ops,
     cmd_list_alternatives,
     cmd_list_instances,
     cmd_list_providers,
     cmd_list_resources,
     cmd_list_standards,
     cmd_locate,
     cmd_metadata,
     cmd_move,
     cmd_query_xml,
     cmd_query_xml_raw,
     cmd_refresh,
     cmd_restart,
     cmd_set_param,
     cmd_set_property,
     cmd_wait,
     cmd_why,
 };
 
 struct {
     enum rsc_command rsc_cmd;     // crm_resource command to perform
 
     // Command-line option values
     gchar *rsc_id;                // Value of --resource
     gchar *rsc_type;              // Value of --resource-type
     gboolean force;               // --force was given
     gboolean clear_expired;       // --expired was given
     gboolean recursive;           // --recursive was given
     gboolean promoted_role_only;  // --promoted was given
     gchar *host_uname;            // Value of --node
     gchar *interval_spec;         // Value of --interval
     gchar *move_lifetime;         // Value of --lifetime
     gchar *operation;             // Value of --operation
     const char *attr_set_type;    // Instance, meta, utilization, or element attribute
     gchar *prop_id;               // --nvpair (attribute XML ID)
     char *prop_name;              // Attribute name
     gchar *prop_set;              // --set-name (attribute block XML ID)
     gchar *prop_value;            // --parameter-value (attribute value)
-    long long timeout_ms;         // Parsed from --timeout value
+    guint timeout_ms;             // Parsed from --timeout value
     char *agent_spec;             // Standard and/or provider and/or agent
     gchar *xml_file;              // Value of (deprecated) --xml-file
     int check_level;              // Optional value of --validate or --force-check
 
     // Resource configuration specified via command-line arguments
     bool cmdline_config;          // Resource configuration was via arguments
     char *v_agent;                // Value of --agent
     char *v_class;                // Value of --class
     char *v_provider;             // Value of --provider
     GHashTable *cmdline_params;   // Resource parameters specified
 
     // Positional command-line arguments
     gchar **remainder;            // Positional arguments as given
     GHashTable *override_params;  // Resource parameter values that override config
 } options = {
     .attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES,
     .check_level = -1,
     .rsc_cmd = cmd_list_resources,  // List all resources if no command given
 };
 
 gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 gboolean cmdline_config_cb(const gchar *option_name, const gchar *optarg,
                            gpointer data, GError **error);
 gboolean option_cb(const gchar *option_name, const gchar *optarg,
                    gpointer data, GError **error);
 gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
 
 static crm_exit_t exit_code = CRM_EX_OK;
 static pcmk__output_t *out = NULL;
 static pcmk__common_args_t *args = NULL;
 
 // Things that should be cleaned up on exit
 static GError *error = NULL;
 static GMainLoop *mainloop = NULL;
 static cib_t *cib_conn = NULL;
 static pcmk_ipc_api_t *controld_api = NULL;
 static pcmk_scheduler_t *scheduler = NULL;
 
 #define MESSAGE_TIMEOUT_S 60
 
 #define INDENT "                                    "
 
 static pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 // Clean up and exit
 static crm_exit_t
 bye(crm_exit_t ec)
 {
     pcmk__output_and_clear_error(&error, out);
 
     if (out != NULL) {
         out->finish(out, ec, true, NULL);
         pcmk__output_free(out);
     }
     pcmk__unregister_formats();
 
     if (cib_conn != NULL) {
         cib_t *save_cib_conn = cib_conn;
 
         cib_conn = NULL; // Ensure we can't free this twice
         cib__clean_up_connection(&save_cib_conn);
     }
 
     if (controld_api != NULL) {
         pcmk_ipc_api_t *save_controld_api = controld_api;
 
         controld_api = NULL; // Ensure we can't free this twice
         pcmk_free_ipc_api(save_controld_api);
     }
 
     if (mainloop != NULL) {
         g_main_loop_unref(mainloop);
         mainloop = NULL;
     }
 
     pe_free_working_set(scheduler);
     scheduler = NULL;
     crm_exit(ec);
     return ec;
 }
 
 static void
 quit_main_loop(crm_exit_t ec)
 {
     exit_code = ec;
     if (mainloop != NULL) {
         GMainLoop *mloop = mainloop;
 
         mainloop = NULL; // Don't re-enter this block
         pcmk_quit_main_loop(mloop, 10);
         g_main_loop_unref(mloop);
     }
 }
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
     // Start with newline because "Waiting for ..." message doesn't have one
     if (error != NULL) {
         g_clear_error(&error);
     }
 
     g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
                 _("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S);
 
     quit_main_loop(CRM_EX_TIMEOUT);
     return FALSE;
 }
 
 static void
 controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
                           crm_exit_t status, void *event_data, void *user_data)
 {
     switch (event_type) {
         case pcmk_ipc_event_disconnect:
             if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
                 crm_info("Connection to controller was terminated");
             }
             quit_main_loop(exit_code);
             break;
 
         case pcmk_ipc_event_reply:
             if (status != CRM_EX_OK) {
                 out->err(out, "Error: bad reply from controller: %s",
                          crm_exit_str(status));
                 pcmk_disconnect_ipc(api);
                 quit_main_loop(status);
             } else {
                 if ((pcmk_controld_api_replies_expected(api) == 0)
                     && mainloop && g_main_loop_is_running(mainloop)) {
                     out->info(out, "... got reply (done)");
                     crm_debug("Got all the replies we expected");
                     pcmk_disconnect_ipc(api);
                     quit_main_loop(CRM_EX_OK);
                 } else {
                     out->info(out, "... got reply");
                 }
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 start_mainloop(pcmk_ipc_api_t *capi)
 {
     unsigned int count = pcmk_controld_api_replies_expected(capi);
 
     if (count > 0) {
         out->info(out, "Waiting for %u %s from the controller",
                   count, pcmk__plural_alt(count, "reply", "replies"));
         exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
         mainloop = g_main_loop_new(NULL, FALSE);
         g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
         g_main_loop_run(mainloop);
     }
 }
 
 static int
 compare_id(gconstpointer a, gconstpointer b)
 {
     return strcmp((const char *)a, (const char *)b);
 }
 
 static GList *
 build_constraint_list(xmlNode *root)
 {
     GList *retval = NULL;
     xmlNode *cib_constraints = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
     int ndx = 0;
 
     cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS);
     xpathObj = xpath_search(cib_constraints, "//" PCMK_XE_RSC_LOCATION);
 
     for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
         xmlNode *match = getXpathResult(xpathObj, ndx);
         retval = g_list_insert_sorted(retval, (gpointer) pcmk__xe_id(match),
                                       compare_id);
     }
 
     freeXpathObject(xpathObj);
     return retval;
 }
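 
 /* Illustrative sketch (cib_xml being a hypothetical CIB XML tree): gather the
  * sorted IDs of all location constraints; the ID strings point into the XML,
  * so only the list itself is freed.
  *
  *     GList *locations = build_constraint_list(cib_xml);
  *
  *     // ... look up or display the constraint IDs ...
  *     g_list_free(locations);
  */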
 
 /*!
  * \internal
  * \brief Process options that set the command
  *
  * Nothing else should set \c options.rsc_cmd.
  *
  * \param[in]  option_name  Name of the option being parsed
  * \param[in]  optarg       Value to be parsed
  * \param[in]  data         Ignored
  * \param[out] error        Where to store recoverable error, if any
  *
  * \return \c TRUE if the option was successfully parsed, or \c FALSE if an
  *         error occurred, in which case \p *error is set
  */
 static gboolean
 command_cb(const gchar *option_name, const gchar *optarg, gpointer data,
            GError **error)
 {
     // Sorted by enum rsc_command name
     if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
         options.rsc_cmd = cmd_ban;
 
     } else if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
         options.rsc_cmd = cmd_cleanup;
 
     } else if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
         options.rsc_cmd = cmd_clear;
 
     } else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
         options.rsc_cmd = cmd_colocations;
 
     } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
         options.rsc_cmd = cmd_colocations;
         options.recursive = TRUE;
 
     } else if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
         options.rsc_cmd = cmd_cts;
 
     } else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) {
         options.rsc_cmd = cmd_delete;
 
     } else if (pcmk__str_any_of(option_name, "-d", "--delete-parameter",
                                 NULL)) {
         options.rsc_cmd = cmd_delete_param;
         pcmk__str_update(&options.prop_name, optarg);
 
     } else if (pcmk__str_eq(option_name, "--digests", pcmk__str_none)) {
         options.rsc_cmd = cmd_digests;
 
         if (options.override_params == NULL) {
             options.override_params = pcmk__strkey_table(free, free);
         }
 
     } else if (pcmk__str_any_of(option_name,
                                 "--force-demote", "--force-promote",
                                 "--force-start", "--force-stop",
                                 "--force-check", "--validate", NULL)) {
         options.rsc_cmd = cmd_execute_agent;
 
         g_free(options.operation);
         options.operation = g_strdup(option_name + 2);  // skip "--"
 
         if (options.override_params == NULL) {
             options.override_params = pcmk__strkey_table(free, free);
         }
 
         if (optarg != NULL) {
             if (pcmk__scan_min_int(optarg, &options.check_level,
                                    0) != pcmk_rc_ok) {
                 g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
                             _("Invalid check level setting: %s"), optarg);
                 return FALSE;
             }
         }
 
     } else if (pcmk__str_any_of(option_name, "-F", "--fail", NULL)) {
         options.rsc_cmd = cmd_fail;
 
     } else if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
         options.rsc_cmd = cmd_get_param;
         pcmk__str_update(&options.prop_name, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-G", "--get-property", NULL)) {
         options.rsc_cmd = cmd_get_property;
         pcmk__str_update(&options.prop_name, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
         options.rsc_cmd = cmd_list_active_ops;
 
     } else if (pcmk__str_eq(option_name, "--list-agents", pcmk__str_none)) {
         options.rsc_cmd = cmd_list_agents;
         pcmk__str_update(&options.agent_spec, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-o", "--list-all-operations",
                                 NULL)) {
         options.rsc_cmd = cmd_list_all_ops;
 
     } else if (pcmk__str_eq(option_name, "--list-ocf-alternatives",
                             pcmk__str_none)) {
         options.rsc_cmd = cmd_list_alternatives;
         pcmk__str_update(&options.agent_spec, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
         options.rsc_cmd = cmd_list_instances;
 
     } else if (pcmk__str_eq(option_name, "--list-ocf-providers",
                             pcmk__str_none)) {
         options.rsc_cmd = cmd_list_providers;
         pcmk__str_update(&options.agent_spec, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
         options.rsc_cmd = cmd_list_resources;
 
     } else if (pcmk__str_eq(option_name, "--list-standards", pcmk__str_none)) {
         options.rsc_cmd = cmd_list_standards;
 
     } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
         options.rsc_cmd = cmd_locate;
 
     } else if (pcmk__str_eq(option_name, "--show-metadata", pcmk__str_none)) {
         options.rsc_cmd = cmd_metadata;
         pcmk__str_update(&options.agent_spec, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
         options.rsc_cmd = cmd_move;
 
     } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
         options.rsc_cmd = cmd_query_xml;
 
     } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
         options.rsc_cmd = cmd_query_xml_raw;
 
     } else if (pcmk__str_any_of(option_name, "-R", "--refresh", NULL)) {
         options.rsc_cmd = cmd_refresh;
 
     } else if (pcmk__str_eq(option_name, "--restart", pcmk__str_none)) {
         options.rsc_cmd = cmd_restart;
 
     } else if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
         options.rsc_cmd = cmd_set_param;
         pcmk__str_update(&options.prop_name, optarg);
 
     } else if (pcmk__str_any_of(option_name, "-S", "--set-property", NULL)) {
         options.rsc_cmd = cmd_set_property;
         pcmk__str_update(&options.prop_name, optarg);
 
     } else if (pcmk__str_eq(option_name, "--wait", pcmk__str_none)) {
         options.rsc_cmd = cmd_wait;
 
     } else if (pcmk__str_any_of(option_name, "-Y", "--why", NULL)) {
         options.rsc_cmd = cmd_why;
     }
 
     return TRUE;
 }
 
 /* short option letters still available: eEJkKXyYZ */
 
 static GOptionEntry query_entries[] = {
     { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "List all cluster resources with status",
       NULL },
     { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "List IDs of all instantiated resources (individual members\n"
       INDENT "rather than groups etc.)",
       NULL },
     { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG,
           G_OPTION_ARG_CALLBACK, command_cb,
       NULL,
       NULL },
     { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List active resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List all resource operations, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
     { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List supported standards",
       NULL },
     { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List all available OCF providers",
       NULL },
     { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List all agents available for the named standard and/or provider",
       "STD:PROV" },
     { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           command_cb,
       "List all available providers for the named OCF agent",
       "AGENT" },
     { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb,
       "Show the metadata for the named class:provider:agent",
       "SPEC" },
     { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Show XML configuration of resource (after any template expansion)",
       NULL },
     { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Show XML configuration of resource (before any template expansion)",
       NULL },
     { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Display named parameter for resource (use instance attribute\n"
       INDENT "unless --element, --meta, or --utilization is specified)",
       "PARAM" },
     { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Display named property of resource ('class', 'type', or 'provider') "
       "(requires --resource)",
       "PROPERTY" },
     { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Show node(s) currently running resource",
       NULL },
     { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Display the location and colocation constraints that apply to a\n"
       INDENT "resource, and if --recursive is specified, to the resources\n"
       INDENT "directly or indirectly involved in those colocations.\n"
       INDENT "If the named resource is part of a group, or a clone or\n"
       INDENT "bundle instance, constraints for the collective resource\n"
       INDENT "will be shown unless --force is given.",
       NULL },
     { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Equivalent to --constraints --recursive",
       NULL },
     { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Show why resources are not running, optionally filtered by\n"
       INDENT "--resource and/or --node",
       NULL },
 
     { NULL }
 };
 
 static GOptionEntry command_entries[] = {
     { "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Validate resource configuration by calling agent's validate-all\n"
       INDENT "action. The configuration may be specified either by giving an\n"
       INDENT "existing resource name with -r, or by specifying --class,\n"
       INDENT "--agent, and --provider arguments, along with any number of\n"
       INDENT "--option arguments. An optional LEVEL argument can be given\n"
       INDENT "to control the level of checking performed.",
       "LEVEL" },
     { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "If resource has any past failures, clear its history and fail\n"
       INDENT "count. Optionally filtered by --resource, --node, --operation\n"
       INDENT "and --interval (otherwise all). --operation and --interval\n"
       INDENT "apply to fail counts, but entire history is always clear, to\n"
       INDENT "allow current state to be rechecked. If the named resource is\n"
       INDENT "part of a group, or one numbered instance of a clone or bundled\n"
       INDENT "resource, the clean-up applies to the whole collective resource\n"
       INDENT "unless --force is given.",
       NULL },
     { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Delete resource's history (including failures) so its current state\n"
       INDENT "is rechecked. Optionally filtered by --resource and --node\n"
       INDENT "(otherwise all). If the named resource is part of a group, or one\n"
       INDENT "numbered instance of a clone or bundled resource, the refresh\n"
       INDENT "applies to the whole collective resource unless --force is given.",
       NULL },
     { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Set named parameter for resource (requires -v). Use instance\n"
       INDENT "attribute unless --element, --meta, or --utilization is "
       "specified.",
       "PARAM" },
     { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Delete named parameter for resource. Use instance attribute\n"
       INDENT "unless --element, --meta or, --utilization is specified.",
       "PARAM" },
     { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK,
           command_cb,
       "Set named property of resource ('class', 'type', or 'provider') "
       "(requires -r, -t, -v)",
       "PROPERTY" },
 
     { NULL }
 };
 
 static GOptionEntry location_entries[] = {
     { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Create a constraint to move resource. If --node is specified,\n"
       INDENT "the constraint will be to move to that node, otherwise it\n"
       INDENT "will be to ban the current node. Unless --force is specified\n"
       INDENT "this will return an error if the resource is already running\n"
       INDENT "on the specified node. If --force is specified, this will\n"
       INDENT "always ban the current node.\n"
       INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
       INDENT "resource from running on its previous location until the\n"
       INDENT "implicit constraint expires or is removed with --clear.",
       NULL },
     { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Create a constraint to keep resource off a node.\n"
       INDENT "Optional: --node, --lifetime, --promoted.\n"
       INDENT "NOTE: This will prevent the resource from running on the\n"
       INDENT "affected node until the implicit constraint expires or is\n"
       INDENT "removed with --clear. If --node is not specified, it defaults\n"
       INDENT "to the node currently running the resource for primitives\n"
       INDENT "and groups, or the promoted instance of promotable clones with\n"
       INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n"
       INDENT "error as there is no sane default).",
       NULL },
     { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "Remove all constraints created by the --ban and/or --move\n"
       INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
       INDENT "--expired. If --node is not specified, all constraints created\n"
       INDENT "by --ban and --move will be removed for the named resource. If\n"
       INDENT "--node and --force are specified, any constraint created by\n"
       INDENT "--move will be cleared, even if it is not for the specified\n"
       INDENT "node. If --expired is specified, only those constraints whose\n"
       INDENT "lifetimes have expired will be removed.",
       NULL },
     { "expired", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
           &options.clear_expired,
       "Modifies the --clear argument to remove constraints with\n"
       INDENT "expired lifetimes.",
       NULL },
     { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
       "Lifespan (as ISO 8601 duration) of created constraints (with\n"
       INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
       "TIMESPEC" },
     { "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
       &options.promoted_role_only,
       "Limit scope of command to promoted role (with -B, -M, -U). For\n"
       INDENT "-B and -M, previously promoted instances may remain\n"
       INDENT "active in the unpromoted role.",
       NULL },
 
     // Deprecated since 2.1.0
     { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
       &options.promoted_role_only,
       "Deprecated: Use --promoted instead", NULL },
 
     { NULL }
 };
 
 static GOptionEntry advanced_entries[] = {
     { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Delete a resource from the CIB. Required: -t",
       NULL },
     { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Tell the cluster this resource has failed",
       NULL },
     { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Tell the cluster to restart this resource and\n"
       INDENT "anything that depends on it",
       NULL },
     { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Wait until the cluster settles into a stable state",
       NULL },
     { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
       INDENT "configuration changes (only accurate if there is resource\n"
       INDENT "history on the specified node). Required: --resource, --node.\n"
       INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
       INDENT "the configuration (to see what the hash would be with those\n"
       INDENT "changes).",
       NULL },
     { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "(Advanced) Bypass the cluster and demote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Bypass the cluster and stop a resource on the local node",
       NULL },
     { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
       "(Advanced) Bypass the cluster and start a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "(Advanced) Bypass the cluster and promote a resource on the local\n"
       INDENT "node. Unless --force is specified, this will refuse to do so if\n"
       INDENT "the cluster believes the resource is a clone instance already\n"
       INDENT "running on the local node.",
       NULL },
     { "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
           command_cb,
       "(Advanced) Bypass the cluster and check the state of a resource on\n"
       INDENT "the local node. An optional LEVEL argument can be given\n"
       INDENT "to control the level of checking performed.",
       "LEVEL" },
 
     { NULL }
 };
 
 static GOptionEntry addl_entries[] = {
     { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
       "Node name",
       "NAME" },
     { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
       "Follow colocation chains when using --set-parameter or --constraints",
       NULL },
     { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
       "Resource XML element (primitive, group, etc.) (with -D)",
       "ELEMENT" },
     { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
       "Value to use with -p",
       "PARAM" },
     { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource meta-attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource utilization attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
       "Use resource element attribute instead of instance attribute\n"
       INDENT "(with -p, -g, -d)",
       NULL },
     { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
       "Operation to clear instead of all (with -C -r)",
       "OPERATION" },
     { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
       "Interval of operation to clear (default 0) (with -C -r -n)",
       "N" },
     { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
       "The standard the resource agent conforms to (for example, ocf).\n"
       INDENT "Use with --agent, --provider, --option, and --validate.",
       "CLASS" },
     { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
       "The agent to use (for example, IPaddr). Use with --class,\n"
       INDENT "--provider, --option, and --validate.",
       "AGENT" },
     { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
           cmdline_config_cb,
       "The vendor that supplies the resource agent (for example,\n"
       INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
       "PROVIDER" },
     { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
       "Specify a device configuration parameter as NAME=VALUE (may be\n"
       INDENT "specified multiple times). Use with --validate and without the\n"
       INDENT "-r option.",
       "PARAM" },
     { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
       "(Advanced) XML ID of attributes element to use (with -p, -d)",
       "ID" },
     { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
       "(Advanced) XML ID of nvpair element to use (with -p, -d)",
       "ID" },
     { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
       "(Advanced) Abort if command does not finish in this time (with\n"
       INDENT "--restart, --wait, --force-*)",
       "N" },
     { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
       "Force the action to be performed. See help for individual commands for\n"
       INDENT "additional behavior.",
       NULL },
     { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
       NULL,
       "FILE" },
     { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
       NULL,
       "HOST" },
 
     { NULL }
 };
 
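 /*!
  * \internal
  * \brief Handle --meta, --utilization, and --element
  *
  * Record which type of attribute set subsequent -p, -g, and -d operations
  * should act on.
  *
  * \return \c TRUE (these options are always accepted)
  */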
 gboolean
 attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
     if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
         options.attr_set_type = PCMK_XE_META_ATTRIBUTES;
     } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
         options.attr_set_type = PCMK_XE_UTILIZATION;
     } else if (pcmk__str_eq(option_name, "--element", pcmk__str_none)) {
         options.attr_set_type = ATTR_SET_ELEMENT;
     }
     return TRUE;
 }
 
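 /*!
  * \internal
  * \brief Handle --class, --agent, and --provider
  *
  * Record the given value and note that the resource configuration was
  * supplied on the command line rather than via --resource.
  *
  * \return \c TRUE (these options are always accepted)
  */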
 gboolean
 cmdline_config_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                   GError **error)
 {
     options.cmdline_config = true;
 
     if (pcmk__str_eq(option_name, "--class", pcmk__str_none)) {
         pcmk__str_update(&options.v_class, optarg);
 
     } else if (pcmk__str_eq(option_name, "--provider", pcmk__str_none)) {
         pcmk__str_update(&options.v_provider, optarg);
 
     } else {    // --agent
         pcmk__str_update(&options.v_agent, optarg);
     }
     return TRUE;
 }
 
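 /*!
  * \internal
  * \brief Handle --option
  *
  * Parse the argument as a NAME=VALUE pair (for example, ip=192.168.122.1
  * for an IPaddr-style agent) and add it to options.cmdline_params, creating
  * that table if it does not exist yet.
  *
  * \return \c TRUE if the argument parsed as a name/value pair, or \c FALSE
  *         otherwise
  */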
 gboolean
 option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
           GError **error)
 {
     char *name = NULL;
     char *value = NULL;
 
     if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
         return FALSE;
     }
     if (options.cmdline_params == NULL) {
         options.cmdline_params = pcmk__strkey_table(free, free);
     }
     g_hash_table_replace(options.cmdline_params, name, value);
     return TRUE;
 }
 
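 /*!
  * \internal
  * \brief Handle --timeout
  *
  * Parse the argument as an interval specification and store the result in
  * milliseconds. Invalid values are ignored with a warning rather than
  * rejected, for backward compatibility.
  *
  * \return \c TRUE (the option is always accepted)
  */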
 gboolean
 timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
-    options.timeout_ms = crm_get_msec(optarg);
+    long long timeout_ms = crm_get_msec(optarg);
 
-    if (options.timeout_ms < 0) {
+    if (timeout_ms < 0) {
+        // @COMPAT When we can break backward compatibility, return FALSE
         crm_warn("Ignoring invalid timeout '%s'", optarg);
-        options.timeout_ms = 0;
+        options.timeout_ms = 0U;
     } else {
-        options.timeout_ms = QB_MIN(options.timeout_ms, INT_MAX);
+        options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX);
     }
     return TRUE;
 }
 
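 /*!
  * \internal
  * \brief Ban a resource from its current node when no --node was given
  *
  * Used by --ban and --move without --node. If the resource is active on
  * exactly one node, ban it there (for example, "crm_resource --move -r
  * myResource" with no --node bans the node currently running myResource).
  * For promotable clones, ban the node running the single promoted instance.
  * In any other situation there is no sane default, so set a usage error and
  * return EINVAL.
  *
  * \return Standard Pacemaker return code
  */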
 static int
 ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc,
             const char *move_lifetime)
 {
     int rc = pcmk_rc_ok;
     pcmk_node_t *current = NULL;
     unsigned int nactive = 0;
 
     CRM_CHECK(rsc != NULL, return EINVAL);
 
     current = pe__find_active_requires(rsc, &nactive);
 
     if (nactive == 1) {
         rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
                               cib_conn, cib_sync_call,
                               options.promoted_role_only, PCMK__ROLE_PROMOTED);
 
     } else if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
         int count = 0;
         GList *iter = NULL;
 
         current = NULL;
         for (iter = rsc->children; iter; iter = iter->next) {
             pcmk_resource_t *child = (pcmk_resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if (child_role == pcmk_role_promoted) {
                 count++;
                 current = pcmk__current_node(child);
             }
         }
 
         if (count == 1 && current) {
             rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
                                   cib_conn, cib_sync_call,
                                   options.promoted_role_only,
                                   PCMK__ROLE_PROMOTED);
 
         } else {
             rc = EINVAL;
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         _("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
                         "To prevent '%s' from running on a specific location, "
                         "specify a node."
                         "To prevent '%s' from being promoted at a specific "
                         "location, specify a node and the --promoted option."),
                         options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
         }
 
     } else {
         rc = EINVAL;
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("Resource '%s' not moved: active in %d locations.\n"
                     "To prevent '%s' from running on a specific location, "
                     "specify a node."),
                     options.rsc_id, nactive, options.rsc_id);
     }
 
     return rc;
 }
 
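 /*!
  * \internal
  * \brief Clean up a resource's failure history (--cleanup)
  *
  * Unless --force was given, act on the whole collective resource by
  * climbing to its uber-parent. On success, show any reasons the resource
  * might remain stopped, and start the main loop to wait for the
  * controller's replies.
  */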
 static void
 cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Erasing failures of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(controld_api, options.host_uname, rsc,
                              options.operation, options.interval_spec, TRUE,
                              scheduler, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, rsc, node);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
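 /*!
  * \internal
  * \brief Remove constraints created by --ban and/or --move (--clear)
  *
  * Honor --expired, --node, and --force as described in the option help.
  * Unless output is quiet, re-query the CIB afterward and report which
  * constraints were actually removed.
  *
  * \param[in,out] out           Output object
  * \param[out]    cib_xml_copy  Where to store the re-queried CIB (when
  *                              output is not quiet)
  *
  * \return Standard Pacemaker return code
  */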
 static int
 clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
 {
     GList *before = NULL;
     GList *after = NULL;
     GList *remaining = NULL;
     GList *ele = NULL;
     pcmk_node_t *dest = NULL;
     int rc = pcmk_rc_ok;
 
     if (!out->is_quiet(out)) {
         before = build_constraint_list(scheduler->input);
     }
 
     if (options.clear_expired) {
         rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
                                             cib_sync_call, options.rsc_id,
                                             options.host_uname,
                                             options.promoted_role_only);
 
     } else if (options.host_uname) {
         dest = pe_find_node(scheduler->nodes, options.host_uname);
         if (dest == NULL) {
             rc = pcmk_rc_node_unknown;
             if (!out->is_quiet(out)) {
                 g_list_free(before);
             }
             return rc;
         }
         rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
                                 cib_conn, cib_sync_call, true, options.force);
 
     } else {
         rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
                                 cib_conn, cib_sync_call, true, options.force);
     }
 
     if (!out->is_quiet(out)) {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         _("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
             g_list_free(before);
             free_xml(*cib_xml_copy);
             *cib_xml_copy = NULL;
             return rc;
         }
 
         scheduler->input = *cib_xml_copy;
         cluster_status(scheduler);
 
         after = build_constraint_list(scheduler->input);
         remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
 
         for (ele = remaining; ele != NULL; ele = ele->next) {
             out->info(out, "Removing constraint: %s", (char *) ele->data);
         }
 
         g_list_free(before);
         g_list_free(after);
         g_list_free(remaining);
     }
 
     return rc;
 }
 
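 /*!
  * \internal
  * \brief Create scheduler data and populate it with cluster status
  *
  * Read the CIB from the file given with --xml-file if any, otherwise query
  * the live CIB, then create the working set and unpack the current cluster
  * status into it.
  *
  * \param[out] cib_xml_copy  Where to store the CIB XML that was read
  *
  * \return Standard Pacemaker return code
  */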
 static int
 initialize_scheduler_data(xmlNodePtr *cib_xml_copy)
 {
     int rc = pcmk_rc_ok;
 
     if (options.xml_file != NULL) {
         *cib_xml_copy = pcmk__xml_read(options.xml_file);
         if (*cib_xml_copy == NULL) {
             rc = pcmk_rc_cib_corrupt;
         }
     } else {
         rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
         rc = pcmk_legacy2rc(rc);
     }
 
     if (rc == pcmk_rc_ok) {
         scheduler = pe_new_working_set();
         if (scheduler == NULL) {
             rc = ENOMEM;
         } else {
             pcmk__set_scheduler_flags(scheduler,
                                       pcmk_sched_no_counts
                                       |pcmk_sched_no_compat);
             scheduler->priv = out;
             rc = update_scheduler_input(scheduler, cib_xml_copy);
         }
     }
 
     if (rc != pcmk_rc_ok) {
         free_xml(*cib_xml_copy);
         *cib_xml_copy = NULL;
         return rc;
     }
 
     cluster_status(scheduler);
     return pcmk_rc_ok;
 }
 
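 /*!
  * \internal
  * \brief Refresh resource history for a node or the whole cluster
  *
  * Used by --refresh when no resource is specified. Clear fail counts via
  * the attribute manager and ask the controller to reprobe resource state,
  * routing through the cluster node that hosts the connection when --node
  * names a Pacemaker Remote node. With CIB_file set (no controller
  * connection), only report what would be done.
  *
  * \return Standard Pacemaker return code
  */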
 static int
 refresh(pcmk__output_t *out)
 {
     int rc = pcmk_rc_ok;
     const char *router_node = options.host_uname;
     int attr_options = pcmk__node_attr_none;
 
     if (options.host_uname) {
         pcmk_node_t *node = pe_find_node(scheduler->nodes, options.host_uname);
 
         if (pcmk__is_pacemaker_remote_node(node)) {
             node = pcmk__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 rc = ENXIO;
                 g_set_error(&error, PCMK__RC_ERROR, rc,
                             _("No cluster connection to Pacemaker Remote node %s detected"),
                             options.host_uname);
                 return rc;
             }
             router_node = node->details->uname;
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   options.host_uname? options.host_uname : "all nodes");
         rc = pcmk_rc_ok;
         return rc;
     }
 
     crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
 
     rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
                                         NULL, NULL, NULL, attr_options);
 
     if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
                                   router_node) == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 
     return rc;
 }
 
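 /*!
  * \internal
  * \brief Refresh the history of a single resource
  *
  * Used by --refresh when a resource is specified. Unless --force was given,
  * act on the whole collective resource by climbing to its uber-parent.
  * Delete the resource's operation history so the cluster re-detects its
  * current state, then show any reasons it might remain stopped.
  */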
 static void
 refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
 {
     int rc = pcmk_rc_ok;
 
     if (options.force == FALSE) {
         rsc = uber_parent(rsc);
     }
 
     crm_debug("Re-checking the state of %s (%s requested) on %s",
               rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
     rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
                              FALSE, scheduler, options.force);
 
     if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
         // Show any reasons why resource might stay stopped
         cli_resource_check(out, rsc, node);
     }
 
     if (rc == pcmk_rc_ok) {
         start_mainloop(controld_api);
     }
 }
 
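 /*!
  * \internal
  * \brief Set a resource element property such as class, type, or provider
  *
  * Used by --set-property, which requires -r, -t, and -v. Modify the
  * resource's XML element in the CIB's resources section directly.
  *
  * \return Standard Pacemaker return code
  */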
 static int
 set_property(void)
 {
     int rc = pcmk_rc_ok;
     xmlNode *msg_data = NULL;
 
     if (pcmk__str_empty(options.rsc_type)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("Must specify -t with resource type"));
         rc = ENXIO;
         return rc;
 
     } else if (pcmk__str_empty(options.prop_value)) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("Must supply -v with new value"));
         rc = ENXIO;
         return rc;
     }
 
     CRM_LOG_ASSERT(options.prop_name != NULL);
 
     msg_data = pcmk__xe_create(NULL, options.rsc_type);
     crm_xml_add(msg_data, PCMK_XA_ID, options.rsc_id);
     crm_xml_add(msg_data, options.prop_name, options.prop_value);
 
     rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_RESOURCES, msg_data,
                                 cib_sync_call);
     rc = pcmk_legacy2rc(rc);
     free_xml(msg_data);
 
     return rc;
 }
 
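 /*!
  * \internal
  * \brief Display metadata for a resource agent
  *
  * Parse the given agent specification (STANDARD[:PROVIDER]:TYPE, for
  * example ocf:heartbeat:IPaddr), connect to the executor to fetch the
  * agent's metadata, and print it as XML.
  *
  * \param[in,out] out         Output object
  * \param[in]     agent_spec  Agent specification
  *
  * \return Standard Pacemaker return code
  */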
 static int
 show_metadata(pcmk__output_t *out, const char *agent_spec)
 {
     int rc = pcmk_rc_ok;
     char *standard = NULL;
     char *provider = NULL;
     char *type = NULL;
     char *metadata = NULL;
     lrmd_t *lrmd_conn = NULL;
 
     rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
     if (rc != pcmk_rc_ok) {
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     _("Could not create executor connection"));
         lrmd_api_delete(lrmd_conn);
         return rc;
     }
 
     rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
     rc = pcmk_legacy2rc(rc);
 
     if (rc == pcmk_rc_ok) {
         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                            provider, type,
                                            &metadata, 0);
         rc = pcmk_legacy2rc(rc);
 
         if (metadata) {
             out->output_xml(out, PCMK_XE_METADATA, metadata);
             free(metadata);
         } else {
             /* We were given a validly formatted spec, but it doesn't necessarily
              * match up with anything that exists.  Use ENXIO as the return code
              * here because that maps to an exit code of CRM_EX_NOSUCH, which
              * probably is the most common reason to get here.
              */
             rc = ENXIO;
             g_set_error(&error, PCMK__RC_ERROR, rc,
                         _("Metadata query for %s failed: %s"),
                         agent_spec, pcmk_rc_str(rc));
         }
     } else {
         rc = ENXIO;
         g_set_error(&error, PCMK__RC_ERROR, rc,
                     _("'%s' is not a valid agent specification"), agent_spec);
     }
 
     lrmd_api_delete(lrmd_conn);
     return rc;
 }
 
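 /*!
  * \internal
  * \brief Sanity-check a resource configuration given on the command line
  *
  * Ensure --class, --agent, and --provider were used without --resource and
  * only with a command that supports them, and that they name a known agent.
  * Set error on any problem; otherwise make sure a parameter table exists
  * for later use.
  */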
 static void
 validate_cmdline_config(void)
 {
     // Cannot use both --resource and command-line resource configuration
     if (options.rsc_id != NULL) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("--resource cannot be used with --class, --agent, and --provider"));
 
     // Not all commands support command-line resource configuration
     } else if (options.rsc_cmd != cmd_execute_agent) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("--class, --agent, and --provider can only be used with "
                     "--validate and --force-*"));
 
     // Not all of --class, --agent, and --provider need to be given.  Not all
     // classes support the concept of a provider.  Check that what we were given
     // is valid.
     } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
         if (options.v_provider != NULL) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         _("stonith does not support providers"));
 
         } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
             g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                         _("%s is not a known stonith agent"), options.v_agent ? options.v_agent : "");
         }
 
     } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
         g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                     _("%s:%s:%s is not a known resource"),
                     options.v_class ? options.v_class : "",
                     options.v_provider ? options.v_provider : "",
                     options.v_agent ? options.v_agent : "");
     }
 
     if ((error == NULL) && (options.cmdline_params == NULL)) {
         options.cmdline_params = pcmk__strkey_table(free, free);
     }
 }
 
 /*!
  * \internal
  * \brief Get the <tt>enum pe_find</tt> flags for a given command
  *
  * \return <tt>enum pe_find</tt> flag group appropriate for \c options.rsc_cmd.
  */
 static uint32_t
 get_find_flags(void)
 {
     switch (options.rsc_cmd) {
         case cmd_ban:
         case cmd_cleanup:
         case cmd_clear:
         case cmd_colocations:
         case cmd_digests:
         case cmd_execute_agent:
         case cmd_locate:
         case cmd_move:
         case cmd_refresh:
         case cmd_restart:
         case cmd_why:
             return pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
 
         // @COMPAT See note in is_scheduler_required()
         case cmd_delete:
         case cmd_delete_param:
         case cmd_get_param:
         case cmd_get_property:
         case cmd_query_xml_raw:
         case cmd_query_xml:
         case cmd_set_param:
         case cmd_set_property:
             return pcmk_rsc_match_history|pcmk_rsc_match_basename;
 
         default:
             return 0;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node argument is required
  *
  * \return \c true if a \c --node argument is required, or \c false otherwise
  */
 static bool
 is_node_required(void)
 {
     switch (options.rsc_cmd) {
         case cmd_digests:
         case cmd_fail:
             return true;
         default:
             return false;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource argument is required
  *
  * \return \c true if a \c --resource argument is required, or \c false
  *         otherwise
  */
 static bool
 is_resource_required(void)
 {
     if (options.cmdline_config) {
         return false;
     }
 
     switch (options.rsc_cmd) {
         case cmd_clear:
             return !options.clear_expired;
 
         case cmd_cleanup:
         case cmd_cts:
         case cmd_list_active_ops:
         case cmd_list_agents:
         case cmd_list_all_ops:
         case cmd_list_alternatives:
         case cmd_list_instances:
         case cmd_list_providers:
         case cmd_list_resources:
         case cmd_list_standards:
         case cmd_metadata:
         case cmd_refresh:
         case cmd_wait:
         case cmd_why:
             return false;
 
         default:
             return true;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a CIB connection is required
  *
  * \return \c true if a CIB connection is required, or \c false otherwise
  */
 static bool
 is_cib_required(void)
 {
     if (options.cmdline_config) {
         return false;
     }
 
     switch (options.rsc_cmd) {
         case cmd_list_agents:
         case cmd_list_alternatives:
         case cmd_list_providers:
         case cmd_list_standards:
         case cmd_metadata:
             return false;
         default:
             return true;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a controller IPC connection is required
  *
  * \return \c true if a controller connection is required, or \c false otherwise
  */
 static bool
 is_controller_required(void)
 {
     switch (options.rsc_cmd) {
         case cmd_cleanup:
         case cmd_refresh:
             return getenv("CIB_file") == NULL;
 
         case cmd_fail:
             return true;
 
         default:
             return false;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a scheduler IPC connection is required
  *
  * \return \c true if a scheduler connection is required, or \c false otherwise
  */
 static bool
 is_scheduler_required(void)
 {
     if (options.cmdline_config) {
         return false;
     }
 
     /* @COMPAT cmd_delete does not actually need the scheduler and should not
      * set find_flags. However, crm_resource --delete currently throws a
      * "resource not found" error if the resource doesn't exist. This is
      * incorrect behavior (deleting a nonexistent resource should be considered
      * success), but we shouldn't change it until 3.0.0.
      */
     switch (options.rsc_cmd) {
         case cmd_list_agents:
         case cmd_list_alternatives:
         case cmd_list_providers:
         case cmd_list_standards:
         case cmd_metadata:
         case cmd_wait:
             return false;
         default:
             return true;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether the chosen command accepts clone instances
  *
  * \return \c true if \p options.rsc_cmd accepts clone instances, or \c false
  *         otherwise
  */
 static bool
 accept_clone_instance(void)
 {
     // @COMPAT At 3.0.0, add cmd_delete; for now, don't throw error
     switch (options.rsc_cmd) {
         case cmd_ban:
         case cmd_clear:
         case cmd_move:
         case cmd_restart:
             return false;
         default:
             return true;
     }
 }
 
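 /*!
  * \internal
  * \brief Create the command-line option context for crm_resource
  *
  * Add the tool-specific --quiet and --resource options plus the query,
  * command, location, advanced, and additional option groups, along with
  * usage examples for the help text.
  *
  * \return Newly created option context
  */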
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
         { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
           "Resource ID",
           "ID" },
         { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
           NULL,
           NULL },
 
         { NULL }
     };
 
     const char *description = "Examples:\n\n"
                               "List the available OCF agents:\n\n"
                               "\t# crm_resource --list-agents ocf\n\n"
                               "List the available OCF agents from the linux-ha project:\n\n"
                               "\t# crm_resource --list-agents ocf:heartbeat\n\n"
                               "Move 'myResource' to a specific node:\n\n"
                               "\t# crm_resource --resource myResource --move --node altNode\n\n"
                               "Allow (but not force) 'myResource' to move back to its original "
                               "location:\n\n"
                               "\t# crm_resource --resource myResource --clear\n\n"
                               "Stop 'myResource' (and anything that depends on it):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter "
                               PCMK_META_TARGET_ROLE "--meta --parameter-value Stopped\n\n"
                               "Tell the cluster not to manage 'myResource' (the cluster will not "
                               "attempt to start or stop the\n"
                               "resource under any circumstances; useful when performing maintenance "
                               "tasks on a resource):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter "
                               PCMK_META_IS_MANAGED "--meta --parameter-value false\n\n"
                               "Erase the operation history of 'myResource' on 'aNode' (the cluster "
                               "will 'forget' the existing\n"
                               "resource state, including any errors, and attempt to recover the"
                               "resource; useful when a resource\n"
                               "had failed permanently and has been repaired by an administrator):\n\n"
                               "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     /* Add the -Q option, which cannot be part of the globally supported options
      * because some tools use that flag for something else.
      */
     pcmk__add_main_args(context, extra_prog_entries);
 
     pcmk__add_arg_group(context, "queries", "Queries:",
                         "Show query help", query_entries);
     pcmk__add_arg_group(context, "commands", "Commands:",
                         "Show command help", command_entries);
     pcmk__add_arg_group(context, "locations", "Locations:",
                         "Show location help", location_entries);
     pcmk__add_arg_group(context, "advanced", "Advanced:",
                         "Show advanced option help", advanced_entries);
     pcmk__add_arg_group(context, "additional", "Additional Options:",
                         "Show additional options", addl_entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     xmlNode *cib_xml_copy = NULL;
     pcmk_resource_t *rsc = NULL;
     pcmk_node_t *node = NULL;
     uint32_t find_flags = 0;
     int rc = pcmk_rc_ok;
 
     GOptionGroup *output_group = NULL;
     gchar **processed_args = NULL;
     GOptionContext *context = NULL;
 
     /*
      * Parse command line arguments
      */
 
     args = pcmk__new_common_args(SUMMARY);
     processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
     context = build_arg_context(args, &output_group);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     pcmk__cli_init_logging("crm_resource", args->verbosity);
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"),
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     pe__register_messages(out);
     crm_resource_register_messages(out);
     lrmd__register_messages(out);
     pcmk__register_lib_messages(out);
 
     out->quiet = args->quiet;
 
     crm_log_args(argc, argv);
 
     /*
      * Validate option combinations
      */
 
     // --expired without --clear/-U doesn't make sense
     if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U"));
         goto done;
     }
 
     if ((options.remainder != NULL) && (options.override_params != NULL)) {
         // Commands that use positional arguments will create override_params
         for (gchar **s = options.remainder; *s; s++) {
             char *name = pcmk__assert_alloc(1, strlen(*s) + 1);
             char *value = pcmk__assert_alloc(1, strlen(*s) + 1);
             int rc = sscanf(*s, "%[^=]=%s", name, value);
 
             if (rc == 2) {
                 g_hash_table_replace(options.override_params, name, value);
 
             } else {
                 exit_code = CRM_EX_USAGE;
                 g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                             _("Error parsing '%s' as a name=value pair"),
                             *s);
                 free(value);
                 free(name);
                 goto done;
             }
         }
 
     } else if (options.remainder != NULL) {
         gchar **strv = NULL;
         gchar *msg = NULL;
         int i = 1;
         int len = 0;
 
         for (gchar **s = options.remainder; *s; s++) {
             len++;
         }
 
         CRM_ASSERT(len > 0);
 
         /* Add 1 for the strv[0] string below, and add another 1 for the NULL
          * at the end of the array so g_strjoinv knows when to stop.
          */
         strv = pcmk__assert_alloc(len+2, sizeof(char *));
         strv[0] = strdup("non-option ARGV-elements:\n");
 
         for (gchar **s = options.remainder; *s; s++) {
             strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
             i++;
         }
 
         strv[i] = NULL;
 
         exit_code = CRM_EX_USAGE;
         msg = g_strjoinv("", strv);
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
         g_free(msg);
 
         /* Don't try to free the last element, which is just NULL. */
         for(i = 0; i < len+1; i++) {
             free(strv[i]);
         }
         free(strv);
 
         goto done;
     }
 
     if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
         switch (options.rsc_cmd) {
             /* These are the only commands that have historically used the <list>
              * elements in their XML schema.  For all others, use the simple list
              * argument.
              */
             case cmd_get_param:
             case cmd_get_property:
             case cmd_list_instances:
             case cmd_list_standards:
                 pcmk__output_enable_list_element(out);
                 break;
 
             default:
                 break;
         }
 
     } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
         switch (options.rsc_cmd) {
             case cmd_colocations:
             case cmd_list_resources:
                 pcmk__output_text_set_fancy(out, true);
                 break;
             default:
                 break;
         }
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if (options.cmdline_config) {
         /* A resource configuration was given on the command line. Sanity-check
          * the values and set error if they don't make sense.
          */
         validate_cmdline_config();
         if (error != NULL) {
             exit_code = CRM_EX_USAGE;
             goto done;
         }
 
     } else if (options.cmdline_params != NULL) {
         // @COMPAT @TODO error out here when we can break backward compatibility
         g_hash_table_destroy(options.cmdline_params);
         options.cmdline_params = NULL;
     }
 
     if (is_resource_required() && (options.rsc_id == NULL)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     _("Must supply a resource id with -r"));
         goto done;
     }
     if (is_node_required() && (options.host_uname == NULL)) {
         exit_code = CRM_EX_USAGE;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     _("Must supply a node name with -N"));
         goto done;
     }
 
     /*
      * Set up necessary connections
      */
 
     // Establish a connection to the CIB if needed
     if (is_cib_required()) {
         cib_conn = cib_new();
         if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
             exit_code = CRM_EX_DISCONNECT;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Could not create CIB connection"));
             goto done;
         }
         rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Could not connect to the CIB: %s"), pcmk_rc_str(rc));
             goto done;
         }
     }
 
     // Populate scheduler data from XML file if specified or CIB query otherwise
     if (is_scheduler_required()) {
         rc = initialize_scheduler_data(&cib_xml_copy);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             goto done;
         }
     }
 
     find_flags = get_find_flags();
 
     // If command requires that resource exist if specified, find it
     if ((find_flags != 0) && (options.rsc_id != NULL)) {
         rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id,
                                           find_flags);
         if (rsc == NULL) {
             exit_code = CRM_EX_NOSUCH;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Resource '%s' not found"), options.rsc_id);
             goto done;
         }
 
         /* The --ban, --clear, --move, and --restart commands do not work with
          * instances of clone resources.
          */
         if (pcmk__is_clone(rsc->parent) && (strchr(options.rsc_id, ':') != NULL)
             && !accept_clone_instance()) {
 
             exit_code = CRM_EX_INVALID_PARAM;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Cannot operate on clone resource instance '%s'"), options.rsc_id);
             goto done;
         }
     }
 
     // If user supplied a node name, check whether it exists
     if ((options.host_uname != NULL) && (scheduler != NULL)) {
         node = pe_find_node(scheduler->nodes, options.host_uname);
 
         if (node == NULL) {
             exit_code = CRM_EX_NOSUCH;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Node '%s' not found"), options.host_uname);
             goto done;
         }
     }
 
     // Establish a connection to the controller if needed
     if (is_controller_required()) {
         rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Error connecting to the controller: %s"), pcmk_rc_str(rc));
             goto done;
         }
         pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                    NULL);
         rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Error connecting to %s: %s"),
                         pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /*
      * Handle requested command
      */
 
     switch (options.rsc_cmd) {
         case cmd_list_resources: {
             GList *all = NULL;
             uint32_t show_opts = pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending;
 
             all = g_list_prepend(all, (gpointer) "*");
             rc = out->message(out, "resource-list", scheduler,
                               show_opts, true, all, all, false);
             g_list_free(all);
 
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
             break;
         }
 
         case cmd_list_instances:
             rc = out->message(out, "resource-names-list", scheduler->resources);
 
             if (rc != pcmk_rc_ok) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_list_alternatives:
             rc = pcmk__list_alternatives(out, options.agent_spec);
             break;
 
         case cmd_list_agents:
             rc = pcmk__list_agents(out, options.agent_spec);
             break;
 
         case cmd_list_standards:
             rc = pcmk__list_standards(out);
             break;
 
         case cmd_list_providers:
             rc = pcmk__list_providers(out, options.agent_spec);
             break;
 
         case cmd_metadata:
             rc = show_metadata(out, options.agent_spec);
             break;
 
         case cmd_restart:
             /* We don't pass scheduler because rsc needs to stay valid for the
              * entire lifetime of cli_resource_restart(), but it will reset and
              * update the scheduler data multiple times, so it needs to use its
              * own copy.
              */
             rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
                                       options.timeout_ms, cib_conn,
                                       cib_sync_call, options.promoted_role_only,
                                       options.force);
             break;
 
         case cmd_wait:
             rc = wait_till_stable(out, options.timeout_ms, cib_conn);
             break;
 
         case cmd_execute_agent:
             if (options.cmdline_config) {
                 exit_code = cli_resource_execute_from_params(out, NULL,
                     options.v_class, options.v_provider, options.v_agent,
                     options.operation, options.cmdline_params,
                     options.override_params, options.timeout_ms,
                     args->verbosity, options.force, options.check_level);
             } else {
                 exit_code = cli_resource_execute(rsc, options.rsc_id,
                     options.operation, options.override_params,
                     options.timeout_ms, cib_conn, scheduler,
                     args->verbosity, options.force, options.check_level);
             }
             goto done;
 
         case cmd_digests:
             node = pe_find_node(scheduler->nodes, options.host_uname);
             if (node == NULL) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = pcmk__resource_digests(out, rsc, node,
                                             options.override_params);
             }
             break;
 
         case cmd_colocations:
             rc = out->message(out, "locations-and-colocations", rsc,
                               options.recursive, (bool) options.force);
             break;
 
         case cmd_cts:
             rc = pcmk_rc_ok;
             g_list_foreach(scheduler->resources, (GFunc) cli_resource_print_cts,
                            out);
             cli_resource_print_cts_constraints(scheduler);
             break;
 
         case cmd_fail:
             rc = cli_resource_fail(controld_api, options.host_uname,
                                    options.rsc_id, scheduler);
             if (rc == pcmk_rc_ok) {
                 start_mainloop(controld_api);
             }
             break;
 
         case cmd_list_active_ops:
             rc = cli_resource_print_operations(options.rsc_id,
                                                options.host_uname, TRUE,
                                                scheduler);
             break;
 
         case cmd_list_all_ops:
             rc = cli_resource_print_operations(options.rsc_id,
                                                options.host_uname, FALSE,
                                                scheduler);
             break;
 
         case cmd_locate: {
             GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler);
             rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
             g_list_free_full(nodes, free);
             break;
         }
 
         case cmd_query_xml:
             rc = cli_resource_print(rsc, scheduler, true);
             break;
 
         case cmd_query_xml_raw:
             rc = cli_resource_print(rsc, scheduler, false);
             break;
 
         case cmd_why:
             if ((options.host_uname != NULL) && (node == NULL)) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = out->message(out, "resource-reasons-list",
                                   scheduler->resources, rsc, node);
             }
             break;
 
         case cmd_clear:
             rc = clear_constraints(out, &cib_xml_copy);
             break;
 
         case cmd_move:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime);
             } else {
                 rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
                                        options.move_lifetime, cib_conn,
                                        cib_sync_call, scheduler,
                                        options.promoted_role_only,
                                        options.force);
             }
 
             if (rc == EINVAL) {
                 exit_code = CRM_EX_USAGE;
                 goto done;
             }
 
             break;
 
         case cmd_ban:
             if (options.host_uname == NULL) {
                 rc = ban_or_move(out, rsc, options.move_lifetime);
             } else if (node == NULL) {
                 rc = pcmk_rc_node_unknown;
             } else {
                 rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
                                       options.move_lifetime, cib_conn,
                                       cib_sync_call, options.promoted_role_only,
                                       PCMK__ROLE_PROMOTED);
             }
 
             if (rc == EINVAL) {
                 exit_code = CRM_EX_USAGE;
                 goto done;
             }
 
             break;
 
         case cmd_get_property:
             rc = out->message(out, "property-list", rsc, options.prop_name);
             if (rc == pcmk_rc_no_output) {
                 rc = ENXIO;
             }
 
             break;
 
         case cmd_set_property:
             rc = set_property();
             break;
 
         case cmd_get_param: {
             unsigned int count = 0;
             GHashTable *params = NULL;
             pcmk_node_t *current = rsc->fns->active_node(rsc, &count, NULL);
             bool free_params = true;
             const char* value = NULL;
 
             if (count > 1) {
                 out->err(out, "%s is active on more than one node,"
                          " returning the default value for %s", rsc->id,
                          pcmk__s(options.prop_name, "unspecified property"));
                 current = NULL;
             }
 
             crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
 
             if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
                              pcmk__str_none)) {
                 params = pe_rsc_params(rsc, current, scheduler);
                 free_params = false;
 
                 value = g_hash_table_lookup(params, options.prop_name);
 
             } else if (pcmk__str_eq(options.attr_set_type,
                                     PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) {
                 params = pcmk__strkey_table(free, free);
                 get_meta_attributes(params, rsc, NULL, scheduler);
 
                 value = g_hash_table_lookup(params, options.prop_name);
 
             } else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
 
                 value = crm_element_value(rsc->xml, options.prop_name);
                 free_params = false;
 
             } else {
                 params = pcmk__strkey_table(free, free);
                 pe__unpack_dataset_nvpairs(rsc->xml, PCMK_XE_UTILIZATION, NULL,
                                            params, NULL, FALSE, scheduler);
 
                 value = g_hash_table_lookup(params, options.prop_name);
             }
 
             rc = out->message(out, "attribute-list", rsc, options.prop_name, value);
             if (free_params) {
                 g_hash_table_destroy(params);
             }
 
             break;
         }
 
         case cmd_set_param:
             if (pcmk__str_empty(options.prop_value)) {
                 exit_code = CRM_EX_USAGE;
                 g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                             _("You need to supply a value with the -v option"));
                 goto done;
             }
 
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_update_attribute(rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name,
                                                options.prop_value,
                                                options.recursive, cib_conn,
                                                cib_sync_call, options.force);
             break;
 
         case cmd_delete_param:
             /* coverity[var_deref_model] False positive */
             rc = cli_resource_delete_attribute(rsc, options.rsc_id,
                                                options.prop_set,
                                                options.attr_set_type,
                                                options.prop_id,
                                                options.prop_name, cib_conn,
                                                cib_sync_call, options.force);
             break;
 
         case cmd_cleanup:
             if (rsc == NULL) {
                 rc = cli_cleanup_all(controld_api, options.host_uname,
                                      options.operation, options.interval_spec,
                                      scheduler);
                 if (rc == pcmk_rc_ok) {
                     start_mainloop(controld_api);
                 }
             } else {
                 cleanup(out, rsc, node);
             }
             break;
 
         case cmd_refresh:
             if (rsc == NULL) {
                 rc = refresh(out);
             } else {
                 refresh_resource(out, rsc, node);
             }
             break;
 
         case cmd_delete:
             /* rsc_id was already checked for NULL much earlier when validating
              * command line arguments.
              */
             if (options.rsc_type == NULL) {
                 // @COMPAT @TODO change this to exit_code = CRM_EX_USAGE
                 rc = ENXIO;
                 g_set_error(&error, PCMK__RC_ERROR, rc,
                             _("You need to specify a resource type with -t"));
             } else {
                 rc = pcmk__resource_delete(cib_conn, cib_sync_call,
                                            options.rsc_id, options.rsc_type);
 
                 if (rc != pcmk_rc_ok) {
                     g_set_error(&error, PCMK__RC_ERROR, rc,
                                 _("Could not delete resource %s: %s"),
                                 options.rsc_id, pcmk_rc_str(rc));
                 }
             }
 
             break;
 
         default:
             exit_code = CRM_EX_USAGE;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Unimplemented command: %d"), (int) options.rsc_cmd);
             goto done;
     }
 
     /* Convert rc into an exit code. */
     if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
         exit_code = pcmk_rc2exitc(rc);
     }
 
     /*
      * Clean up and exit
      */
 
 done:
     /* When we get here, exit_code has been set in one of two ways: either at
      * one of the spots where there's a "goto done" (where it was set directly
      * or by calling pcmk_rc2exitc), or just above, after one of the break
      * statements.
      *
      * Thus, we can use just exit_code here to decide what to do.
      */
     if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
         if (error != NULL) {
             char *msg = crm_strdup_printf("%s\nError performing operation: %s",
                                           error->message, crm_exit_str(exit_code));
             g_clear_error(&error);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
             free(msg);
         } else {
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Error performing operation: %s"), crm_exit_str(exit_code));
         }
     }
 
     g_free(options.host_uname);
     g_free(options.interval_spec);
     g_free(options.move_lifetime);
     g_free(options.operation);
     g_free(options.prop_id);
     free(options.prop_name);
     g_free(options.prop_set);
     g_free(options.prop_value);
     g_free(options.rsc_id);
     g_free(options.rsc_type);
     free(options.agent_spec);
     free(options.v_agent);
     free(options.v_class);
     free(options.v_provider);
     g_free(options.xml_file);
     g_strfreev(options.remainder);
 
     if (options.override_params != NULL) {
         g_hash_table_destroy(options.override_params);
     }
 
     /* options.cmdline_params does not need to be destroyed here.  See the
      * comments in cli_resource_execute_from_params.
      */
 
     g_strfreev(processed_args);
     g_option_context_free(context);
 
     return bye(exit_code);
 }
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index 2ca02a216c..b6573629de 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -1,127 +1,128 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 #include <stdbool.h>
 
 #include <crm/crm.h>
 
 #include <crm/services.h>
 #include <crm/common/xml.h>
 #include <crm/common/mainloop.h>
 #include <crm/common/output_internal.h>
 #include <crm/common/scheduler_internal.h>
 
 #include <crm/cib.h>
 #include <crm/common/attrs_internal.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
 #define ATTR_SET_ELEMENT "attr_set_element"
 
 typedef struct node_info_s {
     const char *node_name;
     bool promoted;
 } node_info_t;
 
 enum resource_check_flags {
     rsc_remain_stopped  = (1 << 0),
     rsc_unpromotable    = (1 << 1),
     rsc_unmanaged       = (1 << 2),
     rsc_locked          = (1 << 3),
     rsc_node_health     = (1 << 4),
 };
 
 typedef struct resource_checks_s {
     pcmk_resource_t *rsc;   // Resource being checked
     uint32_t flags;         // Group of enum resource_check_flags
     const char *lock_node;  // Node that resource is shutdown-locked to, if any
 } resource_checks_t;
 
 resource_checks_t *cli_check_resource(pcmk_resource_t *rsc, char *role_s,
                                       char *managed);
 
 /* ban */
 int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host,
                         const char *move_lifetime, cib_t * cib_conn, int cib_options,
                         gboolean promoted_role_only, const char *promoted_role);
 int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
                      const char *move_lifetime, cib_t *cib_conn, int cib_options,
                      gboolean promoted_role_only, const char *promoted_role);
 int cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes,
                        cib_t * cib_conn, int cib_options, bool clear_ban_constraints, gboolean force);
 int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int cib_options,
                                    const char *rsc, const char *node, gboolean promoted_role_only);
 
 /* print */
 void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out);
 void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler);
 
 int cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
                        bool expanded);
 int cli_resource_print_operations(const char *rsc_id, const char *host_uname,
                                   bool active, pcmk_scheduler_t *scheduler);
 
 /* runtime */
 int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc,
                        pcmk_node_t *node);
 int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
                       const char *rsc_id, pcmk_scheduler_t *scheduler);
 GList *cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
                              pcmk_scheduler_t *scheduler);
 int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
                         const pcmk_resource_t *rsc, const char *operation,
                         const char *interval_spec, bool just_failures,
                         pcmk_scheduler_t *scheduler, gboolean force);
 int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
                     const char *operation, const char *interval_spec,
                     pcmk_scheduler_t *scheduler);
 int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
                          const pcmk_node_t *node, const char *move_lifetime,
-                         int timeout_ms, cib_t *cib, int cib_options,
+                         guint timeout_ms, cib_t *cib, int cib_options,
                          gboolean promoted_role_only, gboolean force);
 int cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
                       const char *host_name, const char *move_lifetime,
                       cib_t *cib, int cib_options, pcmk_scheduler_t *scheduler,
                       gboolean promoted_role_only, gboolean force);
 crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                             const char *rsc_class, const char *rsc_prov,
                                             const char *rsc_type, const char *rsc_action,
                                             GHashTable *params, GHashTable *override_hash,
-                                            int timeout_ms, int resource_verbose,
+                                            guint timeout_ms,
+                                            int resource_verbose,
                                             gboolean force, int check_level);
 crm_exit_t cli_resource_execute(pcmk_resource_t *rsc,
                                 const char *requested_name,
                                 const char *rsc_action, GHashTable *override_hash,
-                                int timeout_ms, cib_t *cib,
+                                guint timeout_ms, cib_t *cib,
                                 pcmk_scheduler_t *scheduler,
                                 int resource_verbose, gboolean force, int check_level);
 
 int cli_resource_update_attribute(pcmk_resource_t *rsc,
                                   const char *requested_name,
                                   const char *attr_set, const char *attr_set_type,
                                   const char *attr_id, const char *attr_name,
                                   const char *attr_value, gboolean recursive,
                                   cib_t *cib, int cib_options, gboolean force);
 int cli_resource_delete_attribute(pcmk_resource_t *rsc,
                                   const char *requested_name,
                                   const char *attr_set, const char *attr_set_type,
                                   const char *attr_id, const char *attr_name,
                                   cib_t *cib, int cib_options, gboolean force);
 
 int update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml);
-int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib);
+int wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib);
 
 bool resource_is_running_on(pcmk_resource_t *rsc, const char *host);
 
 void crm_resource_register_messages(pcmk__output_t *out);
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 36e5792636..c4a0a0a751 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2249 +1,2282 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
-#include <crm_resource.h>
+#include <stdio.h>
+#include <limits.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
 #include <crm/common/ipc_attrd_internal.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/common/lists_internal.h>
 #include <crm/services_internal.h>
 
+#include <crm_resource.h>
+
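 /* Build a list of node_info_t entries, one for each node that each child of
  * the given collective resource is running on, noting whether that instance
  * is promoted there
  */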
 static GList *
 build_node_info_list(const pcmk_resource_t *rsc)
 {
     GList *retval = NULL;
 
     for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
 
         for (const GList *iter2 = child->running_on;
              iter2 != NULL; iter2 = iter2->next) {
 
             const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
             node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
 
             ni->node_name = node->details->uname;
             ni->promoted = pcmk_is_set(rsc->flags, pcmk_rsc_promotable) &&
                            child->fns->state(child, TRUE) == pcmk_role_promoted;
 
             retval = g_list_prepend(retval, ni);
         }
     }
 
     return retval;
 }
 
 GList *
 cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
                     pcmk_scheduler_t *scheduler)
 {
     GList *retval = NULL;
     const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
 
     if (pcmk__is_clone(rsc)) {
         retval = build_node_info_list(rsc);
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pcmk__is_clone(parent)
                && !pcmk_is_set(rsc->flags, pcmk_rsc_unique)
                && rsc->clone_name
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
         retval = build_node_info_list(parent);
 
     } else if (rsc->running_on != NULL) {
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pcmk_node_t *node = (pcmk_node_t *) iter->data;
             node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
 
             ni->node_name = node->details->uname;
             ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted);
 
             retval = g_list_prepend(retval, ni);
         }
     }
 
     return retval;
 }
 
 // \return Standard Pacemaker return code
 static int
 find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
                    const char *rsc, const char *attr_set_type, const char *set_name,
                    const char *attr_id, const char *attr_name, char **value)
 {
     int rc = pcmk_rc_ok;
     xmlNode *xml_search = NULL;
     GString *xpath = NULL;
     const char *xpath_base = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return ENOTCONN;
     }
 
     xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES);
     if (xpath_base == NULL) {
         crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)");
         return ENOMSG;
     }
 
     xpath = g_string_sized_new(1024);
     pcmk__g_strcat(xpath,
                    xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL);
 
     if (attr_set_type != NULL) {
         pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
         if (set_name != NULL) {
             pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]",
                            NULL);
         }
     }
 
     g_string_append(xpath, "//" PCMK_XE_NVPAIR);
 
     if (attr_id != NULL && attr_name != NULL) {
         pcmk__g_strcat(xpath,
                        "[@" PCMK_XA_ID "='", attr_id, "' "
                        "and @" PCMK_XA_NAME "='", attr_name, "']", NULL);
 
     } else if (attr_id != NULL) {
         pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL);
 
     } else if (attr_name != NULL) {
         pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL);
     }
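     /* The assembled query now looks roughly like
      *   <resources xpath>//*[@id="RSC"]/SET_TYPE[@id="SET"]//nvpair[@name='NAME']
      * with the optional predicates present only when the corresponding
      * arguments were supplied
      */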
 
     rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         goto done;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_search->children != NULL) {
         rc = ENOTUNIQ;
         pcmk__warn_multiple_name_matches(out, xml_search, attr_name);
         out->spacer(out);
     } else if(value) {
         pcmk__str_update(value, crm_element_value(xml_search, attr));
     }
 
   done:
     g_string_free(xpath, TRUE);
     free_xml(xml_search);
     return rc;
 }
 
 /* PRIVATE. Use find_matching_attr_resources() instead. */
 static void
 find_matching_attr_resources_recursive(pcmk__output_t *out,
                                        GList /* <pcmk_resource_t*> */ **result,
                                        pcmk_resource_t *rsc, const char * attr_set,
                                        const char * attr_set_type, const char * attr_id,
                                        const char * attr_name, cib_t * cib, int depth)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = clone_strip(rsc->id);
 
     /* visit the children */
     for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
         find_matching_attr_resources_recursive(out, result,
                                                (pcmk_resource_t *) gIter->data,
                                                attr_set, attr_set_type, attr_id,
                                                attr_name, cib, depth+1);
         /* do it only once for clones */
         if (rsc->variant == pcmk_rsc_variant_clone) {
             break;
         }
     }
 
     rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
                             attr_set, attr_id, attr_name, NULL);
     /* Post-order traversal.
      * The root is always on the list and it is the last item. */
     if((0 == depth) || (pcmk_rc_ok == rc)) {
         /* push the head */
         *result = g_list_append(*result, rsc);
     }
 
     free(lookup_id);
 }
 
 
 /* The result is a linearized pre-ordered tree of resources. */
 static GList/*<pcmk_resource_t*>*/ *
 find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
                              const char * rsc_id, const char * attr_set,
                              const char * attr_set_type, const char * attr_id,
                              const char * attr_name, cib_t * cib, const char * cmd,
                              gboolean force)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = NULL;
     GList * result = NULL;
     /* If --force is used, update only the requested resource (clone or primitive).
      * Otherwise, if the primitive has the attribute, use that.
      * Otherwise use the clone. */
     if(force == TRUE) {
         return g_list_append(result, rsc);
     }
     if ((rsc->parent != NULL)
         && (rsc->parent->variant == pcmk_rsc_variant_clone)) {
         int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type,
                                     attr_set, attr_id, attr_name, NULL);
 
         if(rc != pcmk_rc_ok) {
             rsc = rsc->parent;
             out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
                       cmd, attr_name, rsc->id, rsc_id);
         }
         return g_list_append(result, rsc);
 
     } else if ((rsc->parent == NULL) && (rsc->children != NULL)
                && (rsc->variant == pcmk_rsc_variant_clone)) {
         pcmk_resource_t *child = rsc->children->data;
 
         if (child->variant == pcmk_rsc_variant_primitive) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
                                     attr_set_type, attr_set, attr_id, attr_name,
                                     NULL);
 
             if(rc == pcmk_rc_ok) {
                 rsc = child;
                 out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
                           attr_name, lookup_id, cmd, rsc_id);
             }
 
             free(lookup_id);
         }
         return g_list_append(result, rsc);
     }
     /* If the resource is a group, its children inherit the attribute if defined. */
     find_matching_attr_resources_recursive(out, &result, rsc, attr_set,
                                            attr_set_type, attr_id, attr_name,
                                            cib, 0);
     return result;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_set_type,
                               const char *attr_id, const char *attr_name,
                               const char *attr_value, gboolean recursive,
                               cib_t *cib, int cib_options, gboolean force)
 {
     pcmk__output_t *out = rsc->cluster->priv;
     int rc = pcmk_rc_ok;
 
     char *found_attr_id = NULL;
 
     GList/*<pcmk_resource_t*>*/ *resources = NULL;
     const char *top_id = pe__const_top_resource(rsc, false)->id;
 
     if ((attr_id == NULL) && !force) {
         find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL,
                            attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
                      pcmk__str_casei)) {
         if (!force) {
             rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id,
                                     PCMK_XE_META_ATTRIBUTES, attr_set, attr_id,
                                     attr_name, &found_attr_id);
             if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
                 out->err(out,
                          "WARNING: There is already a meta attribute "
                          "for '%s' called '%s' (id=%s)",
                          top_id, attr_name, found_attr_id);
                 out->err(out,
                          "         Delete '%s' first or use the force option "
                          "to override", found_attr_id);
             }
             free(found_attr_id);
             if (rc == pcmk_rc_ok) {
                 return ENOTUNIQ;
             }
         }
         resources = g_list_append(resources, rsc);
 
     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
         crm_xml_add(rsc->xml, attr_name, attr_value);
         CRM_ASSERT(cib != NULL);
         rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->xml, cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s",
                       attr_name, attr_value);
         }
         return rc;
 
     } else {
         resources = find_matching_attr_resources(out, rsc, requested_name,
                                                  attr_set, attr_set_type,
                                                  attr_id, attr_name, cib,
                                                  "update", force);
     }
 
     /* If the user specified attr_set or attr_id, the intent is to modify a
      * single resource, which will be the last item in the list.
      */
     if ((attr_set != NULL) || (attr_id != NULL)) {
         GList *last = g_list_last(resources);
 
         resources = g_list_remove_link(resources, last);
         g_list_free(resources);
         resources = last;
     }
 
     for (GList *iter = resources; iter != NULL; iter = iter->next) {
         char *lookup_id = NULL;
         char *local_attr_set = NULL;
         const char *rsc_attr_id = attr_id;
         const char *rsc_attr_set = attr_set;
 
         xmlNode *xml_top = NULL;
         xmlNode *xml_obj = NULL;
         found_attr_id = NULL;
 
         rsc = (pcmk_resource_t *) iter->data;
 
         lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
         rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &found_attr_id);
 
         switch (rc) {
             case pcmk_rc_ok:
                 crm_debug("Found a match for " PCMK_XA_NAME "='%s': "
                           PCMK_XA_ID "='%s'", attr_name, found_attr_id);
                 rsc_attr_id = found_attr_id;
                 break;
 
             case ENXIO:
                 if (rsc_attr_set == NULL) {
                     local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
                                                        attr_set_type);
                     rsc_attr_set = local_attr_set;
                 }
                 if (rsc_attr_id == NULL) {
                     found_attr_id = crm_strdup_printf("%s-%s",
                                                       rsc_attr_set, attr_name);
                     rsc_attr_id = found_attr_id;
                 }
 
                 xml_top = pcmk__xe_create(NULL, (const char *) rsc->xml->name);
                 crm_xml_add(xml_top, PCMK_XA_ID, lookup_id);
 
                 xml_obj = pcmk__xe_create(xml_top, attr_set_type);
                 crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set);
                 break;
 
             default:
                 free(lookup_id);
                 free(found_attr_id);
                 g_list_free(resources);
                 return rc;
         }
 
         xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
                                         attr_value);
         if (xml_top == NULL) {
             xml_top = xml_obj;
         }
 
         crm_log_xml_debug(xml_top, "Update");
 
         rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Set '%s' option: "
                       PCMK_XA_ID "=%s%s%s%s%s value=%s",
                       lookup_id, found_attr_id,
                       ((rsc_attr_set == NULL)? "" : " set="),
                       pcmk__s(rsc_attr_set, ""),
                       ((attr_name == NULL)? "" : " " PCMK_XA_NAME "="),
                       pcmk__s(attr_name, ""), attr_value);
         }
 
         free_xml(xml_top);
 
         free(lookup_id);
         free(found_attr_id);
         free(local_attr_set);
 
         if (recursive
             && pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES,
                             pcmk__str_casei)) {
             GList *lpc = NULL;
             static bool need_init = true;
 
             if (need_init) {
                 need_init = false;
                 pcmk__unpack_constraints(rsc->cluster);
                 pe__clear_resource_flags_on_all(rsc->cluster,
                                                 pcmk_rsc_detect_loop);
             }
 
             /* We want to set the attribute only on resources explicitly
              * colocated with this one, so we use rsc->rsc_cons_lhs directly
              * rather than the with_this_colocations() method.
              */
             pcmk__set_rsc_flags(rsc, pcmk_rsc_detect_loop);
             for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
                 pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
 
                 crm_debug("Checking %s %d", cons->id, cons->score);
                 if (!pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)
                     && (cons->score > 0)) {
                     crm_debug("Setting %s=%s for dependent resource %s",
                               attr_name, attr_value, cons->dependent->id);
                     cli_resource_update_attribute(cons->dependent,
                                                   cons->dependent->id, NULL,
                                                   attr_set_type, NULL,
                                                   attr_name, attr_value,
                                                   recursive, cib, cib_options,
                                                   force);
                 }
             }
         }
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_set_type,
                               const char *attr_id, const char *attr_name,
                               cib_t *cib, int cib_options, gboolean force)
 {
     pcmk__output_t *out = rsc->cluster->priv;
     int rc = pcmk_rc_ok;
     GList/*<pcmk_resource_t*>*/ *resources = NULL;
 
     if ((attr_id == NULL) && !force) {
         find_resource_attr(out, cib, PCMK_XA_ID,
                            pe__const_top_resource(rsc, false)->id, NULL,
                            NULL, NULL, attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) {
         resources = find_matching_attr_resources(out, rsc, requested_name,
                                                  attr_set, attr_set_type,
                                                  attr_id, attr_name, cib,
                                                  "delete", force);
 
     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
         pcmk__xe_remove_attr(rsc->xml, attr_name);
         CRM_ASSERT(cib != NULL);
         rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->xml, cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Deleted attribute: %s", attr_name);
         }
         return rc;
 
     } else {
         resources = g_list_append(resources, rsc);
     }
 
     for (GList *iter = resources; iter != NULL; iter = iter->next) {
         char *lookup_id = NULL;
         xmlNode *xml_obj = NULL;
         char *found_attr_id = NULL;
         const char *rsc_attr_id = attr_id;
 
         rsc = (pcmk_resource_t *) iter->data;
 
         lookup_id = clone_strip(rsc->id);
         rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &found_attr_id);
         switch (rc) {
             case pcmk_rc_ok:
                 break;
 
             case ENXIO:
                 free(lookup_id);
                 rc = pcmk_rc_ok;
                 continue;
 
             default:
                 free(lookup_id);
                 g_list_free(resources);
                 return rc;
         }
 
         if (rsc_attr_id == NULL) {
             rsc_attr_id = found_attr_id;
         }
 
         xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
         crm_log_xml_debug(xml_obj, "Delete");
 
         CRM_ASSERT(cib);
         rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok) {
             out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s",
                       lookup_id, found_attr_id,
                       ((attr_set == NULL)? "" : " set="),
                       pcmk__s(attr_set, ""),
                       ((attr_name == NULL)? "" : " " PCMK_XA_NAME "="),
                       pcmk__s(attr_name, ""));
         }
 
         free(lookup_id);
         free_xml(xml_obj);
         free(found_attr_id);
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
                 const char *host_uname, const char *rsc_id,
                 pcmk_scheduler_t *scheduler)
 {
     pcmk__output_t *out = scheduler->priv;
     const char *router_node = host_uname;
     const char *rsc_api_id = NULL;
     const char *rsc_long_id = NULL;
     const char *rsc_class = NULL;
     const char *rsc_provider = NULL;
     const char *rsc_type = NULL;
     bool cib_only = false;
     pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
 
     if (rsc == NULL) {
         out->err(out, "Resource %s not found", rsc_id);
         return ENXIO;
 
     } else if (rsc->variant != pcmk_rsc_variant_primitive) {
         out->err(out, "We can only process primitive resources, not %s", rsc_id);
         return EINVAL;
     }
 
     rsc_class = crm_element_value(rsc->xml, PCMK_XA_CLASS);
     rsc_provider = crm_element_value(rsc->xml, PCMK_XA_PROVIDER);
     rsc_type = crm_element_value(rsc->xml, PCMK_XA_TYPE);
     if ((rsc_class == NULL) || (rsc_type == NULL)) {
         out->err(out, "Resource %s does not have a class and type", rsc_id);
         return EINVAL;
     }
 
     {
         pcmk_node_t *node = pe_find_node(scheduler->nodes, host_uname);
 
         if (node == NULL) {
             out->err(out, "Node %s not found", host_uname);
             return pcmk_rc_node_unknown;
         }
 
         if (!(node->details->online)) {
             if (do_fail_resource) {
                 out->err(out, "Node %s is not online", host_uname);
                 return ENOTCONN;
             } else {
                 cib_only = true;
             }
         }
         if (!cib_only && pcmk__is_pacemaker_remote_node(node)) {
             node = pcmk__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
                          host_uname);
                 return ENOTCONN;
             }
             router_node = node->details->uname;
         }
     }
 
     if (rsc->clone_name) {
         rsc_api_id = rsc->clone_name;
         rsc_long_id = rsc->id;
     } else {
         rsc_api_id = rsc->id;
     }
     if (do_fail_resource) {
         return pcmk_controld_api_fail(controld_api, host_uname, router_node,
                                       rsc_api_id, rsc_long_id,
                                       rsc_class, rsc_provider, rsc_type);
     } else {
         return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
                                          rsc_api_id, rsc_long_id, rsc_class,
                                          rsc_provider, rsc_type, cib_only);
     }
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
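  *       For example, an instance-number suffix such as ":2" is stripped for
  *       anonymous clone instances, while globally unique instances keep it.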
  */
 static inline char *
 rsc_fail_name(const pcmk_resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
         return strdup(name);
     }
     return clone_strip(name);
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
                   const char *rsc_id, pcmk_scheduler_t *scheduler)
 {
     int rc = pcmk_rc_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     crm_trace("Processing %d mainloop inputs",
               pcmk_controld_api_replies_expected(controld_api));
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   pcmk_controld_api_replies_expected(controld_api));
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                    const char *node_name, const char *rsc_id, const char *operation,
                    const char *interval_spec, pcmk_scheduler_t *scheduler)
 {
     int rc = pcmk_rc_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = pcmk__strkey_table(NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
     if (operation) {
         guint interval_ms = 0U;
 
         pcmk_parse_interval_spec(interval_spec, &interval_ms);
         interval_ms_s = crm_strdup_printf("%u", interval_ms);
     }
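     /* For example, an interval_spec of "2m" becomes "120000" here, matching
      * how intervals are recorded in the operation history entries
      */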
 
     for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->failed, NULL, NULL,
                                                 NULL);
          xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             pcmk_resource_t *fail_rsc = NULL;
 
             fail_rsc = pe_find_resource_with_flags(scheduler->resources,
                                                    failed_id,
                                                    pcmk_rsc_match_history
                                                    |pcmk_rsc_match_anon_basename);
             if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, PCMK_XA_UNAME);
         if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION);
             if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
                 continue;
             }
 
             // With an operation specified, the interval defaults to 0
             // (rather than matching all intervals)
             failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL);
             if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
                 continue;
             }
         }
 
         g_hash_table_add(rscs, (gpointer) failed_id);
     }
 
     free(interval_ms_s);
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
         if (rc != pcmk_rc_ok) {
             return rc;
         }
     }
     g_hash_table_destroy(rscs);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
                      const char *interval_spec, const pcmk_node_t *node)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (pcmk__is_pacemaker_remote_node(node)) {
         attr_options |= pcmk__node_attr_remote;
     }
 
     rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name,
                                         operation, interval_spec, NULL,
                                         attr_options);
     free(rsc_name);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
                     const pcmk_resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pcmk_scheduler_t *scheduler, gboolean force)
 {
     pcmk__output_t *out = scheduler->priv;
     int rc = pcmk_rc_ok;
     pcmk_node_t *node = NULL;
 
     if (rsc == NULL) {
         return ENXIO;
 
     } else if (rsc->children) {
 
         for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
 
             rc = cli_resource_delete(controld_api, host_uname, child, operation,
                                      interval_spec, just_failures, scheduler,
                                      force);
             if (rc != pcmk_rc_ok) {
                 return rc;
             }
         }
         return pcmk_rc_ok;
 
     } else if (host_uname == NULL) {
         GList *lpc = NULL;
         GList *nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && force) {
             nodes = pcmk__copy_node_list(scheduler->nodes, false);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pcmk_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (pcmk_node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(controld_api, node->details->uname, rsc,
                                          operation, interval_spec, just_failures,
                                          scheduler, force);
             }
             if (rc != pcmk_rc_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_rc_ok;
     }
 
     node = pe_find_node(scheduler->nodes, host_uname);
 
     if (node == NULL) {
         out->err(out, "Unable to clean up %s because node %s not found",
                  rsc->id, host_uname);
         return ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
                  rsc->id, host_uname);
         return EOPNOTSUPP;
     }
 
     if (controld_api == NULL) {
         out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
                  rsc->id, host_uname);
         return pcmk_rc_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up %s failures on %s: %s",
                  rsc->id, host_uname, pcmk_rc_str(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
                                 interval_spec, scheduler);
     } else {
         rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
     }
     if (rc != pcmk_rc_ok) {
         out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
                  rsc->id, host_uname, pcmk_rc_str(rc));
     } else {
         out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
                 const char *operation, const char *interval_spec,
                 pcmk_scheduler_t *scheduler)
 {
     pcmk__output_t *out = scheduler->priv;
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   display_name);
         return rc;
     }
 
     if (node_name) {
         pcmk_node_t *node = pe_find_node(scheduler->nodes, node_name);
 
         if (node == NULL) {
             out->err(out, "Unknown node: %s", node_name);
             return ENXIO;
         }
         if (pcmk__is_pacemaker_remote_node(node)) {
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
                                         interval_spec, NULL, attr_options);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up all failures on %s: %s",
                  display_name, pcmk_rc_str(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(out, controld_api, node_name, NULL,
                                 operation, interval_spec, scheduler);
         if (rc != pcmk_rc_ok) {
             out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
                      node_name, pcmk_rc_str(rc));
             return rc;
         }
     } else {
         for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
             pcmk_node_t *node = (pcmk_node_t *) iter->data;
 
             rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
                                     operation, interval_spec, scheduler);
             if (rc != pcmk_rc_ok) {
                 out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
                          pcmk_rc_str(rc));
                 return rc;
             }
         }
     }
 
     out->info(out, "Cleaned up all resources on %s", display_name);
     return rc;
 }
 
 static void
 check_role(resource_checks_t *checks)
 {
     const char *role_s = g_hash_table_lookup(checks->rsc->meta,
                                              PCMK_META_TARGET_ROLE);
 
     if (role_s == NULL) {
         return;
     }
     switch (pcmk_parse_role(role_s)) {
         case pcmk_role_stopped:
             checks->flags |= rsc_remain_stopped;
             break;
 
         case pcmk_role_unpromoted:
             if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
                             pcmk_rsc_promotable)) {
                 checks->flags |= rsc_unpromotable;
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 check_managed(resource_checks_t *checks)
 {
     const char *managed_s = g_hash_table_lookup(checks->rsc->meta,
                                                 PCMK_META_IS_MANAGED);
 
     if ((managed_s != NULL) && !crm_is_true(managed_s)) {
         checks->flags |= rsc_unmanaged;
     }
 }
 
 static void
 check_locked(resource_checks_t *checks)
 {
     if (checks->rsc->lock_node != NULL) {
         checks->flags |= rsc_locked;
         checks->lock_node = checks->rsc->lock_node->details->uname;
     }
 }
 
 static bool
 node_is_unhealthy(pcmk_node_t *node)
 {
     switch (pe__health_strategy(node->details->data_set)) {
         case pcmk__health_strategy_none:
             break;
 
         case pcmk__health_strategy_no_red:
             if (pe__node_health(node) < 0) {
                 return true;
             }
             break;
 
         case pcmk__health_strategy_only_green:
             if (pe__node_health(node) <= 0) {
                 return true;
             }
             break;
 
         case pcmk__health_strategy_progressive:
         case pcmk__health_strategy_custom:
             /* @TODO These are finite scores, possibly with rules, and possibly
              * combining with other scores, so attributing these as a cause is
              * nontrivial.
              */
             break;
     }
     return false;
 }
 
 static void
 check_node_health(resource_checks_t *checks, pcmk_node_t *node)
 {
     if (node == NULL) {
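         /* With no specific node, flag a health problem only if the resource
          * is allowed on at least one node and all of its allowed nodes are
          * unhealthy
          */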
         GHashTableIter iter;
         bool allowed = false;
         bool all_nodes_unhealthy = true;
 
         g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             allowed = true;
             if (!node_is_unhealthy(node)) {
                 all_nodes_unhealthy = false;
                 break;
             }
         }
         if (allowed && all_nodes_unhealthy) {
             checks->flags |= rsc_node_health;
         }
 
     } else if (node_is_unhealthy(node)) {
         checks->flags |= rsc_node_health;
     }
 }
 
 int
 cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
 {
     resource_checks_t checks = { .rsc = rsc };
 
     check_role(&checks);
     check_managed(&checks);
     check_locked(&checks);
     check_node_health(&checks, node);
 
     return out->message(out, "resource-check-list", &checks);
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
                   const char *rsc_id, pcmk_scheduler_t *scheduler)
 {
     crm_notice("Failing %s on %s", rsc_id, host_uname);
     return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
 }
 
 static GHashTable *
 generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
                          pcmk_scheduler_t *scheduler)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
     char *key = NULL;
     char *value = NULL;
 
     combined = pcmk__strkey_table(free, free);
 
     params = pe_rsc_params(rsc, node, scheduler);
     if (params != NULL) {
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             pcmk__insert_dup(combined, key, value);
         }
     }
 
     meta = pcmk__strkey_table(free, free);
     get_meta_attributes(meta, rsc, NULL, scheduler);
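     // Add the meta-attributes using their CRM_meta_ names
     // (e.g. target-role becomes CRM_meta_target_role)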
     if (meta != NULL) {
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
 bool resource_is_running_on(pcmk_resource_t *rsc, const char *host)
 {
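     /* With a non-NULL host, this checks whether the resource is active on
      * that node; with a NULL host, it checks whether the resource is active
      * anywhere
      */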
     bool found = true;
     GList *hIter = NULL;
     GList *hosts = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pcmk_node_t *node = (pcmk_node_t *) hIter->data;
 
         if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if (host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = false;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = false;
     }
 
   done:
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pcmk_rsc_variant_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
 static void dump_list(GList *items, const char *tag)
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(pcmk__output_t *out, GList *items, const char *tag)
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         out->info(out, "%s%s", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as scheduler input
  *
  * This also updates the scheduler timestamp to the current time.
  *
  * \param[in,out] scheduler  Scheduler data to update
  * \param[in,out] xml        XML to use as input
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       scheduler->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns ENOKEY ("Required key not available") if that fails,
  *       but perhaps pcmk_rc_schema_validation would be better in that case.
  */
 int
 update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return ENOKEY;
     }
     scheduler->input = *xml;
     scheduler->now = crm_time_new(NULL);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Update scheduler XML input based on a CIB query
  *
  * \param[in] scheduler  Scheduler data to initialize
  * \param[in] cib        Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       scheduler->input and scheduler->now.
  */
 static int
 update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
                               cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc = pcmk_rc_ok;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc);
         return rc;
     }
     rc = update_scheduler_input(scheduler, &cib_xml_copy);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not upgrade the current CIB XML");
         free_xml(cib_xml_copy);
         return rc;
     }
 
     return rc;
 }
 
 // \return Standard Pacemaker return code
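 // If simulate is true, the status is instead refreshed from a shadow CIB
 // after scheduling and simulating a transition on the current input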
 static int
 update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc = pcmk_rc_ok;
 
     pcmk__output_t *out = scheduler->priv;
 
     pe_reset_working_set(scheduler);
     pcmk__set_scheduler_flags(scheduler,
                               pcmk_sched_no_counts|pcmk_sched_no_compat);
     rc = update_scheduler_input_to_cib(out, scheduler, cib);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if(simulate) {
         bool prev_quiet = false;
 
         pid = pcmk__getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             out->err(out, "Could not create shadow cib: '%s'", pid);
             rc = ENXIO;
             goto done;
         }
 
         rc = pcmk__xml_write_file(scheduler->input, shadow_file, false, NULL);
         if (rc != pcmk_rc_ok) {
             out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc));
             goto done;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             out->err(out, "Could not connect to shadow cib: %s",
                      pcmk_rc_str(rc));
             goto done;
         }
 
         pcmk__schedule_actions(scheduler->input,
                                pcmk_sched_no_counts|pcmk_sched_no_compat,
                                scheduler);
 
         prev_quiet = out->is_quiet(out);
         out->quiet = true;
         pcmk__simulate_transition(scheduler, shadow_cib, NULL);
         out->quiet = prev_quiet;
 
         rc = update_dataset(shadow_cib, scheduler, false);
 
     } else {
         cluster_status(scheduler);
     }
 
   done:
     // Do not free scheduler->input here; rsc->xml must remain valid later on
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Find the maximum stop timeout of a resource and its children (if any)
  *
  * \param[in,out] rsc  Resource to get timeout for
  *
  * \return Maximum stop timeout for \p rsc (in milliseconds)
  */
-static int
+static guint
 max_rsc_stop_timeout(pcmk_resource_t *rsc)
 {
     long long result_ll;
-    int max_delay = 0;
+    guint max_delay = 0;
     xmlNode *config = NULL;
     GHashTable *meta = NULL;
 
     if (rsc == NULL) {
         return 0;
     }
 
     // If resource is collective, use maximum of its children's stop timeouts
     if (rsc->children != NULL) {
         for (GList *iter = rsc->children; iter; iter = iter->next) {
             pcmk_resource_t *child = iter->data;
-            int delay = max_rsc_stop_timeout(child);
+            guint delay = max_rsc_stop_timeout(child);
 
             if (delay > max_delay) {
                 pcmk__rsc_trace(rsc,
                                 "Maximum stop timeout for %s is now %s "
                                 "due to %s", rsc->id,
                                 pcmk__readable_interval(delay), child->id);
                 max_delay = delay;
             }
         }
         return max_delay;
     }
 
     // Get resource's stop action configuration from CIB
     config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);
 
     /* Get configured timeout for stop action (fully evaluated for rules,
      * defaults, etc.).
      *
      * @TODO This currently ignores node (which might matter for rules)
      */
     meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
     if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
-                       &result_ll, -1LL) == pcmk_rc_ok)
-        && (result_ll >= 0) && (result_ll <= INT_MAX)) {
-        max_delay = (int) result_ll;
+                       &result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0)) {
+        max_delay = (guint) QB_MIN(result_ll, UINT_MAX);
     }
     g_hash_table_destroy(meta);
 
     return max_delay;
 }
 
 /*!
  * \internal
  * \brief Find a reasonable waiting time for stopping any one resource in a list
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     resources  List of names of resources that will be stopped
  *
  * \return Rough estimate of a reasonable time to wait (in seconds) to stop any
  *         one resource in \p resources
  * \note This estimate is very rough, simply the maximum stop timeout of all
  *       given resources and their children, plus a small fudge factor. It does
  *       not account for children that must be stopped in sequence, action
  *       throttling, or any demotions needed. It checks the stop timeout, even
  *       if the resources in question are actually being started.
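  *       For example, if the largest stop timeout found is 90000ms, the
  *       estimated wait is (90000 / 1000) + 5 = 95 seconds.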
  */
-static int
+static guint
 wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
 {
-    int max_delay = 0;
+    guint max_delay = 0U;
 
     // Find maximum stop timeout in milliseconds
     for (const GList *item = resources; item != NULL; item = item->next) {
         pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
                                                 (const char *) item->data);
-        int delay = max_rsc_stop_timeout(rsc);
+        guint delay = max_rsc_stop_timeout(rsc);
 
         if (delay > max_delay) {
             pcmk__rsc_trace(rsc,
                             "Wait time is now %s due to %s",
                             pcmk__readable_interval(delay), rsc->id);
             max_delay = delay;
         }
     }
 
-    return (max_delay / 1000) + 5;
+    return (max_delay / 1000U) + 5U;
 }
 
 #define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (!resource_is_running_on((r), (h))))
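 // True while the delta list of resources is non-empty or the given resource
 // is not yet running on the given host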
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in,out] out                 Output object
  * \param[in,out] rsc                 The resource to restart
  * \param[in]     node                Node to restart resource on (NULL for all)
  * \param[in]     move_lifetime       If not NULL, how long constraint should
  *                                    remain in effect (as ISO 8601 string)
  * \param[in]     timeout_ms          Consider failed if actions do not complete
  *                                    in this time (specified in milliseconds,
  *                                    but a two-second granularity is actually
  *                                    used; if 0, it will be calculated based on
  *                                    the resource timeout)
  * \param[in,out] cib                 Connection to the CIB manager
  * \param[in]     cib_options         Group of enum cib_call_options flags to
  *                                    use with CIB calls
  * \param[in]     promoted_role_only  If true, limit to promoted instances
  * \param[in]     force               If true, apply only to requested instance
  *                                    if part of a collective resource
  *
  * \return Standard Pacemaker return code (exits on certain failures)
  */
 int
 cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
                      const pcmk_node_t *node, const char *move_lifetime,
-                     int timeout_ms, cib_t *cib, int cib_options,
+                     guint timeout_ms, cib_t *cib, int cib_options,
                      gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     int lpc = 0;
     int before = 0;
-    int step_timeout_s = 0;
-    int sleep_interval = 2;
-    int timeout = timeout_ms / 1000;
+    guint step_timeout_s = 0;
+    guint sleep_interval = 2U;
+    guint timeout = timeout_ms / 1000U;
 
     bool stop_via_ban = false;
     char *rsc_id = NULL;
     char *lookup_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pcmk_scheduler_t *scheduler = NULL;
     pcmk_resource_t *parent = uber_parent(rsc);
 
     bool running = false;
     const char *id = rsc->clone_name ? rsc->clone_name : rsc->id;
     const char *host = node ? node->details->uname : NULL;
 
     /* If the implicit resource or primitive resource of a bundle is given, operate on the
      * bundle itself instead.
      */
     if (pcmk__is_bundled(rsc)) {
         rsc = parent->parent;
     }
 
     running = resource_is_running_on(rsc, host);
 
     if (pcmk__is_clone(parent) && !running) {
         if (pcmk__is_unique_clone(parent)) {
             lookup_id = strdup(rsc->id);
         } else {
             lookup_id = clone_strip(rsc->id);
         }
 
         rsc = parent->fns->find_rsc(parent, lookup_id, node,
                                     pcmk_rsc_match_basename
                                     |pcmk_rsc_match_current_node);
         free(lookup_id);
         running = resource_is_running_on(rsc, host);
     }
 
     if (!running) {
         if (host) {
             out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
         } else {
             out->err(out, "%s is not running anywhere and so cannot be restarted", id);
         }
         return ENXIO;
     }
 
     if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
         out->err(out, "Unmanaged resources cannot be restarted.");
         return EAGAIN;
     }
 
     rsc_id = strdup(rsc->id);
 
     if (pcmk__is_unique_clone(parent)) {
         lookup_id = strdup(rsc->id);
     } else {
         lookup_id = clone_strip(rsc->id);
     }
 
     if (host) {
         if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) {
             stop_via_ban = true;
         } else if (pcmk__is_clone(parent)) {
             stop_via_ban = true;
             free(lookup_id);
             lookup_id = strdup(parent->id);
         }
     }
 
     /*
       grab full cib
       determine originally active resources
       disable or ban
       poll cib and watch for affected resources to get stopped
       without --timeout, calculate the stop timeout for each step and wait for that
       if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
       if everything stopped, re-enable or un-ban
       poll cib and watch for affected resources to get started
       without --timeout, calculate the start timeout for each step and wait for that
       if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
       report success
 
       Optimizations:
       - use constraints to determine ordered list of affected resources
       - Allow a --no-deps option (aka. --force-restart)
     */
 
     scheduler = pe_new_working_set();
     if (scheduler == NULL) {
         rc = errno;
         out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
         goto done;
     }
 
     scheduler->priv = out;
     rc = update_dataset(cib, scheduler, false);
 
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc);
         goto done;
     }
 
     restart_target_active = get_active_resources(host, scheduler->resources);
     current_active = get_active_resources(host, scheduler->resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         out->quiet = true;
         rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
                               cib_options, promoted_role_only,
                               PCMK__ROLE_PROMOTED);
     } else {
         /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
          * Remember any existing PCMK_META_TARGET_ROLE so we can restore it
          * later (though it only makes any difference if it's Unpromoted).
          */
 
         find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL,
                            PCMK_META_TARGET_ROLE, &orig_target_role);
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                            PCMK_XE_META_ATTRIBUTES, NULL,
                                            PCMK_META_TARGET_ROLE,
                                            PCMK_ACTION_STOPPED, FALSE, cib,
                                            cib_options, force);
     }
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                  rsc_id, pcmk_rc_str(rc), rc);
         if (current_active != NULL) {
             g_list_free_full(current_active, free);
             current_active = NULL;
         }
         if (restart_target_active != NULL) {
             g_list_free_full(restart_target_active, free);
             restart_target_active = NULL;
         }
         goto done;
     }
 
     rc = update_dataset(cib, scheduler, true);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not determine which resources would be stopped");
         goto failure;
     }
 
     target_active = get_active_resources(host, scheduler->resources);
     dump_list(target_active, "Target");
 
     list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (list_delta != NULL) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = wait_time_estimate(scheduler, list_delta)
                              / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
-                crm_trace("%ds remaining", timeout);
+                crm_trace("%us remaining", timeout);
             }
             rc = update_dataset(cib, scheduler, FALSE);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were stopped");
                 goto failure;
             }
 
             if (current_active != NULL) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, scheduler->resources);
 
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
 
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                            PCMK_XE_META_ATTRIBUTES, NULL,
                                            PCMK_META_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            cib_options, force);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                            PCMK_XE_META_ATTRIBUTES, NULL,
                                            PCMK_META_TARGET_ROLE, cib,
                                            cib_options, force);
     }
 
     if(rc != pcmk_rc_ok) {
         out->err(out,
                  "Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                  rsc_id, pcmk_rc_str(rc), rc);
         goto done;
     }
 
     if (target_active != NULL) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
 
     list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = wait_time_estimate(scheduler, list_delta)
                              / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(cib, scheduler, false);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were started");
                 goto failure;
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             if (current_active != NULL) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(NULL, scheduler->resources);
 
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_rc_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
     } else if (orig_target_role) {
         cli_resource_update_attribute(rsc, rsc_id, NULL,
                                       PCMK_XE_META_ATTRIBUTES, NULL,
                                       PCMK_META_TARGET_ROLE, orig_target_role,
                                       FALSE, cib, cib_options, force);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                       PCMK_XE_META_ATTRIBUTES, NULL,
                                       PCMK_META_TARGET_ROLE, cib, cib_options,
                                       force);
     }
 
 done:
     if (list_delta != NULL) {
         g_list_free(list_delta);
     }
     if (current_active != NULL) {
         g_list_free_full(current_active, free);
     }
     if (target_active != NULL && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active != NULL) {
         g_list_free_full(restart_target_active, free);
     }
     free(rsc_id);
     free(lookup_id);
     pe_free_working_set(scheduler);
     return rc;
 }
 
 static inline bool
 action_is_pending(const pcmk_action_t *action)
 {
     if (pcmk_any_flags_set(action->flags,
                            pcmk_action_optional|pcmk_action_pseudo)
         || !pcmk_is_set(action->flags, pcmk_action_runnable)
         || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return true if any actions in the list are pending, otherwise false
  */
 static bool
 actions_are_pending(const GList *actions)
 {
     for (const GList *action = actions; action != NULL; action = action->next) {
         const pcmk_action_t *a = (const pcmk_action_t *) action->data;
 
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
             return true;
         }
     }
     return false;
 }
 
 static void
 print_pending_actions(pcmk__output_t *out, GList *actions)
 {
     GList *action;
 
     out->info(out, "Pending actions:");
     for (action = actions; action != NULL; action = action->next) {
         pcmk_action_t *a = (pcmk_action_t *) action->data;
 
         if (!action_is_pending(a)) {
             continue;
         }
 
         if (a->node) {
             out->info(out, "\tAction %d: %s\ton %s",
                       a->id, a->uuid, pcmk__node_name(a->node));
         } else {
             out->info(out, "\tAction %d: %s", a->id, a->uuid);
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in,out] out          Output object
  * \param[in]     timeout_ms   Consider failed if actions do not complete in
  *                             this time (specified in milliseconds, but
  *                             one-second granularity is actually used; if 0, a
  *                             default will be used)
  * \param[in,out] cib          Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  */
 int
-wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
+wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t *cib)
 {
     pcmk_scheduler_t *scheduler = NULL;
     xmlXPathObjectPtr search;
     int rc = pcmk_rc_ok;
     bool pending_unknown_state_resources;
-    int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
-    time_t expire_time = time(NULL) + timeout_s;
+    time_t expire_time = time(NULL);
     time_t time_diff;
     bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
     char *xpath = NULL;
 
+    if (timeout_ms == 0) {
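+        // No timeout given, so wait the default amount of time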
+        expire_time += WAIT_DEFAULT_TIMEOUT_S;
+    } else {
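+        // Round any partial second up to a whole second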
+        expire_time += (timeout_ms + 999) / 1000;
+    }
+
     scheduler = pe_new_working_set();
     if (scheduler == NULL) {
         return ENOMEM;
     }
 
     xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
                               "/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
                               "/" PCMK__XE_LRM_RESOURCES
                               "/" PCMK__XE_LRM_RESOURCE
                               "/" PCMK__XE_LRM_RSC_OP
                               "[@" PCMK__XA_RC_CODE "='%d']",
                               PCMK_OCF_UNKNOWN);
     do {
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff <= 0) {
             print_pending_actions(out, scheduler->actions);
             rc = ETIME;
             break;
         }
 
         crm_info("Waiting up to %lld seconds for cluster actions to complete",
                  (long long) time_diff);
 
         if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         pe_reset_working_set(scheduler);
         rc = update_scheduler_input_to_cib(out, scheduler, cib);
         if (rc != pcmk_rc_ok) {
             break;
         }
         pcmk__schedule_actions(scheduler->input,
                                pcmk_sched_no_counts|pcmk_sched_no_compat,
                                scheduler);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(scheduler->config_hash,
                                                          PCMK_OPT_DC_VERSION);
 
             if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
                 out->info(out, "warning: wait option may not work properly in "
                           "mixed-version cluster");
                 printed_version_warning = true;
             }
         }
 
         search = xpath_search(scheduler->input, xpath);
         pending_unknown_state_resources = (numXpathResults(search) > 0);
         freeXpathObject(search);
     } while (actions_are_pending(scheduler->actions) || pending_unknown_state_resources);
 
     pe_free_working_set(scheduler);
     free(xpath);
     return rc;
 }
 
 static const char *
 get_action(const char *rsc_action) {
     const char *action = NULL;
 
     if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
         action = PCMK_ACTION_VALIDATE_ALL;
 
     } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
         action = PCMK_ACTION_MONITOR;
 
     } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
                                     "force-demote", "force-promote", NULL)) {
         action = rsc_action+6;
     } else {
         action = rsc_action;
     }
 
     return action;
 }
 
 /*!
  * \brief Set up environment variables as expected by resource agents
  *
  * When the cluster executes resource agents, it adds certain environment
  * variables (directly or via resource meta-attributes) expected by some
  * resource agents. Add the essential ones that many resource agents expect, so
  * the behavior is the same for command-line execution.
  *
  * \param[in,out] params       Resource parameters that will be passed to agent
  * \param[in]     timeout_ms   Action timeout (in milliseconds)
  * \param[in]     check_level  OCF check level
  * \param[in]     verbosity    Verbosity level
  */
 static void
-set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
+set_agent_environment(GHashTable *params, guint timeout_ms, int check_level,
                       int verbosity)
 {
     g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
-                        crm_strdup_printf("%d", timeout_ms));
+                        crm_strdup_printf("%u", timeout_ms));
 
     pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
 
     if (check_level >= 0) {
         char *level = crm_strdup_printf("%d", check_level);
 
         setenv("OCF_CHECK_LEVEL", level, 1);
         free(level);
     }
 
     pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
     if (verbosity > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     /* A resource agent using the standard ocf-shellfuncs library will not print
      * messages to stderr if it doesn't have a controlling terminal (e.g. if
      * crm_resource is called via script or ssh). This forces it to do so.
      */
     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
 }
 
 /*!
  * \internal
  * \brief Apply command-line overrides to resource parameters
  *
  * \param[in,out] params     Parameters to be passed to agent
  * \param[in]     overrides  Parameters to override (or NULL if none)
  */
 static void
 apply_overrides(GHashTable *params, GHashTable *overrides)
 {
     if (overrides != NULL) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, overrides);
         while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                       (gpointer *) &value)) {
             pcmk__insert_dup(params, name, value);
         }
     }
 }
 
 crm_exit_t
 cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                  const char *rsc_class, const char *rsc_prov,
                                  const char *rsc_type, const char *rsc_action,
                                  GHashTable *params, GHashTable *override_hash,
-                                 int timeout_ms, int resource_verbose, gboolean force,
-                                 int check_level)
+                                 guint timeout_ms, int resource_verbose,
+                                 gboolean force, int check_level)
 {
     const char *class = rsc_class;
     const char *action = get_action(rsc_action);
     crm_exit_t exit_code = CRM_EX_OK;
     svc_action_t *op = NULL;
 
     // If no timeout was provided, use the same default as the cluster
-    if (timeout_ms == 0) {
+    if (timeout_ms == 0U) {
         timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
     }
 
     set_agent_environment(params, timeout_ms, check_level, resource_verbose);
     apply_overrides(params, override_hash);
 
     op = services__create_resource_action(rsc_name? rsc_name : "test",
                                           rsc_class, rsc_prov, rsc_type, action,
-                                          0, timeout_ms, params, 0);
+                                          0, QB_MIN(timeout_ms, INT_MAX),
+                                          params, 0);
     if (op == NULL) {
         out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                  action, rsc_class, (rsc_prov? ":" : ""),
                  (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
         g_hash_table_destroy(params);
         return CRM_EX_OSERR;
     }
 
     if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
         class = resources_find_service_class(rsc_type);
     }
     if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
                               PCMK_RESOURCE_CLASS_LSB, NULL)) {
         services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
                                 "Manual execution of the %s standard is "
                                 "unsupported", pcmk__s(class, "unspecified"));
     }
 
     if (op->rc != PCMK_OCF_UNKNOWN) {
         exit_code = op->rc;
         goto done;
     }
 
     services_action_sync(op);
 
     // Map results to OCF codes for consistent reporting to user
     {
         enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
 
         // Cast variable instead of function return to keep compilers happy
         exit_code = (crm_exit_t) ocf_code;
     }
 
 done:
     out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                  rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                  exit_code, op->status, services__exit_reason(op),
                  op->stdout_data, op->stderr_data);
     services_action_free(op);
     return exit_code;
 }
 
+/*!
+ * \internal
+ * \brief Get the timeout the cluster would use for an action
+ *
+ * \param[in] rsc     Resource that action is for
+ * \param[in] action  Name of action
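+ *
+ * \return Timeout (in milliseconds) that the cluster would use for \p action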
+ */
+static guint
+get_action_timeout(pcmk_resource_t *rsc, const char *action)
+{
+    long long timeout_ms = -1LL;
+    xmlNode *op = pcmk__find_action_config(rsc, action, 0, true);
+    GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op);
+
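+    // Fall back to the built-in default if no valid timeout is configured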
+    if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
+                       &timeout_ms, -1LL) != pcmk_rc_ok)
+        || (timeout_ms <= 0LL)) {
+        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
+    }
+    g_hash_table_destroy(meta);
+    return (guint) QB_MIN(timeout_ms, UINT_MAX);
+}
+
 crm_exit_t
 cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
                      const char *rsc_action, GHashTable *override_hash,
-                     int timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
+                     guint timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
                      int resource_verbose, gboolean force, int check_level)
 {
     pcmk__output_t *out = scheduler->priv;
     crm_exit_t exit_code = CRM_EX_OK;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     GHashTable *params = NULL;
 
     if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                                     "force-promote", NULL)) {
         if (pcmk__is_clone(rsc)) {
             GList *nodes = cli_resource_search(rsc, requested_name, scheduler);
             if(nodes != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          rsc_action, rsc->id);
                 out->err(out,
                          "Try setting "
                          PCMK_META_TARGET_ROLE "=" PCMK__ROLE_STOPPED
                          " first or specifying the force option");
                 return CRM_EX_UNSAFE;
             }
 
             g_list_free_full(nodes, free);
         }
     }
 
     if (pcmk__is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if (rsc->variant == pcmk_rsc_variant_group) {
         out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     } else if (pcmk__is_bundled(rsc)) {
         out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     }
 
     rclass = crm_element_value(rsc->xml, PCMK_XA_CLASS);
     rprov = crm_element_value(rsc->xml, PCMK_XA_PROVIDER);
     rtype = crm_element_value(rsc->xml, PCMK_XA_TYPE);
 
     params = generate_resource_params(rsc, NULL /* @TODO use local node */,
                                       scheduler);
 
-    if (timeout_ms == 0) {
-        timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action),
-                                               scheduler);
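+    // If no timeout was given, use the timeout the cluster would use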
+    if (timeout_ms == 0U) {
+        timeout_ms = get_action_timeout(rsc, get_action(rsc_action));
     }
 
     rid = pcmk__is_anonymous_clone(rsc->parent)? requested_name : rsc->id;
 
     exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
                                                  params, override_hash, timeout_ms,
                                                  resource_verbose, force, check_level);
     return exit_code;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
                   const char *host_name, const char *move_lifetime, cib_t *cib,
                   int cib_options, pcmk_scheduler_t *scheduler,
                   gboolean promoted_role_only, gboolean force)
 {
     pcmk__output_t *out = scheduler->priv;
     int rc = pcmk_rc_ok;
     unsigned int count = 0;
     pcmk_node_t *current = NULL;
     pcmk_node_t *dest = pe_find_node(scheduler->nodes, host_name);
     bool cur_is_dest = false;
 
     if (dest == NULL) {
         return pcmk_rc_node_unknown;
     }
 
     if (promoted_role_only
         && !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
 
         const pcmk_resource_t *p = pe__const_top_resource(rsc, false);
 
         if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) {
             out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             out->info(out, "Ignoring --promoted option: %s is not promotable",
                       rsc_id);
             promoted_role_only = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
     if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
         unsigned int promoted_count = 0;
         pcmk_node_t *promoted_node = NULL;
 
         for (const GList *iter = rsc->children; iter; iter = iter->next) {
             const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if (child_role == pcmk_role_promoted) {
                 rsc = child;
                 promoted_node = pcmk__current_node(child);
                 promoted_count++;
             }
         }
         if (promoted_role_only || (promoted_count != 0)) {
             count = promoted_count;
             current = promoted_node;
         }
 
     }
 
     if (count > 1) {
         if (pcmk__is_clone(rsc)) {
             current = NULL;
         } else {
             return pcmk_rc_multiple;
         }
     }
 
     if (pcmk__same_node(current, dest)) {
         cur_is_dest = true;
         if (force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, promoted_role_only?"promoted":"active",
                      pcmk__node_name(dest));
         } else {
             return pcmk_rc_already;
         }
     }
 
     /* Clear any previous prefer constraints across all nodes. */
     cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false,
                        force);
 
     /* Clear any previous ban constraints on 'dest'. */
     cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib,
                        cib_options, TRUE, force);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                              cib, cib_options, promoted_role_only,
                              PCMK__ROLE_PROMOTED);
 
     crm_trace("%s%s now prefers %s%s",
               rsc->id, (promoted_role_only? " (promoted)" : ""),
               pcmk__node_name(dest), force?"(forced)":"");
 
     /* only ban the previous location if current location != destination location.
      * it is possible to use -M to enforce a location without regard of where the
      * resource is currently located */
     if (force && !cur_is_dest) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
                                    cib, cib_options, promoted_role_only,
                                    PCMK__ROLE_PROMOTED);
         } else if(count > 1) {
             out->info(out, "Resource '%s' is currently %s in %d locations. "
                       "One may now move to %s",
                       rsc_id, (promoted_role_only? "promoted" : "active"),
                       count, pcmk__node_name(dest));
             out->info(out, "To prevent '%s' from being %s at a specific location, "
                       "specify a node.",
                       rsc_id, (promoted_role_only? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }