diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 4edda6ca75..092d872d59 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,1751 +1,1751 @@
 /*
  * Copyright 2009-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>  // PRIu32, PRIx32
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/common/output_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/mainloop.h>
 
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 #include <pacemaker-fenced.h>
 
 #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster"
 
 char *stonith_our_uname = NULL;             // Local node's name (used as our identity in requests)
 long stonith_watchdog_timeout_ms = 0;       // <=0 means watchdog fencing is disabled
 GList *stonith_watchdog_targets = NULL;     // Nodes doing watchdog fencing (empty list = all nodes)
 
 static GMainLoop *mainloop = NULL;
 
 gboolean stand_alone = FALSE;               // NOTE(review): presumably set from command line — parsing not visible in this chunk
 static gboolean stonith_shutdown_flag = FALSE;  // Set once shutdown begins; new IPC clients are then refused
 
 static qb_ipcs_service_t *ipcs = NULL;
 static xmlNode *local_cib = NULL;           // Locally cached copy of the CIB
 static pe_working_set_t *fenced_data_set = NULL;    // Scheduler working set used to evaluate device placement
 // Scheduler flags used when (re)computing device placement in cib_devices_update()
 static const unsigned long long data_set_flags = pe_flag_quick_location
                                                  | pe_flag_no_compat
                                                  | pe_flag_no_counts;
 
 static cib_t *cib_api = NULL;
 
 static pcmk__output_t *logger_out = NULL;
 static pcmk__output_t *out = NULL;
 
 pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 // Command-line option values (assumed populated by option parsing outside this chunk)
 static struct {
     bool no_cib_connect;
     gchar **log_files;
 } options;
 
 static crm_exit_t exit_code = CRM_EX_OK;
 
 // Forward declarations (definitions appear later in this file)
 static void stonith_shutdown(int nsig);
 static void stonith_cleanup(void);
 
 /*!
  * \internal
  * \brief Accept a new IPC client connection unless shutdown is in progress
  *
  * \param[in] c    libqb connection being accepted
  * \param[in] uid  Client user ID
  * \param[in] gid  Client group ID
  *
  * \return 0 on success, -EPERM during shutdown, -EIO on allocation failure
  */
 static int32_t
 st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     int32_t rc = 0;
 
     if (stonith_shutdown_flag) {
         crm_info("Ignoring new client [%d] during shutdown",
                  pcmk__client_pid(c));
         rc = -EPERM;
 
     } else if (pcmk__new_client(c, uid, gid) == NULL) {
         rc = -EIO;
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Handle a request arriving on an IPC connection
  *
  * \param[in] qbc   libqb connection the message arrived on
  * \param[in] data  Raw message content
  * \param[in] size  Size of \p data in bytes
  *
  * \return Always 0 (the connection is kept open in every case)
  */
 static int32_t
 st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     int call_options = 0;
     xmlNode *request = NULL;
     pcmk__client_t *c = pcmk__find_client(qbc);
     const char *op = NULL;
 
     if (c == NULL) {
         crm_info("Invalid client: %p", qbc);
         return 0;
     }
 
     // Decode the IPC message into XML (also fills in id and flags)
     request = pcmk__client_data2xml(c, data, &id, &flags);
     if (request == NULL) {
         // Tell the client we could not parse its request
         pcmk__ipc_send_ack(c, id, flags, "nack", NULL, CRM_EX_PROTOCOL);
         return 0;
     }
 
 
     op = crm_element_value(request, F_CRM_TASK);
     if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) {
         // Node cache removals are relayed to the cluster rather than handled here
         crm_xml_add(request, F_TYPE, T_STONITH_NG);
         crm_xml_add(request, F_STONITH_OPERATION, op);
         crm_xml_add(request, F_STONITH_CLIENTID, c->id);
         crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
         crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
         send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
         free_xml(request);
         return 0;
     }
 
     if (c->name == NULL) {
         // First request from this client: derive a name for logging
         const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
 
         if (value == NULL) {
             value = "unknown";
         }
         c->name = crm_strdup_printf("%s.%u", value, c->pid);
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32
               " from client %s", flags, call_options, id, pcmk__client_name(c));
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(flags & crm_ipc_client_response);
         CRM_LOG_ASSERT(c->request_id == 0);     /* This means the client has two synchronous events in-flight */
         c->request_id = id;     /* Reply only to the last one */
     }
 
     // Stamp the request with the submitting client's identity before processing
     crm_xml_add(request, F_STONITH_CLIENTID, c->id);
     crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
     crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
     crm_log_xml_trace(request, "ipc-received");
     stonith_command(c, id, flags, request, NULL);
 
     free_xml(request);
     return 0;
 }
 
 /*!
  * \internal
  * \brief Clean up after an IPC client connection is closed
  *
  * \param[in] c  Connection that was closed
  *
  * \return Always 0 (meaning: yes, go ahead and destroy the connection)
  */
 static int32_t
 st_ipc_closed(qb_ipcs_connection_t * c)
 {
     pcmk__client_t *client = pcmk__find_client(c);
 
     if (client != NULL) {
         crm_trace("Connection %p closed", c);
         pcmk__free_client(client);
     }
     return 0;
 }
 
 // Handle destruction of an IPC connection the same way as an explicit close
 static void
 st_ipc_destroy(qb_ipcs_connection_t * conn)
 {
     crm_trace("Connection %p destroyed", conn);
     st_ipc_closed(conn);
 }
 
 /*!
  * \internal
  * \brief Process a fencing message received from a cluster peer
  *
  * \param[in] msg           Message XML from the peer
  * \param[in] private_data  Ignored
  */
 static void
 stonith_peer_callback(xmlNode * msg, void *private_data)
 {
     const char *op = crm_element_value(msg, F_STONITH_OPERATION);
 
     // Pokes are keep-alive noise; everything else is a real command
     if (!pcmk__str_eq(op, "poke", pcmk__str_none)) {
         const char *remote_peer = crm_element_value(msg, F_ORIG);
 
         crm_log_xml_trace(msg, "Peer[inbound]");
         stonith_command(NULL, 0, 0, msg, remote_peer);
     }
 }
 
 #if SUPPORT_COROSYNC
 /*!
  * \internal
  * \brief Deliver a CPG (corosync) message to the peer callback
  *
  * \param[in] handle     CPG connection handle
  * \param[in] groupName  CPG group the message was sent to
  * \param[in] nodeid     Cluster node ID of sender
  * \param[in] pid        Process ID of sender
  * \param[in] msg        Raw message content
  * \param[in] msg_len    Size of \p msg in bytes
  */
 static void
 stonith_peer_ais_callback(cpg_handle_t handle,
                           const struct cpg_name *groupName,
                           uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     xmlNode *xml = NULL;
     const char *from = NULL;
     // data is newly allocated here and must be freed on every path below
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     if (kind == crm_class_cluster) {
         xml = string2xml(data);
         if (xml == NULL) {
             crm_err("Invalid XML: '%.120s'", data);
             free(data);
             return;
         }
         // Record the sender so replies can be routed back
         crm_xml_add(xml, F_ORIG, from);
         /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
         stonith_peer_callback(xml, NULL);
     }
 
     free_xml(xml);
     free(data);
     return;
 }
 
 // Losing the cluster connection is fatal: log critically and begin shutdown
 static void
 stonith_peer_cs_destroy(gpointer user_data)
 {
     crm_crit("Lost connection to cluster layer, shutting down");
     stonith_shutdown(0);
 }
 #endif
 
 /*!
  * \internal
  * \brief Send a reply to a local IPC client
  *
  * \param[in] notify_src    Reply XML to send
  * \param[in] client        Client to reply to
  * \param[in] call_options  Call options of the original request
  */
 void
 do_local_reply(xmlNode *notify_src, pcmk__client_t *client, int call_options)
 {
     /* send callback to originating child */
     int local_rc = pcmk_rc_ok;
     int rid = 0;
     uint32_t ipc_flags = crm_ipc_server_event;
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         // Synchronous replies must use the request ID the client is waiting on
         CRM_LOG_ASSERT(client->request_id);
         rid = client->request_id;
         client->request_id = 0;
         ipc_flags = crm_ipc_flags_none;
     }
 
     local_rc = pcmk__ipc_send_xml(client, rid, notify_src, ipc_flags);
     if (local_rc == pcmk_rc_ok) {
         crm_trace("Sent response %d to client %s",
                   rid, pcmk__client_name(client));
     } else {
         /* "%synchronous" below expands to "Synchronous" or "Asynchronous"
          * via the "S"/"As" argument substituted for %s
          */
         crm_warn("%synchronous reply to client %s failed: %s",
                  (pcmk_is_set(call_options, st_opt_sync_call)? "S" : "As"),
                  pcmk__client_name(client), pcmk_rc_str(local_rc));
     }
 }
 
 /*!
  * \internal
  * \brief Map a notification type name to its client-subscription flag
  *
  * \param[in] name  Notification type name
  *
  * \return Matching st_callback_* flag, or st_callback_unknown if none
  */
 uint64_t
 get_stonith_flag(const char *name)
 {
     static const struct {
         const char *name;
         uint64_t flag;
     } flag_map[] = {
         { T_STONITH_NOTIFY_FENCE,          st_callback_notify_fence },
         { STONITH_OP_DEVICE_ADD,           st_callback_device_add },
         { STONITH_OP_DEVICE_DEL,           st_callback_device_del },
         { T_STONITH_NOTIFY_HISTORY,        st_callback_notify_history },
         { T_STONITH_NOTIFY_HISTORY_SYNCED, st_callback_notify_history_synced },
     };
 
     for (size_t i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
         if (pcmk__str_eq(name, flag_map[i].name, pcmk__str_casei)) {
             return flag_map[i].flag;
         }
     }
     return st_callback_unknown;
 }
 
 /*!
  * \internal
  * \brief Send a notification to a single subscribed IPC client
  *
  * \param[in] key        Ignored (GHashTable iteration interface)
  * \param[in] value      Client (pcmk__client_t *) to notify
  * \param[in] user_data  Notification XML (xmlNode *) to send
  */
 static void
 stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *update_msg = user_data;
     pcmk__client_t *client = value;
     const char *type = NULL;
     int rc = pcmk_rc_ok;
 
     CRM_CHECK(client != NULL, return);
     CRM_CHECK(update_msg != NULL, return);
 
     type = crm_element_value(update_msg, F_SUBTYPE);
     CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
 
     if (client->ipcs == NULL) {
         crm_trace("Skipping client with NULL channel");
         return;
     }
 
     // Skip clients that did not subscribe to this notification type
     if (!pcmk_is_set(client->flags, get_stonith_flag(type))) {
         return;
     }
 
     rc = pcmk__ipc_send_xml(client, 0, update_msg, crm_ipc_server_event);
     if (rc == pcmk_rc_ok) {
         crm_trace("Sent %s notification to client %s",
                   type, pcmk__client_name(client));
     } else {
         crm_warn("%s notification of client %s failed: %s "
                  CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client),
                  pcmk_rc_str(rc), client->id, rc);
     }
 }
 
 /*!
  * \internal
  * \brief Notify a client that a fencing call's timeout was updated
  *
  * \param[in] client_id  ID of client to notify
  * \param[in] call_id    Client's call ID for the affected operation
  * \param[in] timeout    New timeout value to report (ignored if 0)
  */
 void
 do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
 {
     pcmk__client_t *client = NULL;
     xmlNode *notify_data = NULL;
 
     if (!timeout || !call_id || !client_id) {
         return;
     }
 
     client = pcmk__find_client_by_id(client_id);
     if (!client) {
         return;
     }
 
     notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
     crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
 
     crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
 
     /* client was already verified non-NULL above, so send unconditionally
      * (previous code re-checked it redundantly)
      */
     pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event);
 
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Notify all subscribed IPC clients of a fencing operation result
  *
  * \param[in] type     Notification type
  * \param[in] result   Result of fencing operation (assume success if NULL)
  * \param[in] data     If not NULL, add to notification as call data
  */
 void
 fenced_send_notification(const char *type, const pcmk__action_result_t *result,
                          xmlNode *data)
 {
     /* TODO: Standardize the contents of data */
     xmlNode *notification = create_xml_node(NULL, "notify");
 
     CRM_LOG_ASSERT(type != NULL);
 
     crm_xml_add(notification, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(notification, F_SUBTYPE, type);
     crm_xml_add(notification, F_STONITH_OPERATION, type);
     stonith__xe_set_result(notification, result);
 
     if (data != NULL) {
         add_message_xml(notification, F_STONITH_CALLDATA, data);
     }
 
     crm_trace("Notifying clients");
     pcmk__foreach_ipc_client(stonith_notify_client, notification);
     free_xml(notification);
     crm_trace("Notify complete");
 }
 
 /*!
  * \internal
  * \brief Send notifications for a configuration change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD,
  *                    STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    Description of what changed
  * \param[in] active  Current number of devices or topologies in use
  */
 static void
 send_config_notification(const char *op, const pcmk__action_result_t *result,
                          const char *desc, int active)
 {
     xmlNode *change = create_xml_node(NULL, op);
 
     CRM_CHECK(change != NULL, return);
 
     crm_xml_add(change, F_STONITH_DEVICE, desc);
     crm_xml_add_int(change, F_STONITH_ACTIVE, active);
 
     fenced_send_notification(op, result, change);
     free_xml(change);
 }
 
 /*!
  * \internal
  * \brief Send notifications for a device change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD or
  *                    STONITH_OP_DEVICE_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    ID of device that changed
  */
 void
 fenced_send_device_notification(const char *op,
                                 const pcmk__action_result_t *result,
                                 const char *desc)
 {
     int active = g_hash_table_size(device_list);
 
     send_config_notification(op, result, desc, active);
 }
 
 /*!
  * \internal
  * \brief Send notifications for a topology level change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_LEVEL_ADD or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    String representation of level (<target>[<level_index>])
  */
 void
 fenced_send_level_notification(const char *op,
                                const pcmk__action_result_t *result,
                                const char *desc)
 {
     int active = g_hash_table_size(topology);
 
     send_config_notification(op, result, desc, active);
 }
 
 /*!
  * \internal
  * \brief Unregister one topology level for a node and notify subscribers
  *
  * \param[in] node   Target node of the level to remove
  * \param[in] level  Level index to remove
  */
 static void
 topology_remove_helper(const char *node, int level)
 {
     char *desc = NULL;
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
     xmlNode *level_xml = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
 
     crm_xml_add(level_xml, F_STONITH_ORIGIN, __func__);
     crm_xml_add_int(level_xml, XML_ATTR_STONITH_INDEX, level);
     crm_xml_add(level_xml, XML_ATTR_STONITH_TARGET, node);
 
     fenced_unregister_level(level_xml, &desc, &result);
     fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc);
     pcmk__reset_result(&result);
     free_xml(level_xml);
     free(desc);
 }
 
 /*!
  * \internal
  * \brief Remove fencing devices for resources deleted from the CIB
  *
  * \param[in] xpathObj  XPath result containing deleted resource elements
  */
 static void
 remove_cib_device(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj);
 
     for (int i = 0; i < max; i++) {
         xmlNode *match = getXpathResult(xpathObj, i);
         const char *standard = NULL;
 
         CRM_LOG_ASSERT(match != NULL);
         if (match != NULL) {
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
         }
 
         // Only fencing (stonith-class) resources are of interest
         if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH,
                          pcmk__str_casei)) {
             stonith_device_remove(crm_element_value(match, XML_ATTR_ID), true);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Remove the topology level described by a fencing-level XML element
  *
  * \param[in] match  fencing-level element to remove
  */
 static void
 remove_topology_level(xmlNode *match)
 {
     int index = 0;
     char *target = NULL;
 
     CRM_CHECK(match != NULL, return);
 
     crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
     target = stonith_level_key(match, fenced_target_by_unknown);
     topology_remove_helper(target, index);
     free(target);
 }
 
 /*!
  * \internal
  * \brief Register the topology level described by a fencing-level XML element
  *
  * \param[in] match  fencing-level element to register
  */
 static void
 add_topology_level(xmlNode *match)
 {
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
     char *desc = NULL;
 
     CRM_CHECK(match != NULL, return);
 
     fenced_register_level(match, &desc, &result);
     fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc);
     pcmk__reset_result(&result);
     free(desc);
 }
 
 /*!
  * \internal
  * \brief Remove topology levels deleted in a (v1-style) CIB diff
  *
  * \param[in] xpathObj  XPath result containing fencing-level elements
  */
 static void
 remove_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if (match && crm_element_value(match, XML_DIFF_MARKER)) {
             /* Deletion */
             int index = 0;
             char *target = stonith_level_key(match, fenced_target_by_unknown);
 
             crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
             if (target == NULL) {
                 crm_err("Invalid fencing target in element %s", ID(match));
 
             } else if (index <= 0) {
                 crm_err("Invalid level for %s in element %s", target, ID(match));
 
             } else {
                 topology_remove_helper(target, index);
             }
             /* stonith_level_key() allocates; callers retain ownership
              * (previously leaked here)
              */
             free(target);
             /* } else { Deal with modifications during the 'addition' stage */
         }
     }
 }
 
 /*!
  * \internal
  * \brief (Re-)register every topology level in an XPath search result
  *
  * \param[in] xpathObj  XPath result containing fencing-level elements
  */
 static void
 register_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj);
 
     for (int i = 0; i < max; i++) {
         xmlNode *match = getXpathResult(xpathObj, i);
 
         // Drop any existing level for this target/index before re-adding
         remove_topology_level(match);
         add_topology_level(match);
     }
 }
 
 /* Fencing
 <diff crm_feature_set="3.0.6">
   <diff-removed>
     <fencing-topology>
       <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
       <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
       <fencing-level devices="disk,network" id="f-p2.1"/>
     </fencing-topology>
   </diff-removed>
   <diff-added>
     <fencing-topology>
       <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
       <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
       <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
     </fencing-topology>
   </diff-added>
 </diff>
 */
 
 static void
 fencing_topology_init(void)
 {
     xmlXPathObjectPtr xpathObj = NULL;
     const char *xpath = "//" XML_TAG_FENCING_LEVEL;
 
     crm_trace("Full topology refresh");
     free_topology_list();
     init_topology_list();
 
     /* Grab everything */
     xpathObj = xpath_search(local_cib, xpath);
     register_fencing_topology(xpathObj);
 
     freeXpathObject(xpathObj);
 }
 
 // Resource name for display: clone instance name if set, otherwise base ID.
 // Argument and expansion are fully parenthesized to avoid precedence surprises.
 #define rsc_name(x) ((x)->clone_name? (x)->clone_name : (x)->id)
 
 /*!
  * \internal
  * \brief Check whether our uname is in a resource's allowed node list
  *
  * \param[in] rsc  Resource to check
  *
  * \return Pointer to node object if found, NULL otherwise
  */
 static pe_node_t *
 our_node_allowed_for(const pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if ((rsc == NULL) || (stonith_our_uname == NULL)) {
         return NULL;
     }
 
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if ((node != NULL)
             && (strcmp(node->details->uname, stonith_our_uname) == 0)) {
             return node;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create or remove the implicit watchdog pseudo-device as configured
  *
  * Registers the watchdog pseudo fence agent when watchdog fencing is enabled
  * and no explicit device or target list provides it, and removes it when
  * watchdog fencing is disabled. Registration failure is fatal.
  */
 static void
 watchdog_device_update(void)
 {
     if (stonith_watchdog_timeout_ms > 0) {
         if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) &&
             !stonith_watchdog_targets) {
             /* getting here watchdog-fencing enabled, no device there yet
                and reason isn't stonith_watchdog_targets preventing that
              */
             int rc;
             xmlNode *xml;
 
             xml = create_device_registration_xml(
                     STONITH_WATCHDOG_ID,
                     st_namespace_internal,
                     STONITH_WATCHDOG_AGENT,
                     NULL, /* stonith_device_register will add our
                              own name as PCMK_STONITH_HOST_LIST param
                              so we can skip that here
                            */
                     NULL);
             rc = stonith_device_register(xml, TRUE);
             free_xml(xml);
             if (rc != pcmk_ok) {
                 // Cannot safely continue without the configured watchdog device
                 rc = pcmk_legacy2rc(rc);
                 exit_code = CRM_EX_FATAL;
                 crm_crit("Cannot register watchdog pseudo fence agent: %s",
                          pcmk_rc_str(rc));
                 stonith_shutdown(0);
             }
         }
 
     } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) {
         /* be silent if no device - todo parameter to stonith_device_remove */
         stonith_device_remove(STONITH_WATCHDOG_ID, true);
     }
 }
 
 /*!
  * \internal
  * \brief Refresh stonith_watchdog_timeout_ms from a CIB's
  *        stonith-watchdog-timeout property
  *
  * \param[in] cib  CIB XML to read the property from
  */
 static void
 update_stonith_watchdog_timeout_ms(xmlNode *cib)
 {
     long timeout_ms = 0;
     xmlNode *stonith_watchdog_xml =
         get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']",
                          cib, LOG_NEVER);
 
     if (stonith_watchdog_xml != NULL) {
         const char *value = crm_element_value(stonith_watchdog_xml,
                                               XML_NVPAIR_ATTR_VALUE);
 
         if (value != NULL) {
             timeout_ms = crm_get_msec(value);
         }
     }
 
     // Negative means "auto-calculate" from the watchdog configuration
     if (timeout_ms < 0) {
         timeout_ms = pcmk__auto_watchdog_timeout();
     }
     stonith_watchdog_timeout_ms = timeout_ms;
 }
 
 /*!
  * \internal
  * \brief If a resource or any of its children are STONITH devices, update their
  *        definitions given a cluster working set.
  *
  * Recurses into collective resources, skips non-fencing and disabled
  * resources, removes devices our node may not run, and (re)registers
  * devices our node is allowed to run.
  *
  * \param[in,out] rsc       Resource to check
  * \param[in,out] data_set  Cluster working set with device information
  */
 static void
 cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
     const char *value = NULL;
     const char *rclass = NULL;
     pe_node_t *parent = NULL;
 
     /* If this is a complex resource, check children rather than this resource itself. */
     if(rsc->children) {
         GList *gIter = NULL;
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             cib_device_update(gIter->data, data_set);
             if(pe_rsc_is_clone(rsc)) {
                 crm_trace("Only processing one copy of the clone %s", rsc->id);
                 break;
             }
         }
         return;
     }
 
     /* We only care about STONITH resources. */
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         return;
     }
 
     /* If this STONITH resource is disabled, remove it. */
     if (pe__resource_is_disabled(rsc)) {
         crm_info("Device %s has been disabled", rsc->id);
         return;
     }
 
     /* if watchdog-fencing is disabled handle any watchdog-fence
        resource as if it was disabled
      */
     if ((stonith_watchdog_timeout_ms <= 0) &&
         pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
         crm_info("Watchdog-fencing disabled thus handling "
                  "device %s as disabled", rsc->id);
         return;
     }
 
     /* Check whether our node is allowed for this resource (and its parent if in a group) */
     node = our_node_allowed_for(rsc);
     if (rsc->parent && (rsc->parent->variant == pe_group)) {
         parent = our_node_allowed_for(rsc->parent);
     }
 
     if(node == NULL) {
         /* Our node is disallowed, so remove the device */
         GHashTableIter iter;
 
         crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
         // Trace-log each node the device *is* allowed on, to aid debugging
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             crm_trace("Available: %s = %d", pe__node_name(node), node->weight);
         }
 
         return;
 
     } else if(node->weight < 0 || (parent && parent->weight < 0)) {
         /* Our node (or its group) is disallowed by score, so remove the device */
         int score = (node->weight < 0)? node->weight : parent->weight;
 
         crm_info("Device %s has been disabled on %s: score=%s",
                  rsc->id, stonith_our_uname, pcmk_readable_score(score));
         return;
 
     } else {
         /* Our node is allowed, so update the device information */
         int rc;
         xmlNode *data;
         GHashTable *rsc_params = NULL;
         GHashTableIter gIter;
         stonith_key_value_t *params = NULL;
 
         const char *name = NULL;
         const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
         const char *rsc_provides = NULL;
 
         crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
         rsc_params = pe_rsc_params(rsc, node, data_set);
         get_meta_attributes(rsc->meta, rsc, node, data_set);
 
         rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
 
         // Convert the resource's parameter table into a registration list
         g_hash_table_iter_init(&gIter, rsc_params);
         while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
             if (!name || !value) {
                 continue;
             }
             params = stonith_key_value_add(params, name, value);
             crm_trace(" %s=%s", name, value);
         }
 
         data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
                                               agent, params, rsc_provides);
         stonith_key_value_freeall(params, 1, 1);
         rc = stonith_device_register(data, TRUE);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(data);
     }
 }
 
 /*!
  * \internal
  * \brief Update all STONITH device definitions based on current CIB
  *
  * Runs the scheduler over the cached CIB, marks every CIB-registered device
  * dirty, re-registers devices via cib_device_update(), then drops any device
  * that was not re-registered (i.e. no longer in the CIB or not allowed here).
  */
 static void
 cib_devices_update(void)
 {
     GHashTableIter iter;
     stonith_device_t *device = NULL;
 
     crm_info("Updating devices to version %s.%s.%s",
              crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
              crm_element_value(local_cib, XML_ATTR_GENERATION),
              crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
 
     // Force the working set to re-evaluate "now" for this run
     if (fenced_data_set->now != NULL) {
         crm_time_free(fenced_data_set->now);
         fenced_data_set->now = NULL;
     }
     fenced_data_set->localhost = stonith_our_uname;
     pcmk__schedule_actions(local_cib, data_set_flags, fenced_data_set);
 
     // Mark all CIB-registered devices dirty; survivors are re-registered below
     g_hash_table_iter_init(&iter, device_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
         if (device->cib_registered) {
             device->dirty = TRUE;
         }
     }
 
     /* have list repopulated if cib has a watchdog-fencing-resource
        TODO: keep a cached list for queries happening while we are refreshing
      */
     g_list_free_full(stonith_watchdog_targets, free);
     stonith_watchdog_targets = NULL;
     g_list_foreach(fenced_data_set->resources, (GFunc) cib_device_update, fenced_data_set);
 
     // Anything still dirty was not re-registered, so remove it
     g_hash_table_iter_init(&iter, device_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
         if (device->dirty) {
             g_hash_table_iter_remove(&iter);
         }
     }
 
     fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
     pe_reset_working_set(fenced_data_set);
 }
 
 /*!
  * \internal
  * \brief Update device definitions based on a v2 (patchset) CIB diff
  *
  * \param[in] event  CIB notification type (unused)
  * \param[in] msg    CIB notification message containing the patchset
  */
 static void
 update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
 {
     xmlNode *change = NULL;
     char *reason = NULL;
     bool needs_update = FALSE;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     for (change = pcmk__xml_first_child(patchset); change != NULL;
          change = pcmk__xml_next(change)) {
         const char *op = crm_element_value(change, XML_DIFF_OP);
         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
         const char *shortpath = NULL;
 
         /* Also skip changes with no path: the strstr() calls below would
          * otherwise dereference a NULL xpath
          */
         if ((op == NULL) || (xpath == NULL) ||
             (strcmp(op, "move") == 0) ||
             strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
             // Moves and status-section changes don't affect device definitions
             continue;
         } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
             const char *rsc_id = NULL;
             char *search = NULL;
             char *mutable = NULL;
 
             if (strstr(xpath, XML_TAG_ATTR_SETS) ||
                 strstr(xpath, XML_TAG_META_SETS)) {
                 // An attribute-set deletion requires a full refresh
                 needs_update = TRUE;
                 pcmk__str_update(&reason,
                                  "(meta) attribute deleted from resource");
                 break;
             }
             // Extract the resource ID from the xpath and remove its device
             pcmk__str_update(&mutable, xpath);
             rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'");
             if (rsc_id != NULL) {
                 rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'");
                 search = strchr(rsc_id, '\'');
             }
             if (search != NULL) {
                 *search = 0;
                 stonith_device_remove(rsc_id, true);
                 /* watchdog_device_update called afterwards
                    to fall back to implicit definition if needed */
             } else {
                 crm_warn("Ignoring malformed CIB update (resource deletion)");
             }
             free(mutable);
 
         } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
                    strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
                    strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
             // Any other change to these sections triggers a full refresh
             shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
             reason = crm_strdup_printf("%s %s", op, shortpath+1);
             needs_update = TRUE;
             break;
         }
     }
 
     if(needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     } else {
         crm_trace("No updates for device list found in CIB");
     }
     free(reason);
 }
 
 
 /*!
  * \internal
  * \brief Update device definitions based on a v1 (full diff) CIB notification
  *
  * \param[in] event  CIB notification type (unused)
  * \param[in] msg    CIB notification message containing the diff
  */
 static void
 update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
 {
     const char *reason = "none";
     gboolean needs_update = FALSE;
     xmlXPathObjectPtr xpath_obj = NULL;
     int max = 0;
 
     /* New location constraints: safest and simplest to always recompute */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
     max = numXpathResults(xpath_obj);
     if (max > 0) {
         needs_update = TRUE;
         reason = "new location constraint";
 
         for (int i = 0; i < max; i++) {
             crm_log_xml_trace(getXpathResult(xpath_obj, i), "new constraint");
         }
     }
     freeXpathObject(xpath_obj);
 
     /* Deletions: drop any removed fencing resources from the device list */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         remove_cib_device(xpath_obj);
     }
     freeXpathObject(xpath_obj);
 
     /* Additions: a new or modified fencing resource forces a refresh */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
     max = numXpathResults(xpath_obj);
     for (int i = 0; i < max; i++) {
         xmlNode *match = getXpathResult(xpath_obj, i);
         const char *rsc_id = crm_element_value(match, XML_ATTR_ID);
         const char *standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
 
         if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH,
                          pcmk__str_casei)) {
             crm_trace("Fencing resource %s was added or modified", rsc_id);
             reason = "new resource";
             needs_update = TRUE;
         }
     }
     freeXpathObject(xpath_obj);
 
     if (needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     }
 }
 
 /*!
  * \internal
  * \brief Dispatch a CIB update to the handler matching its diff format
  *
  * \param[in] event  CIB notification type
  * \param[in] msg    CIB notification message
  */
 static void
 update_cib_stonith_devices(const char *event, xmlNode * msg)
 {
     int format = 1;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
     if (format == 1) {
         update_cib_stonith_devices_v1(event, msg);
     } else if (format == 2) {
         update_cib_stonith_devices_v2(event, msg);
     } else {
         crm_warn("Unknown patch format: %d", format);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node has a specific attribute name/value
  *
  * \param[in] node    Name of node to check
  * \param[in] name    Name of an attribute to look for
  * \param[in] value   The value the named attribute needs to be set to in order to be considered a match
  *
  * \return TRUE if the locally cached CIB has the specified node attribute
  */
 gboolean
 node_has_attr(const char *node, const char *name, const char *value)
 {
     GString *xpath = NULL;
     xmlNode *match;
 
     CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL)
               && (value != NULL), return FALSE);
 
     /* Search for the node's attributes in the CIB. While the schema allows
      * multiple sets of instance attributes, and allows instance attributes to
      * use id-ref to reference values elsewhere, that is intended for resources,
      * so we ignore that here.
      */
     xpath = g_string_sized_new(256);
     pcmk__g_strcat(xpath,
                    "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE
                    "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS
                    "/" XML_CIB_TAG_NVPAIR
                    "[@" XML_NVPAIR_ATTR_NAME "='", name, "' "
                    "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL);
 
     match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER);
 
     g_string_free(xpath, TRUE);
     return (match != NULL);
 }
 
 /*!
  * \internal
  * \brief Check whether a node does watchdog-fencing
  *
  * \param[in] node    Name of node to check
  *
  * \return TRUE if node found in stonith_watchdog_targets
  *         or stonith_watchdog_targets is empty indicating
  *         all nodes are doing watchdog-fencing
  */
 gboolean
 node_does_watchdog_fencing(const char *node)
 {
     return ((stonith_watchdog_targets == NULL) ||
             pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei));
 }
 
 
 static void
 update_fencing_topology(const char *event, xmlNode * msg)
 {
     int format = 1;
     const char *xpath;
     xmlXPathObjectPtr xpathObj = NULL;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
 
     if(format == 1) {
         /* Process deletions (only) */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         remove_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
         /* Process additions and changes */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         register_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
     } else if(format == 2) {
         xmlNode *change = NULL;
         int add[] = { 0, 0, 0 };
         int del[] = { 0, 0, 0 };
 
         xml_patch_versions(patchset, add, del);
 
         for (change = pcmk__xml_first_child(patchset); change != NULL;
              change = pcmk__xml_next(change)) {
             const char *op = crm_element_value(change, XML_DIFF_OP);
             const char *xpath = crm_element_value(change, XML_DIFF_PATH);
 
             if(op == NULL) {
                 continue;
 
             } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
                 /* Change to a specific entry */
 
                 crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
                 if(strcmp(op, "move") == 0) {
                     continue;
 
                 } else if(strcmp(op, "create") == 0) {
                     add_topology_level(change->children);
 
                 } else if(strcmp(op, "modify") == 0) {
                     xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
 
                     if(match) {
                         remove_topology_level(match->children);
                         add_topology_level(match->children);
                     }
 
                 } else if(strcmp(op, "delete") == 0) {
                     /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
                     crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
                 /* Change to the topology in general */
                 crm_info("Re-initializing fencing topology after top-level %s operation  %d.%d.%d for %s",
                          op, add[0], add[1], add[2], xpath);
                 fencing_topology_init();
                 return;
 
             } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
                 /* Changes to the whole config section, possibly including the topology as a whild */
                 if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
                     crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
                               op, add[0], add[1], add[2], xpath);
 
                 } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
                     crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else {
                 crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
                           op, add[0], add[1], add[2], xpath);
             }
         }
 
     } else {
         crm_warn("Unknown patch format: %d", format);
     }
 }
// Whether the initial full CIB query has completed (see init_cib_cache_cb())
static bool have_cib_devices = FALSE;

/*!
 * \internal
 * \brief CIB diff notification callback: keep the cached CIB copy current
 *
 * Applies the notification's patchset to the locally cached CIB, falling back
 * to a full re-query if the patch cannot be applied. Then refreshes the node
 * caches and watchdog timeout, and updates the fencing topology and device
 * list (fully if required, otherwise incrementally from the diff).
 *
 * \param[in] event  CIB event type
 * \param[in] msg    CIB notification XML (may be NULL)
 */
static void
update_cib_cache_cb(const char *event, xmlNode * msg)
{
    int rc = pcmk_ok;
    long timeout_ms_saved = stonith_watchdog_timeout_ms;
    bool need_full_refresh = false;

    if(!have_cib_devices) {
        // Ignore diffs until the initial full CIB dump has been processed
        crm_trace("Skipping updates until we get a full dump");
        return;

    } else if(msg == NULL) {
        crm_trace("Missing %s update", event);
        return;
    }

    /* Maintain a local copy of the CIB so that we have full access
     * to device definitions, location constraints, and node attributes
     */
    if (local_cib != NULL) {
        int rc = pcmk_ok;   // NOTE(review): shadows outer rc — confirm intentional
        xmlNode *patchset = NULL;

        // Ignore notifications for CIB operations that failed
        crm_element_value_int(msg, F_CIB_RC, &rc);
        if (rc != pcmk_ok) {
            return;
        }

        patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
        // Log the incoming patchset at trace level for debugging
        pcmk__output_set_log_level(logger_out, LOG_TRACE);
        out->message(out, "xml-patchset", patchset);
        rc = xml_apply_patchset(local_cib, patchset, TRUE);
        switch (rc) {
            case pcmk_ok:
            case -pcmk_err_old_data:
                break;
            case -pcmk_err_diff_resync:
            case -pcmk_err_diff_failed:
                // Patch no longer applies cleanly; discard cache and re-query below
                crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                free_xml(local_cib);
                local_cib = NULL;
                break;
            default:
                crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                free_xml(local_cib);
                local_cib = NULL;
        }
    }

    if (local_cib == NULL) {
        // No usable cached copy: fetch the whole CIB synchronously
        crm_trace("Re-requesting full CIB");
        rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
        if(rc != pcmk_ok) {
            crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
            return;
        }
        CRM_ASSERT(local_cib != NULL);
        need_full_refresh = true;
    }

    pcmk__refresh_node_caches_from_cib(local_cib);
    update_stonith_watchdog_timeout_ms(local_cib);

    // A changed watchdog timeout forces a full topology/device rebuild
    if (timeout_ms_saved != stonith_watchdog_timeout_ms) {
        need_full_refresh = true;
    }

    if (need_full_refresh) {
        fencing_topology_init();
        cib_devices_update();
    } else {
        // Partial refresh
        update_fencing_topology(event, msg);
        update_cib_stonith_devices(event, msg);
    }

    watchdog_device_update();
}
 
 static void
 init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     crm_info("Updating device list from CIB");
     have_cib_devices = TRUE;
     local_cib = copy_xml(output);
 
     pcmk__refresh_node_caches_from_cib(local_cib);
     update_stonith_watchdog_timeout_ms(local_cib);
 
     fencing_topology_init();
     cib_devices_update();
     watchdog_device_update();
 }
 
 static void
 stonith_shutdown(int nsig)
 {
     crm_info("Terminating with %d clients", pcmk__ipc_client_count());
     stonith_shutdown_flag = TRUE;
     if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
         g_main_loop_quit(mainloop);
     }
 }
 
 static void
 cib_connection_destroy(gpointer user_data)
 {
     if (stonith_shutdown_flag) {
         crm_info("Connection to the CIB manager closed");
         return;
     } else {
         crm_crit("Lost connection to the CIB manager, shutting down");
     }
     if (cib_api) {
         cib_api->cmds->signoff(cib_api);
     }
     stonith_shutdown(0);
 }
 
/*!
 * \internal
 * \brief Free all fencer state before exit
 *
 * Disconnects from the CIB manager and IPC clients first, then releases
 * peer, operation, topology, device, and metadata state, and finally the
 * cached node name and CIB copy.
 */
static void
stonith_cleanup(void)
{
    // Stop CIB diff notifications and sign off before freeing dependent state
    if (cib_api) {
        cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb);
        cib_api->cmds->signoff(cib_api);
    }

    // Tear down the IPC server so no new client requests arrive
    if (ipcs) {
        qb_ipcs_destroy(ipcs);
    }

    crm_peer_destroy();
    pcmk__client_cleanup();
    free_stonith_remote_op_list();
    free_topology_list();
    free_device_list();
    free_metadata_cache();
    fenced_unregister_handlers();

    free(stonith_our_uname);
    stonith_our_uname = NULL;

    free_xml(local_cib);
    local_cib = NULL;
}
 
 static gboolean
 stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                    GError **error)
 {
     stand_alone = FALSE;
     options.no_cib_connect = true;
     return TRUE;
 }
 
 static void
 setup_cib(void)
 {
     int rc, retries = 0;
 
     cib_api = cib_new();
     if (cib_api == NULL) {
         crm_err("No connection to the CIB manager");
         return;
     }
 
     do {
         sleep(retries);
         rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
     } while (rc == -ENOTCONN && ++retries < 5);
 
     if (rc != pcmk_ok) {
         crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
 
     } else if (pcmk_ok !=
                cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
         crm_err("Could not set CIB notification callback");
 
     } else {
         rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
         cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
                                          init_cib_cache_cb);
         cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
         crm_info("Watching for fencing topology changes");
     }
 }
 
// Handlers for the fencer's IPC server (connections from local clients)
struct qb_ipcs_service_handlers ipc_callbacks = {
    .connection_accept = st_ipc_accept,
    .connection_created = NULL,
    .msg_process = st_ipc_dispatch,
    .connection_closed = st_ipc_closed,
    .connection_destroyed = st_ipc_destroy
};
 
 /*!
  * \internal
  * \brief Callback for peer status changes
  *
  * \param[in] type  What changed
  * \param[in] node  What peer had the change
  * \param[in] data  Previous value of what changed
  */
 static void
 st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     if ((type != crm_status_processes)
         && !pcmk_is_set(node->flags, crm_remote_node)) {
         /*
          * This is a hack until we can send to a nodeid and/or we fix node name lookups
          * These messages are ignored in stonith_peer_callback()
          */
         xmlNode *query = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
         crm_xml_add(query, F_TYPE, T_STONITH_NG);
         crm_xml_add(query, F_STONITH_OPERATION, "poke");
 
         crm_debug("Broadcasting our uname because of node %u", node->id);
         send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
         free_xml(query);
     }
 }
 
 static pcmk__cluster_option_t fencer_options[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * short description,
      * long description
      */
     {
         PCMK_STONITH_HOST_ARGUMENT, NULL, "string", NULL, "port", NULL,
         N_("Advanced use only: An alternate parameter to supply instead of 'port'"),
         N_("some devices do not support the "
            "standard 'port' parameter or may provide additional ones. Use "
            "this to specify an alternate, device-specific, parameter "
            "that should indicate the machine to be fenced. A value of "
            "none can be used to tell the cluster not to supply any "
            "additional parameters.")
     },
     {
         PCMK_STONITH_HOST_MAP,NULL, "string", NULL, "", NULL,
         N_("A mapping of host names to ports numbers for devices that do not support host names."),
         N_("Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2")
     },
     {
         PCMK_STONITH_HOST_LIST,NULL, "string", NULL, "", NULL,
         N_("Eg. node1,node2,node3"),
         N_("A list of machines controlled by "
                "this device (Optional unless pcmk_host_list=static-list)")
     },
     {
         PCMK_STONITH_HOST_CHECK,NULL, "string", NULL, "dynamic-list", NULL,
         N_("How to determine which machines are controlled by the device."),
         N_("Allowed values: dynamic-list "
                "(query the device via the 'list' command), static-list "
                "(check the pcmk_host_list attribute), status "
                "(query the device via the 'status' command), "
                "none (assume every device can fence every "
                "machine)")
     },
     {
         PCMK_STONITH_DELAY_MAX,NULL, "time", NULL, "0s", NULL,
         N_("Enable a base delay for fencing actions and specify base delay value."),
         N_("Enable a delay of no more than the "
                "time specified before executing fencing actions. Pacemaker "
                "derives the overall delay by taking the value of "
                "pcmk_delay_base and adding a random delay value such "
                "that the sum is kept below this maximum.")
     },
     {
         PCMK_STONITH_DELAY_BASE,NULL, "string", NULL, "0s", NULL,
         N_("Enable a base delay for "
                "fencing actions and specify base delay value."),
         N_("This enables a static delay for "
                "fencing actions, which can help avoid \"death matches\" where "
                "two nodes try to fence each other at the same time. If "
                "pcmk_delay_max  is also used, a random delay will be "
                "added such that the total delay is kept below that value."
                "This can be set to a single time value to apply to any node "
                "targeted by this device (useful if a separate device is "
                "configured for each target), or to a node map (for example, "
                "\"node1:1s;node2:5\") to set a different value per target.")
     },
     {
         PCMK_STONITH_ACTION_LIMIT,NULL, "integer", NULL, "1", NULL,
         N_("The maximum number of actions can be performed in parallel on this device"),
         N_("Cluster property concurrent-fencing=true needs to be configured first."
              "Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.")
     },
     {
 	"pcmk_reboot_action",NULL, "string", NULL, "reboot", NULL,
 	N_("Advanced use only: An alternate command to run instead of 'reboot'"),
         N_("Some devices do not support the standard commands or may provide additional ones.\n"
                  "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.")
     },
     {
 	"pcmk_reboot_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal."
 	   "Use this to specify an alternate, device-specific, timeout for \'reboot\' actions.")
     },
     {
 	"pcmk_reboot_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'reboot\' actions before giving up.")
     },
     {
 	"pcmk_off_action",NULL, "string", NULL, "off", NULL,
 	N_("Advanced use only: An alternate command to run instead of \'off\'"),
         N_("Some devices do not support the standard commands or may provide additional ones."
                  "Use this to specify an alternate, device-specific, command that implements the \'off\' action.")
     },
     {
 	"pcmk_off_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal."
 	   "Use this to specify an alternate, device-specific, timeout for \'off\' actions.")
     },
     {
 	"pcmk_off_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the 'off' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'off\' actions before giving up.")
     },
     {
 	"pcmk_on_action",NULL, "string", NULL, "on", NULL,
 	N_("Advanced use only: An alternate command to run instead of 'on'"),
         N_("Some devices do not support the standard commands or may provide additional ones."
                  "Use this to specify an alternate, device-specific, command that implements the \'on\' action.")
     },
     {
 	"pcmk_on_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal."
 	   "Use this to specify an alternate, device-specific, timeout for \'on\' actions.")
     },
     {
 	"pcmk_on_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the 'on' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'on\' actions before giving up.")
     },
     {
 	"pcmk_list_action",NULL, "string", NULL, "list", NULL,
 	N_("Advanced use only: An alternate command to run instead of \'list\'"),
         N_("Some devices do not support the standard commands or may provide additional ones."
                  "Use this to specify an alternate, device-specific, command that implements the \'list\' action.")
     },
     {
 	"pcmk_list_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal."
 	   "Use this to specify an alternate, device-specific, timeout for \'list\' actions.")
     },
     {
 	"pcmk_list_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the \'list\' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'list\' actions before giving up.")
     },
     {
 	"pcmk_monitor_action",NULL, "string", NULL, "monitor", NULL,
 	N_("Advanced use only: An alternate command to run instead of \'monitor\'"),
         N_("Some devices do not support the standard commands or may provide additional ones."
                  "Use this to specify an alternate, device-specific, command that implements the \'monitor\' action.")
     },
     {
 	"pcmk_monitor_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal.\n"
 	   "Use this to specify an alternate, device-specific, timeout for \'monitor\' actions.")
     },
     {
 	"pcmk_monitor_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the \'monitor\' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'monitor\' actions before giving up.")
     },
     {
 	"pcmk_status_action",NULL, "string", NULL, "status", NULL,
 	N_("Advanced use only: An alternate command to run instead of \'status\'"),
         N_("Some devices do not support the standard commands or may provide additional ones."
                  "Use this to specify an alternate, device-specific, command that implements the \'status\' action.")
     },
     {
 	"pcmk_status_timeout",NULL, "time", NULL, "60s", NULL,
 	N_("Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal."
 	   "Use this to specify an alternate, device-specific, timeout for \'status\' actions.")
     },
     {
 	"pcmk_status_retries",NULL, "integer", NULL, "2", NULL,
 	N_("Advanced use only: The maximum number of times to retry the \'status\' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation,      if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries \'status\' actions before giving up.")
     },
 };
 
 void
 fencer_metadata(void)
 {
     const char *desc_short = N_("Instance attributes available for all "
                              "\"stonith\"-class resources");
     const char *desc_long = N_("Instance attributes available for all \"stonith\"-"
                             "class resources and used by Pacemaker's fence "
                             "daemon, formerly known as stonithd");
 
     gchar *s = pcmk__format_option_metadata("pacemaker-fenced", desc_short,
                                             desc_long, fencer_options,
                                             PCMK__NELEM(fencer_options));
     printf("%s", s);
     g_free(s);
 }
 
 static GOptionEntry entries[] = {
     { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone,
-      "Deprecated (will be removed in a future release)", NULL },
+      N_("Deprecated (will be removed in a future release)"), NULL },
 
     { "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
-      stand_alone_cpg_cb, "Intended for use in regression testing only", NULL },
+      stand_alone_cpg_cb, N_("Intended for use in regression testing only"), NULL },
 
     { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY,
-      &options.log_files, "Send logs to the additional named logfile", NULL },
+      &options.log_files, N_("Send logs to the additional named logfile"), NULL },
 
     { NULL }
 };
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
 {
     GOptionContext *context = NULL;
 
     context = pcmk__build_arg_context(args, "text (default), xml", group,
                                       "[metadata]");
     pcmk__add_main_args(context, entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     int rc = pcmk_rc_ok;
     crm_cluster_t *cluster = NULL;
     crm_ipc_t *old_instance = NULL;
 
     GError *error = NULL;
 
     GOptionGroup *output_group = NULL;
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     gchar **processed_args = pcmk__cmdline_preproc(argv, "l");
     GOptionContext *context = build_arg_context(args, &output_group);
 
     crm_log_preinit(NULL, argc, argv);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if ((g_strv_length(processed_args) >= 2)
         && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) {
         fencer_metadata();
         goto done;
     }
 
     // Open additional log files
     pcmk__add_logfiles(options.log_files, out);
 
     crm_log_init(NULL, LOG_INFO + args->verbosity, TRUE,
                  (args->verbosity > 0), argc, argv, FALSE);
 
     crm_notice("Starting Pacemaker fencer");
 
     old_instance = crm_ipc_new("stonith-ng", 0);
     if (old_instance == NULL) {
         /* crm_ipc_new() will have already logged an error message with
          * crm_err()
          */
         exit_code = CRM_EX_FATAL;
         goto done;
     }
 
     if (crm_ipc_connect(old_instance)) {
         // IPC endpoint already up
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_err("pacemaker-fenced is already active, aborting startup");
         goto done;
     } else {
         // Not up or not authentic, we'll proceed either way
         crm_ipc_destroy(old_instance);
         old_instance = NULL;
     }
 
     mainloop_add_signal(SIGTERM, stonith_shutdown);
 
     crm_peer_init();
 
     fenced_data_set = pe_new_working_set();
     CRM_ASSERT(fenced_data_set != NULL);
 
     cluster = pcmk_cluster_new();
 
     /* Initialize the logger prior to setup_cib(). update_cib_cache_cb() may
      * call the "xml-patchset" message function, which needs the logger, after
      * setup_cib() has run.
      */
     rc = pcmk__log_output_new(&logger_out) != pcmk_rc_ok;
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_FATAL;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error creating output format log: %s", pcmk_rc_str(rc));
         goto done;
     }
     pe__register_messages(logger_out);
     pcmk__register_lib_messages(logger_out);
     pcmk__output_set_log_level(logger_out, LOG_TRACE);
     fenced_data_set->priv = logger_out;
 
     if (!stand_alone) {
 #if SUPPORT_COROSYNC
         if (is_corosync_cluster()) {
             cluster->destroy = stonith_peer_cs_destroy;
             cluster->cpg.cpg_deliver_fn = stonith_peer_ais_callback;
             cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership;
         }
 #endif // SUPPORT_COROSYNC
 
         crm_set_status_callback(&st_peer_update_callback);
 
         if (crm_cluster_connect(cluster) == FALSE) {
             exit_code = CRM_EX_FATAL;
             crm_crit("Cannot sign in to the cluster... terminating");
             goto done;
         }
         pcmk__str_update(&stonith_our_uname, cluster->uname);
 
         if (!options.no_cib_connect) {
             setup_cib();
         }
 
     } else {
         pcmk__str_update(&stonith_our_uname, "localhost");
         crm_warn("Stand-alone mode is deprecated and will be removed "
                  "in a future release");
     }
 
     init_device_list();
     init_topology_list();
 
     pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks);
 
     // Create the mainloop and run it...
     mainloop = g_main_loop_new(NULL, FALSE);
     crm_notice("Pacemaker fencer successfully started and accepting connections");
     g_main_loop_run(mainloop);
 
 done:
     g_strfreev(processed_args);
     pcmk__free_arg_context(context);
 
     g_strfreev(options.log_files);
 
     stonith_cleanup();
     pcmk_cluster_free(cluster);
     pe_free_working_set(fenced_data_set);
 
     pcmk__output_and_clear_error(&error, out);
 
     if (logger_out != NULL) {
         logger_out->finish(logger_out, exit_code, true, NULL);
         pcmk__output_free(logger_out);
     }
 
     if (out != NULL) {
         out->finish(out, exit_code, true, NULL);
         pcmk__output_free(out);
     }
 
     pcmk__unregister_formats();
     crm_exit(exit_code);
 }
diff --git a/po/zh_CN.po b/po/zh_CN.po
index a107f0b4eb..348676760d 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -1,1105 +1,1117 @@
 #
 # Copyright 2003-2022 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU Lesser General Public License
 # version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 #
 #, fuzzy
 msgid ""
 msgstr ""
 "Project-Id-Version: Pacemaker 2\n"
 "Report-Msgid-Bugs-To: developers@clusterlabs.org\n"
-"POT-Creation-Date: 2023-04-05 16:20-0500\n"
+"POT-Creation-Date: 2023-06-01 15:09+0800\n"
 "PO-Revision-Date: 2021-11-08 11:04+0800\n"
 "Last-Translator: Vivi <developers@clusterlabs.org>\n"
 "Language-Team: CHINESE <wangluwei@uniontech.org>\n"
 "Language: zh_CN\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 
 #: daemons/controld/controld_control.c:533
 msgid "Pacemaker version on cluster node elected Designated Controller (DC)"
 msgstr "集群选定的控制器节点(DC)的 Pacemaker 版本"
 
 #: daemons/controld/controld_control.c:534
 msgid ""
 "Includes a hash which identifies the exact changeset the code was built "
 "from. Used for diagnostic purposes."
 msgstr "它包含一个标识所构建代码变更版本的哈希值,其可用于诊断。"
 
 #: daemons/controld/controld_control.c:539
 msgid "The messaging stack on which Pacemaker is currently running"
 msgstr "Pacemaker 正在使用的消息传输引擎"
 
 #: daemons/controld/controld_control.c:540
 msgid "Used for informational and diagnostic purposes."
 msgstr "用于提供信息和诊断。"
 
 #: daemons/controld/controld_control.c:544
 msgid "An arbitrary name for the cluster"
 msgstr "任意的集群名称"
 
 #: daemons/controld/controld_control.c:545
 msgid ""
 "This optional value is mostly for users' convenience as desired in "
 "administration, but may also be used in Pacemaker configuration rules via "
 "the #cluster-name node attribute, and by higher-level tools and resource "
 "agents."
 msgstr ""
 "该可选值主要是为了方便用户管理使用,也可以在pacemaker 配置规则中通过 "
 "#cluster-name 节点属性配置使用,也可以通过高级工具和资源代理使用。"
 
 #: daemons/controld/controld_control.c:553
 msgid "How long to wait for a response from other nodes during start-up"
 msgstr "启动过程中等待其他节点响应的时间"
 
 #: daemons/controld/controld_control.c:554
 msgid ""
 "The optimal value will depend on the speed and load of your network and the "
 "type of switches used."
 msgstr "其最佳值将取决于你的网络速度和负载以及所用交换机的类型。"
 
 #: daemons/controld/controld_control.c:559
 msgid ""
 "Zero disables polling, while positive values are an interval in "
 "seconds(unless other units are specified, for example \"5min\")"
 msgstr ""
 "设置为0将禁用轮询,设置为正数将是以秒为单位的时间间隔(除非使用了其他单位,比"
 "如\"5min\"表示5分钟)"
 
 #: daemons/controld/controld_control.c:562
 msgid ""
 "Polling interval to recheck cluster state and evaluate rules with date "
 "specifications"
 msgstr "重新检查集群状态并且评估具有日期规格的配置规则的轮询间隔"
 
 #: daemons/controld/controld_control.c:564
 msgid ""
 "Pacemaker is primarily event-driven, and looks ahead to know when to recheck "
 "cluster state for failure timeouts and most time-based rules. However, it "
 "will also recheck the cluster after this amount of inactivity, to evaluate "
 "rules with date specifications and serve as a fail-safe for certain types of "
 "scheduler bugs."
 msgstr ""
 "Pacemaker 主要是通过事件驱动的,并能预期重新检查集群状态以评估大多数基于时间"
 "的规则以及过期的错误。然而无论如何,在集群经过该时间间隔的不活动状态后,它还"
 "将重新检查集群,以评估具有日期规格的规则,并为某些类型的调度程序缺陷提供故障"
 "保护。"
 
 #: daemons/controld/controld_control.c:573
 msgid "Maximum amount of system load that should be used by cluster nodes"
 msgstr "集群节点应该使用的最大系统负载量"
 
 #: daemons/controld/controld_control.c:574
 msgid ""
 "The cluster will slow down its recovery process when the amount of system "
 "resources used (currently CPU) approaches this limit"
 msgstr "当使用的系统资源量(当前为CPU)接近此限制时,集群将减慢其恢复过程"
 
 #: daemons/controld/controld_control.c:580
 msgid ""
 "Maximum number of jobs that can be scheduled per node (defaults to 2x cores)"
 msgstr "每个节点可以调度的最大作业数(默认为2x内核数)"
 
 #: daemons/controld/controld_control.c:584
 msgid "How a cluster node should react if notified of its own fencing"
 msgstr "集群节点在收到针对自己的 fence 操作结果通知时应如何反应"
 
 #: daemons/controld/controld_control.c:585
 msgid ""
 "A cluster node may receive notification of its own fencing if fencing is "
 "misconfigured, or if fabric fencing is in use that doesn't cut cluster "
 "communication. Allowed values are \"stop\" to attempt to immediately stop "
 "Pacemaker and stay stopped, or \"panic\" to attempt to immediately reboot "
 "the local node, falling back to stop on failure."
 msgstr ""
 "如果有错误的 fence 配置,或者在使用 fabric fence 机制 (并不会切断集群通信),"
 "则集群节点可能会收到针对自己的 fence 结果通知。允许的值为 \"stop\" 尝试立即停"
 "止 pacemaker 并保持停用状态,或者 \"panic\" 尝试立即重新启动本地节点,并在失败"
 "时返回执行stop。"
 
 #: daemons/controld/controld_control.c:595
 msgid ""
 "Declare an election failed if it is not decided within this much time. If "
 "you need to adjust this value, it probably indicates the presence of a bug."
 msgstr ""
 "如果集群在本项设置时间内没有作出决定则宣布选举失败。如果您需要调整该值,这可"
 "能代表存在某些缺陷。"
 
 #: daemons/controld/controld_control.c:603
 msgid ""
 "Exit immediately if shutdown does not complete within this much time. If you "
 "need to adjust this value, it probably indicates the presence of a bug."
 msgstr ""
 "如果在这段时间内关机仍未完成,则立即退出。如果您需要调整该值,这可能代表存在"
 "某些缺陷。"
 
 #: daemons/controld/controld_control.c:611
 #: daemons/controld/controld_control.c:618
 msgid ""
 "If you need to adjust this value, it probably indicates the presence of a "
 "bug."
 msgstr "如果您需要调整该值,这可能代表存在某些缺陷。"
 
 #: daemons/controld/controld_control.c:624
 msgid ""
 "*** Advanced Use Only *** Enabling this option will slow down cluster "
 "recovery under all conditions"
 msgstr "*** Advanced Use Only *** 启用此选项将在所有情况下减慢集群恢复的速度"
 
 #: daemons/controld/controld_control.c:626
 msgid ""
 "Delay cluster recovery for this much time to allow for additional events to "
 "occur. Useful if your configuration is sensitive to the order in which ping "
 "updates arrive."
 msgstr ""
 "集群恢复将被推迟指定的时间间隔,以等待更多事件发生。如果您的配置对 ping 更新"
 "到达的顺序很敏感,这就很有用"
 
 #: daemons/controld/controld_control.c:633
 #, fuzzy
 msgid ""
 "How long before nodes can be assumed to be safely down when watchdog-based "
 "self-fencing via SBD is in use"
 msgstr ""
 "当基于 watchdog 的自我 fence 机制通过SBD 被执行时,我们可以假设节点安全关闭之"
 "前需要等待多长时间"
 
 #: daemons/controld/controld_control.c:635
 msgid ""
 "If this is set to a positive value, lost nodes are assumed to self-fence "
 "using watchdog-based SBD within this much time. This does not require a "
 "fencing resource to be explicitly configured, though a fence_watchdog "
 "resource can be configured, to limit use to specific nodes. If this is set "
 "to 0 (the default), the cluster will never assume watchdog-based self-"
 "fencing. If this is set to a negative value, the cluster will use twice the "
 "local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is "
 "positive, or otherwise treat this as 0. WARNING: When used, this timeout "
 "must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-"
 "based SBD, and Pacemaker will refuse to start on any of those nodes where "
 "this is not true for the local value or SBD is not active. When this is set "
 "to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on "
 "all nodes that use SBD, otherwise data corruption or loss could occur."
 msgstr ""
 "如果设置为正值,则假定丢失的节点在这段时间内使用基于watchdog的SBD进行自我防"
 "护。这不需要明确配置fence资源,但可以配置一个fence_watchdog资源,以限制特定节"
 "点的使用。如果设置为0(默认值),集群将永远不会假定基于watchdog的自我防护。如"
 "果设置为负值,且如果`SBD_WATCHDOG_TIMEOUT`环境变量的本地值为正值,则集群将使"
 "用该值的两倍,否则将其视为0。警告:在使用基于watchdog的SBD的所有节点上,此超"
"时必须大于`SBD_WATCHDOG_TIMEOUT`,如果本地值不是这样,或者SBD未运行,则"
 "Pacemaker将拒绝在任何节点上启动。如果设置为负值,则在使用SBD的所有节点上,"
 "`SBD_WATCHDOG_TIMEOUT`必须设置为相同的值,否则可能会发生数据损坏或丢失。"
 
 #: daemons/controld/controld_control.c:654
 msgid ""
 "How many times fencing can fail before it will no longer be immediately re-"
 "attempted on a target"
 msgstr "fence操作失败多少次会停止立即尝试"
 
 #: daemons/controld/controld_control.c:662 lib/pengine/common.c:39
 msgid "What to do when the cluster does not have quorum"
msgstr "当集群没有必需票数时该如何做"
 
 #: daemons/controld/controld_control.c:667 lib/pengine/common.c:73
 msgid "Whether to lock resources to a cleanly shut down node"
 msgstr "是否锁定资源到完全关闭的节点"
 
 #: daemons/controld/controld_control.c:668 lib/pengine/common.c:74
 msgid ""
 "When true, resources active on a node when it is cleanly shut down are kept "
 "\"locked\" to that node (not allowed to run elsewhere) until they start "
 "again on that node after it rejoins (or for at most shutdown-lock-limit, if "
 "set). Stonith resources and Pacemaker Remote connections are never locked. "
 "Clone and bundle instances and the promoted role of promotable clones are "
 "currently never locked, though support could be added in a future release."
 msgstr ""
 "设置为true时,在完全关闭的节点上活动的资源将被“锁定”到该节点(不允许在其他地"
 "方运行),直到该节点重新加入后资源重新启动(或最长shutdown-lock-limit,如果已"
 "设置)。 Stonith资源和Pacemaker Remote连接永远不会被锁定。 克隆和捆绑实例以及"
 "可升级克隆的主角色目前从未锁定,尽管可以在将来的发行版中添加支持。"
 
 #: daemons/controld/controld_control.c:680 lib/pengine/common.c:86
 msgid "Do not lock resources to a cleanly shut down node longer than this"
 msgstr "资源会被锁定到完全关闭的节点的最长时间"
 
 #: daemons/controld/controld_control.c:682 lib/pengine/common.c:88
 msgid ""
 "If shutdown-lock is true and this is set to a nonzero time duration, "
 "shutdown locks will expire after this much time has passed since the "
 "shutdown was initiated, even if the node has not rejoined."
 msgstr ""
 "如果shutdown-lock为true,并且将此选项设置为非零持续时间,则自从开始shutdown以"
 "来经过了这么长的时间后,shutdown锁将过期,即使该节点尚未重新加入。"
 
 #: daemons/fenced/pacemaker-fenced.c:1379
 msgid "Advanced use only: An alternate parameter to supply instead of 'port'"
 msgstr "仅高级使用:使用替代的参数名,而不是'port'"
 
 #: daemons/fenced/pacemaker-fenced.c:1380
 msgid ""
 "some devices do not support the standard 'port' parameter or may provide "
 "additional ones. Use this to specify an alternate, device-specific, "
 "parameter that should indicate the machine to be fenced. A value of none can "
 "be used to tell the cluster not to supply any additional parameters."
 msgstr ""
 "一些设备不支持标准的'port'参数,或者可能提供其他参数。使用此选项可指定一个该"
 "设备专用的参数名,该参数用于标识需要fence的机器。值none可以用于告诉集群不要提"
 "供任何其他的参数。"
 
 #: daemons/fenced/pacemaker-fenced.c:1389
 msgid ""
 "A mapping of host names to ports numbers for devices that do not support "
 "host names."
 msgstr "为不支持主机名的设备提供主机名到端口号的映射。"
 
 #: daemons/fenced/pacemaker-fenced.c:1390
 msgid ""
 "Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and "
 "ports 2 and 3 for node2"
 msgstr ""
 "例如 node1:1;node2:2,3,将会告诉集群对node1使用端口1,对node2使用端口2和3 "
 
 #: daemons/fenced/pacemaker-fenced.c:1394
 msgid "Eg. node1,node2,node3"
 msgstr "例如 node1,node2,node3"
 
 #: daemons/fenced/pacemaker-fenced.c:1395
 msgid ""
 "A list of machines controlled by this device (Optional unless "
 "pcmk_host_list=static-list)"
 msgstr "该设备控制的机器列表(可选参数,除非 pcmk_host_list 设置为 static-list)"
 
 #: daemons/fenced/pacemaker-fenced.c:1400
 msgid "How to determine which machines are controlled by the device."
 msgstr "如何确定设备控制哪些机器。"
 
 #: daemons/fenced/pacemaker-fenced.c:1401
 msgid ""
 "Allowed values: dynamic-list (query the device via the 'list' command), "
 "static-list (check the pcmk_host_list attribute), status (query the device "
 "via the 'status' command), none (assume every device can fence every machine)"
 msgstr ""
 "允许的值:dynamic-list(通过'list'命令查询设备),static-list(检查"
 "pcmk_host_list属性),status(通过'status'命令查询设备),none(假设每个设备"
 "都可fence 每台机器 )"
 
 #: daemons/fenced/pacemaker-fenced.c:1410
 #: daemons/fenced/pacemaker-fenced.c:1419
 msgid "Enable a base delay for fencing actions and specify base delay value."
 msgstr "在执行 fencing 操作前启用不超过指定时间的延迟。"
 
 #: daemons/fenced/pacemaker-fenced.c:1411
 msgid ""
 "Enable a delay of no more than the time specified before executing fencing "
 "actions. Pacemaker derives the overall delay by taking the value of "
 "pcmk_delay_base and adding a random delay value such that the sum is kept "
 "below this maximum."
 msgstr ""
 "在执行 fencing 操作前启用不超过指定时间的延迟。 Pacemaker通过获取"
 "pcmk_delay_base的值并添加随机延迟值来得出总体延迟,从而使总和保持在此最大值以"
 "下。"
 
 #: daemons/fenced/pacemaker-fenced.c:1421
 msgid ""
 "This enables a static delay for fencing actions, which can help avoid "
 "\"death matches\" where two nodes try to fence each other at the same time. "
 "If pcmk_delay_max  is also used, a random delay will be added such that the "
 "total delay is kept below that value.This can be set to a single time value "
 "to apply to any node targeted by this device (useful if a separate device is "
 "configured for each target), or to a node map (for example, \"node1:1s;"
 "node2:5\") to set a different value per target."
 msgstr ""
 "这使fencing 操作启用静态延迟,这可以帮助避免\"death matches\"即两个节点试图同"
 "时互相fence.如果还使用了pcmk_delay_max,则将添加随机延迟,以使总延迟保持在该"
 "值以下。可以将其设置为单个时间值,以应用于该设备针对的任何节点(适用于为每个"
 "目标分别配置了各自的设备的情况), 或着设置为一个节点映射 (例如,\"node1:1s;"
 "node2:5\")从而为每个目标设置不同值。"
 
 #: daemons/fenced/pacemaker-fenced.c:1433
 msgid ""
 "The maximum number of actions can be performed in parallel on this device"
 msgstr "可以在该设备上并发执行的最多操作数量"
 
 #: daemons/fenced/pacemaker-fenced.c:1434
 msgid ""
 "Cluster property concurrent-fencing=true needs to be configured first.Then "
 "use this to specify the maximum number of actions can be performed in "
 "parallel on this device. -1 is unlimited."
 msgstr ""
 "需要首先配置集群属性 concurrent-fencing=true 。然后使用此参数指定可以在该设备"
 "上并发执行的最多操作数量。 -1 代表没有限制"
 
 #: daemons/fenced/pacemaker-fenced.c:1439
 msgid "Advanced use only: An alternate command to run instead of 'reboot'"
 msgstr "仅高级使用:运行替代命令,而不是'reboot'"
 
 #: daemons/fenced/pacemaker-fenced.c:1440
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.\n"
 "Use this to specify an alternate, device-specific, command that implements "
 "the 'reboot' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可以指定一个该设备特定的"
 "替代命令,用来实现'reboot'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1445
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for reboot actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于'reboot' 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1446
 msgid ""
 "Some devices need much more/less time to complete than normal.Use this to "
 "specify an alternate, device-specific, timeout for 'reboot' actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
 "于'reboot'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1451
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'reboot' command "
 "within the timeout period"
 msgstr "仅高级使用:在超时前重试'reboot'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1452
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'reboot' actions before giving up."
 msgstr ""
 "一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' ,因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'reboot' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1458
 msgid "Advanced use only: An alternate command to run instead of 'off'"
 msgstr "仅高级使用:运行替代命令,而不是'off'"
 
 #: daemons/fenced/pacemaker-fenced.c:1459
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.Use this to specify an alternate, device-specific, command that "
 "implements the 'off' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可指定一个该设备专用的替代"
 "命令,用来实现'off'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1464
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for off actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于off 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1465
 msgid ""
 "Some devices need much more/less time to complete than normal.Use this to "
 "specify an alternate, device-specific, timeout for 'off' actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
 "于'off'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1470
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'off' command "
 "within the timeout period"
 msgstr "仅高级使用:在超时前重试'off'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1471
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'off' actions before giving up."
 msgstr ""
 " 一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' , 因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'off' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1477
 msgid "Advanced use only: An alternate command to run instead of 'on'"
 msgstr "仅高级使用:运行替代命令,而不是'on'"
 
 #: daemons/fenced/pacemaker-fenced.c:1478
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.Use this to specify an alternate, device-specific, command that "
 "implements the 'on' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可指定一个该设备特定的替"
 "代命令,用来实现'on'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1483
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for on actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于on 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1484
 msgid ""
 "Some devices need much more/less time to complete than normal.Use this to "
 "specify an alternate, device-specific, timeout for 'on' actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
 "于'on'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1489
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'on' command "
 "within the timeout period"
 msgstr "仅高级使用:在超时前重试'on'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1490
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'on' actions before giving up."
 msgstr ""
 " 一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' , 因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'on' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1496
 msgid "Advanced use only: An alternate command to run instead of 'list'"
 msgstr "仅高级使用:运行替代命令,而不是'list'"
 
 #: daemons/fenced/pacemaker-fenced.c:1497
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.Use this to specify an alternate, device-specific, command that "
 "implements the 'list' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可指定一个该设备特定的替"
 "代命令,用来实现'list'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1502
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for list actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于list 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1503
 msgid ""
 "Some devices need much more/less time to complete than normal.Use this to "
 "specify an alternate, device-specific, timeout for 'list' actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
 "于'list'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1508
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'list' command "
 "within the timeout period"
 msgstr "仅高级使用:在超时前重试'list'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1509
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'list' actions before giving up."
 msgstr ""
 " 一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' , 因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'list' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1515
 msgid "Advanced use only: An alternate command to run instead of 'monitor'"
 msgstr "仅高级使用:运行替代命令,而不是'monitor'"
 
 #: daemons/fenced/pacemaker-fenced.c:1516
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.Use this to specify an alternate, device-specific, command that "
 "implements the 'monitor' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可指定一个该设备特定的替"
 "代命令,用来实现'monitor'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1521
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for monitor actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于monitor 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1522
 msgid ""
 "Some devices need much more/less time to complete than normal.\n"
 "Use this to specify an alternate, device-specific, timeout for 'monitor' "
 "actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
 "于'monitor'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1527
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'monitor' "
 "command within the timeout period"
 msgstr "仅高级使用:在超时前重试'monitor'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1528
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'monitor' actions before giving up."
 msgstr ""
 " 一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' , 因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'monitor' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1534
 msgid "Advanced use only: An alternate command to run instead of 'status'"
 msgstr "仅高级使用:运行替代命令,而不是'status'"
 
 #: daemons/fenced/pacemaker-fenced.c:1535
 msgid ""
 "Some devices do not support the standard commands or may provide additional "
 "ones.Use this to specify an alternate, device-specific, command that "
 "implements the 'status' action."
 msgstr ""
 "一些设备不支持标准命令或可能提供其他命令,使用此选项可指定一个该设备特定的替"
 "代命令,用来实现'status'操作。"
 
 #: daemons/fenced/pacemaker-fenced.c:1540
 msgid ""
 "Advanced use only: Specify an alternate timeout to use for status actions "
 "instead of stonith-timeout"
 msgstr "仅高级使用:指定用于status 操作的替代超时,而不是stonith-timeout"
 
 #: daemons/fenced/pacemaker-fenced.c:1541
 msgid ""
 "Some devices need much more/less time to complete than normal.Use this to "
 "specify an alternate, device-specific, timeout for 'status' actions."
 msgstr ""
 "一些设备需要比正常情况下更多或更少的时间来完成操作,使用此选项指定一个用"
"于'status'操作的该设备特定的替代超时。"
 
 #: daemons/fenced/pacemaker-fenced.c:1546
 msgid ""
 "Advanced use only: The maximum number of times to retry the 'status' command "
 "within the timeout period"
 msgstr "仅高级使用:在超时前重试'status'命令的最大次数"
 
 #: daemons/fenced/pacemaker-fenced.c:1547
 msgid ""
 "Some devices do not support multiple connections. Operations may 'fail' if "
 "the device is busy with another task so Pacemaker will automatically retry "
 "the operation,      if there is time remaining. Use this option to alter the "
 "number of times Pacemaker retries 'status' actions before giving up."
 msgstr ""
 " 一些设备不支持多个连接。 如果设备忙于另一个任务,则操作可能会'失败' , 因此"
 "Pacemaker将自动重试(如果时间允许)。 使用此选项更改Pacemaker在放弃之前重"
 "试'status' 操作的次数."
 
 #: daemons/fenced/pacemaker-fenced.c:1556
 msgid "Instance attributes available for all \"stonith\"-class resources"
 msgstr " 可用于所有stonith类资源的实例属性"
 
 #: daemons/fenced/pacemaker-fenced.c:1558
 msgid ""
 "Instance attributes available for all \"stonith\"-class resources and used "
 "by Pacemaker's fence daemon, formerly known as stonithd"
 msgstr ""
 " 可用于所有stonith类资源的实例属性,并由Pacemaker的fence守护程序使用(以前称"
 "为stonithd)"
 
+#: daemons/fenced/pacemaker-fenced.c:1571
+msgid "Deprecated (will be removed in a future release)"
+msgstr "已弃用(将在未来版本中删除)"
+
+#: daemons/fenced/pacemaker-fenced.c:1574
+msgid "Intended for use in regression testing only"
+msgstr "仅适用于回归测试"
+
+#: daemons/fenced/pacemaker-fenced.c:1577
+msgid "Send logs to the additional named logfile"
+msgstr "将日志发送到其他命名日志文件"
+
 #: lib/cib/cib_utils.c:589
 msgid "Enable Access Control Lists (ACLs) for the CIB"
 msgstr "为CIB启用访问控制列表(ACL)"
 
 #: lib/cib/cib_utils.c:595
 msgid "Maximum IPC message backlog before disconnecting a cluster daemon"
 msgstr "断开集群守护程序之前的最大IPC消息积压"
 
 #: lib/cib/cib_utils.c:596
 msgid ""
 "Raise this if log has \"Evicting client\" messages for cluster daemon PIDs "
 "(a good value is the number of resources in the cluster multiplied by the "
 "number of nodes)."
 msgstr ""
 "如果日志中有针对集群守护程序PID的消息“Evicting client”,(则建议将值设为集群"
 "中的资源数量乘以节点数量)"
 
 #: lib/common/options.c:401
 msgid "  Allowed values: "
 msgstr " 允许的值: "
 
 #: lib/common/cmdline.c:70
 msgid "Display software version and exit"
 msgstr "显示软件版本信息"
 
 #: lib/common/cmdline.c:73
 msgid "Increase debug output (may be specified multiple times)"
 msgstr "显示更多调试信息(可多次指定)"
 
 #: lib/common/cmdline.c:92
 msgid "FORMAT"
 msgstr "格式"
 
 #: lib/common/cmdline.c:94
 msgid "Specify file name for output (or \"-\" for stdout)"
 msgstr "指定输出的文件名 或指定'-' 表示标准输出"
 
 #: lib/common/cmdline.c:94
 msgid "DEST"
 msgstr "目标"
 
 #: lib/common/cmdline.c:100
 msgid "Output Options:"
 msgstr "输出选项"
 
 #: lib/common/cmdline.c:100
 msgid "Show output help"
 msgstr "显示输出帮助"
 
 #: lib/pengine/common.c:45
 msgid "Whether resources can run on any node by default"
 msgstr "资源是否默认可以在任何节点上运行"
 
 #: lib/pengine/common.c:51
 msgid ""
 "Whether the cluster should refrain from monitoring, starting, and stopping "
 "resources"
 msgstr "集群是否应避免监视,启动和停止资源"
 
 #: lib/pengine/common.c:58
 msgid ""
 "Whether a start failure should prevent a resource from being recovered on "
 "the same node"
 msgstr "是否避免在同一节点上重启启动失败的资源"
 
 #: lib/pengine/common.c:60
 msgid ""
 "When true, the cluster will immediately ban a resource from a node if it "
 "fails to start there. When false, the cluster will instead check the "
 "resource's fail count against its migration-threshold."
 msgstr ""
 "当为true,如果资源启动失败,集群将立即禁止节点启动该资源,当为false,群集将根"
 "据其迁移阈值来检查资源的失败计数。"
 
 #: lib/pengine/common.c:67
 msgid "Whether the cluster should check for active resources during start-up"
 msgstr "群集是否在启动期间检查运行资源"
 
 #: lib/pengine/common.c:98
 msgid ""
 "*** Advanced Use Only *** Whether nodes may be fenced as part of recovery"
 msgstr "*** Advanced Use Only *** 节点是否可以被 fence 以作为集群恢复的一部分"
 
 #: lib/pengine/common.c:100
 msgid ""
 "If false, unresponsive nodes are immediately assumed to be harmless, and "
 "resources that were active on them may be recovered elsewhere. This can "
 "result in a \"split-brain\" situation, potentially leading to data loss and/"
 "or service unavailability."
 msgstr ""
 "如果为false,则立即假定无响应的节点是无害的,并且可以在其他位置恢复在其上活动"
 "的资源。 这可能会导致 \"split-brain\" 情况,可能导致数据丢失和/或服务不可用。"
 
 #: lib/pengine/common.c:108
 msgid ""
 "Action to send to fence device when a node needs to be fenced (\"poweroff\" "
 "is a deprecated alias for \"off\")"
 msgstr "发送到 fence 设备的操作( \"poweroff\" 是 \"off \"的别名,不建议使用)"
 
 #: lib/pengine/common.c:115
 msgid "*** Advanced Use Only *** Unused by Pacemaker"
 msgstr "*** Advanced Use Only *** pacemaker未使用"
 
 #: lib/pengine/common.c:116
 msgid ""
 "This value is not used by Pacemaker, but is kept for backward compatibility, "
 "and certain legacy fence agents might use it."
 msgstr ""
 "Pacemaker不使用此值,但保留此值是为了向后兼容,某些传统的fence 代理可能会使用"
 "它。"
 
 #: lib/pengine/common.c:122
 msgid "Whether watchdog integration is enabled"
 msgstr "是否启用watchdog集成设置"
 
 #: lib/pengine/common.c:123
 msgid ""
 "This is set automatically by the cluster according to whether SBD is "
 "detected to be in use. User-configured values are ignored. The value `true` "
 "is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is "
 "nonzero. In that case, if fencing is required, watchdog-based self-fencing "
 "will be performed via SBD without requiring a fencing resource explicitly "
 "configured."
 msgstr ""
 "这是由集群检测是否正在使用 SBD 并自动设置。用户配置的值将被忽略。如果使用无"
 "盘 SBD 并且 stonith-watchdog-timeout 不为零时,此选项为 true 才有实际意义。在"
 "这种情况下,无需明确配置fence资源,如果需要fence时,基于watchdog的自我fence会"
 "通过SBD执行。"
 
 #: lib/pengine/common.c:133
 msgid "Allow performing fencing operations in parallel"
 msgstr "允许并行执行 fencing 操作"
 
 #: lib/pengine/common.c:139
 msgid "*** Advanced Use Only *** Whether to fence unseen nodes at start-up"
 msgstr "*** 仅高级使用 *** 是否在启动时fence不可见节点"
 
 #: lib/pengine/common.c:140
 msgid ""
 "Setting this to false may lead to a \"split-brain\" situation,potentially "
 "leading to data loss and/or service unavailability."
 msgstr ""
 "将此设置为 false 可能会导致 \"split-brain\" 的情况,可能导致数据丢失和/或服务"
 "不可用。"
 
 #: lib/pengine/common.c:146
 msgid ""
 "Apply fencing delay targeting the lost nodes with the highest total resource "
 "priority"
 msgstr "针对具有最高总资源优先级的丢失节点应用fencing延迟"
 
 #: lib/pengine/common.c:147
 msgid ""
 "Apply specified delay for the fencings that are targeting the lost nodes "
 "with the highest total resource priority in case we don't have the majority "
 "of the nodes in our cluster partition, so that the more significant nodes "
 "potentially win any fencing match, which is especially meaningful under "
 "split-brain of 2-node cluster. A promoted resource instance takes the base "
 "priority + 1 on calculation if the base priority is not 0. Any static/random "
 "delays that are introduced by `pcmk_delay_base/max` configured for the "
 "corresponding fencing resources will be added to this delay. This delay "
 "should be significantly greater than, safely twice, the maximum "
 "`pcmk_delay_base/max`. By default, priority fencing delay is disabled."
 msgstr ""
 "如果我们所在的集群分区并不拥有大多数集群节点,则针对丢失节点的fence操作应用指"
 "定的延迟,这样更重要的节点就能够赢得fence竞赛。这对于双节点集群在split-brain"
 "状况下尤其有意义。如果基本优先级不为0,在计算时主资源实例获得基本优先级+1。任"
 "何对于相应的 fence 资源由 pcmk_delay_base/max 配置所引入的静态/随机延迟会被添"
 "加到此延迟。为了安全, 这个延迟应该明显大于 pcmk_delay_base/max 的最大设置值,"
 "例如两倍。默认情况下,优先级fencing延迟已禁用。"
 
 #: lib/pengine/common.c:164
 msgid "Maximum time for node-to-node communication"
 msgstr "最大节点间通信时间"
 
 #: lib/pengine/common.c:165
 msgid ""
 "The node elected Designated Controller (DC) will consider an action failed "
 "if it does not get a response from the node executing the action within this "
 "time (after considering the action's own timeout). The \"correct\" value "
 "will depend on the speed and load of your network and cluster nodes."
 msgstr ""
 "如果一个操作未在该时间内(并且考虑操作本身的超时时长)从执行该操作的节点获得"
 "响应,则会被选为指定控制器(DC)的节点认定为失败。\"正确\" 值将取决于速度和您"
 "的网络和集群节点的负载。"
 
 #: lib/pengine/common.c:174
 #, fuzzy
 msgid ""
 "Maximum number of jobs that the cluster may execute in parallel across all "
 "nodes"
 msgstr "集群可以在所有节点上并发执行的最大作业数"
 
 #: lib/pengine/common.c:176
 msgid ""
 "The \"correct\" value will depend on the speed and load of your network and "
 "cluster nodes. If set to 0, the cluster will impose a dynamically calculated "
 "limit when any node has a high load."
 msgstr ""
 "\"正确\" 值将取决于速度和您的网络与集群节点的负载。如果设置为0,当任何节点具"
 "有高负载时,集群将施加一个动态计算的限制。"
 
 #: lib/pengine/common.c:184
 msgid ""
 "The number of live migration actions that the cluster is allowed to execute "
 "in parallel on a node (-1 means no limit)"
 msgstr "允许集群在一个节点上并行执行的实时迁移操作的数量(-1表示没有限制)"
 
 #: lib/pengine/common.c:192
 #, fuzzy
 msgid "Whether the cluster should stop all active resources"
 msgstr "群集是否在启动期间检查运行资源"
 
 #: lib/pengine/common.c:198
 msgid "Whether to stop resources that were removed from the configuration"
 msgstr "是否停止配置已被删除的资源"
 
 #: lib/pengine/common.c:204
 msgid "Whether to cancel recurring actions removed from the configuration"
 msgstr "是否取消配置已被删除的的重复操作"
 
 #: lib/pengine/common.c:210
 msgid ""
 "*** Deprecated *** Whether to remove stopped resources from the executor"
 msgstr "***不推荐***是否从pacemaker-execd 守护进程中清除已停止的资源"
 
 #: lib/pengine/common.c:212
 msgid ""
 "Values other than default are poorly tested and potentially dangerous. This "
 "option will be removed in a future release."
 msgstr "非默认值未经过充分的测试,有潜在的风险。该选项将在未来的版本中删除。"
 
 #: lib/pengine/common.c:220
 msgid "The number of scheduler inputs resulting in errors to save"
 msgstr "保存导致错误的调度程序输入的数量"
 
 #: lib/pengine/common.c:221 lib/pengine/common.c:227 lib/pengine/common.c:233
 msgid "Zero to disable, -1 to store unlimited."
 msgstr "零表示禁用,-1表示存储不受限制。"
 
 #: lib/pengine/common.c:226
 msgid "The number of scheduler inputs resulting in warnings to save"
 msgstr "保存导致警告的调度程序输入的数量"
 
 #: lib/pengine/common.c:232
 msgid "The number of scheduler inputs without errors or warnings to save"
 msgstr "保存没有错误或警告的调度程序输入的数量"
 
 #: lib/pengine/common.c:243
 #, fuzzy
 msgid "How cluster should react to node health attributes"
 msgstr "集群节点对节点健康属性如何反应"
 
 #: lib/pengine/common.c:244
 msgid ""
 "Requires external entities to create node attributes (named with the prefix "
 "\"#health\") with values \"red\", \"yellow\", or \"green\"."
 msgstr ""
"需要外部实体创建具有“red”、“yellow”或“green”值的节点属性(前缀为“#health”)。"
 
 #: lib/pengine/common.c:251
 msgid "Base health score assigned to a node"
 msgstr "分配给节点的基本健康分数"
 
 #: lib/pengine/common.c:252
 msgid "Only used when \"node-health-strategy\" is set to \"progressive\"."
 msgstr "仅在“node-health-strategy”设置为“progressive”时使用。"
 
 #: lib/pengine/common.c:257
 msgid "The score to use for a node health attribute whose value is \"green\""
 msgstr "为节点健康属性值为“green”所使用的分数"
 
 #: lib/pengine/common.c:258 lib/pengine/common.c:264 lib/pengine/common.c:270
 msgid ""
 "Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive"
 "\"."
 msgstr "仅在“node-health-strategy”设置为“custom”或“progressive”时使用。"
 
 #: lib/pengine/common.c:263
 msgid "The score to use for a node health attribute whose value is \"yellow\""
 msgstr "为节点健康属性值为“yellow”所使用的分数"
 
 #: lib/pengine/common.c:269
 msgid "The score to use for a node health attribute whose value is \"red\""
 msgstr "为节点健康属性值为“red”所使用的分数"
 
 #: lib/pengine/common.c:278
 #, fuzzy
 msgid "How the cluster should allocate resources to nodes"
 msgstr "群集应该如何分配资源到节点"
 
 #: tools/crm_resource.c:258
 #, c-format
 msgid "Aborting because no messages received in %d seconds"
 msgstr "中止,因为在%d秒内没有接收到消息"
 
 #: tools/crm_resource.c:915
 #, c-format
 msgid "Invalid check level setting: %s"
 msgstr "无效的检查级别设置:%s"
 
 #: tools/crm_resource.c:999
 #, c-format
 msgid ""
 "Resource '%s' not moved: active in %d locations (promoted in %d).\n"
 "To prevent '%s' from running on a specific location, specify a node.To "
 "prevent '%s' from being promoted at a specific location, specify a node and "
 "the --promoted option."
 msgstr ""
 "资源'%s'未移动:在%d个位置运行(其中在%d个位置为主实例)\n"
 "若要阻止'%s'在特定位置运行,请指定一个节点。若要防止'%s'在指定位置升级,指定"
 "一个节点并使用--promoted选项"
 
 #: tools/crm_resource.c:1010
 #, c-format
 msgid ""
 "Resource '%s' not moved: active in %d locations.\n"
 "To prevent '%s' from running on a specific location, specify a node."
 msgstr ""
"资源'%s'未移动:在%d个位置运行\n"
 "若要防止'%s'运行在特定位置,指定一个节点"
 
 #: tools/crm_resource.c:1085
 #, c-format
 msgid "Could not get modified CIB: %s\n"
 msgstr "无法获得修改的CIB:%s\n"
 
 #: tools/crm_resource.c:1119
 msgid "You need to specify a resource type with -t"
 msgstr "需要使用-t指定资源类型"
 
 #: tools/crm_resource.c:1162
 #, c-format
 msgid "No agents found for standard '%s'"
 msgstr "没有发现指定的'%s'标准代理"
 
 #: tools/crm_resource.c:1165
#, c-format
msgid "No agents found for standard '%s' and provider '%s'"
msgstr "没有发现指定的标准'%s'和提供者'%s'的资源代理"
 
 #: tools/crm_resource.c:1232
 #, c-format
 msgid "No %s found for %s"
 msgstr "没有发现%s符合%s"
 
 #: tools/crm_resource.c:1237
 #, c-format
 msgid "No %s found"
 msgstr "没有发现%s"
 
 #: tools/crm_resource.c:1297
 #, c-format
 msgid "No cluster connection to Pacemaker Remote node %s detected"
 msgstr "未检测到至pacemaker远程节点%s的集群连接"
 
 #: tools/crm_resource.c:1358
 msgid "Must specify -t with resource type"
 msgstr "需要使用-t指定资源类型"
 
 #: tools/crm_resource.c:1364
 msgid "Must supply -v with new value"
 msgstr "必须使用-v指定新值"
 
 #: tools/crm_resource.c:1396
 msgid "Could not create executor connection"
 msgstr "无法创建到pacemaker-execd守护进程的连接"
 
 #: tools/crm_resource.c:1421
#, c-format
msgid "Metadata query for %s failed: %s"
msgstr "查询%s的元数据失败:%s"
 
 #: tools/crm_resource.c:1427
 #, c-format
 msgid "'%s' is not a valid agent specification"
msgstr "'%s' 不是一个有效的代理规格说明"
 
 #: tools/crm_resource.c:1440
 msgid "--resource cannot be used with --class, --agent, and --provider"
 msgstr "--resource 不能与 --class, --agent, --provider一起使用"
 
 #: tools/crm_resource.c:1445
 msgid ""
 "--class, --agent, and --provider can only be used with --validate and --"
 "force-*"
 msgstr "--class, --agent和--provider只能被用于--validate和--force-*"
 
 #: tools/crm_resource.c:1454
 msgid "stonith does not support providers"
 msgstr "stonith 不支持提供者"
 
 #: tools/crm_resource.c:1458
 #, c-format
 msgid "%s is not a known stonith agent"
 msgstr "%s 不是一个已知stonith代理"
 
 #: tools/crm_resource.c:1463
 #, c-format
 msgid "%s:%s:%s is not a known resource"
 msgstr "%s:%s:%s 不是一个已知资源"
 
 #: tools/crm_resource.c:1577
 #, c-format
 msgid "Error creating output format %s: %s"
 msgstr "创建输出格式错误 %s:%s"
 
 #: tools/crm_resource.c:1604
 msgid "--expired requires --clear or -U"
 msgstr "--expired需要和--clear或-U一起使用"
 
 #: tools/crm_resource.c:1621
 #, c-format
 msgid "Error parsing '%s' as a name=value pair"
 msgstr "'%s'解析错误,格式为name=value"
 
 #: tools/crm_resource.c:1718
 msgid "Must supply a resource id with -r"
 msgstr "必须使用-r指定资源id"
 
 #: tools/crm_resource.c:1724
 msgid "Must supply a node name with -N"
 msgstr "必须使用-N指定节点名称"
 
 #: tools/crm_resource.c:1742
 msgid "Could not create CIB connection"
 msgstr "无法创建到CIB的连接"
 
 #: tools/crm_resource.c:1750
 #, c-format
 msgid "Could not connect to the CIB: %s"
 msgstr "不能连接到CIB:%s"
 
 #: tools/crm_resource.c:1771
 #, c-format
 msgid "Resource '%s' not found"
 msgstr "没有发现'%s'资源"
 
 #: tools/crm_resource.c:1783
 #, c-format
 msgid "Cannot operate on clone resource instance '%s'"
 msgstr "不能操作克隆资源实例'%s'"
 
 #: tools/crm_resource.c:1795
 #, c-format
 msgid "Node '%s' not found"
msgstr "没有发现'%s'节点"
 
 #: tools/crm_resource.c:1806 tools/crm_resource.c:1815
 #, c-format
 msgid "Error connecting to the controller: %s"
 msgstr "连接到控制器错误:%s"
 
 #: tools/crm_resource.c:2064
 msgid "You need to supply a value with the -v option"
 msgstr "需要使用-v选项提供一个值"
 
 #: tools/crm_resource.c:2119
 #, c-format
 msgid "Unimplemented command: %d"
msgstr "未实现的命令:%d"
 
 #: tools/crm_resource.c:2149
 #, c-format
 msgid "Error performing operation: %s"
 msgstr "执行操作错误:%s"
 
 #~ msgid ""
 #~ "If nonzero, along with `have-watchdog=true` automatically set by the "
 #~ "cluster, when fencing is required, watchdog-based self-fencing will be "
 #~ "performed via SBD without requiring a fencing resource explicitly "
 #~ "configured. If `stonith-watchdog-timeout` is set to a positive value, "
 #~ "unseen nodes are assumed to self-fence within this much time. +WARNING:+ "
 #~ "It must be ensured that this value is larger than the "
 #~ "`SBD_WATCHDOG_TIMEOUT` environment variable on all nodes. Pacemaker "
 #~ "verifies the settings individually on all nodes and prevents startup or "
 #~ "shuts down if configured wrongly on the fly. It's strongly recommended "
 #~ "that `SBD_WATCHDOG_TIMEOUT` is set to the same value on all nodes. If "
 #~ "`stonith-watchdog-timeout` is set to a negative value, and "
 #~ "`SBD_WATCHDOG_TIMEOUT` is set, twice that value will be used. +WARNING:+ "
 #~ "In this case, it's essential (currently not verified by Pacemaker) that "
 #~ "`SBD_WATCHDOG_TIMEOUT` is set to the same value on all nodes."
 #~ msgstr ""
 #~ "如果值非零,且集群设置了 `have-watchdog=true` ,当需要 fence 操作时,基于 "
 #~ "watchdog 的自我 fence 机制将通过SBD执行,而不需要显式配置 fence 资源。如"
 #~ "果 `stonith-watchdog-timeout` 被设为正值,则假定不可见的节点在这段时间内自"
 #~ "我fence。 +WARNING:+ 必须确保该值大于所有节点上的`SBD_WATCHDOG_TIMEOUT` 环"
 #~ "境变量。Pacemaker将在所有节点上单独验证设置,如发现有错误的动态配置,将防"
 #~ "止节点启动或关闭。强烈建议在所有节点上将 `SBD_WATCHDOG_TIMEOUT` 设置为相同"
 #~ "的值。如果 `stonith-watchdog-timeout` 设置为负值。并且设置了 "
 #~ "`SBD_WATCHDOG_TIMEOUT` ,则将使用该值的两倍, +WARNING:+ 在这种情况下,必"
 #~ "须将所有节点上 `SBD_WATCHDOG_TIMEOUT` 设置为相同的值(目前没有通过pacemaker"
 #~ "验证)。"