diff --git a/cts/patterns.py b/cts/patterns.py
index 8f0388a78a..ea1dd94ba8 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,393 +1,392 @@
 """ Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 # Pacemaker targets compatibility with Python 2.7 and 3.2+
 from __future__ import print_function, unicode_literals, absolute_import, division
 
 __copyright__ = "Copyright 2008-2018 Andrew Beekhof <andrew@beekhof.net>"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import sys, os
 
 from cts.CTSvars import *
 
 patternvariants = {}
 class BasePatterns(object):
     def __init__(self, name):
         self.name = name
         patternvariants[name] = self
         self.ignore = [
             "avoid confusing Valgrind",
 
             # Logging bug in some versions of libvirtd
             r"libvirtd.*: internal error: Failed to parse PCI config address",
         ]
         self.BadNews = []
         self.components = {}
         self.commands = {
             "StatusCmd"      : "crmadmin -t 60000 -S %s 2>/dev/null",
             "CibQuery"       : "cibadmin -Ql",
             "CibAddXml"      : "cibadmin --modify -c --xml-text %s",
             "CibDelXpath"    : "cibadmin --delete --xpath %s",
             # 300,000 == 5 minutes
             "RscRunning"     : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
             "CIBfile"        : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
             "TmpDir"         : "/tmp",
 
             "BreakCommCmd"   : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
             "FixCommCmd"     : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
 
 # tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
 # tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
 # tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
 # tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
             "ReduceCommCmd"  : "",
             "RestoreCommCmd" : "tc qdisc del dev lo root",
 
             "SetCheckInterval"    : "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-recheck-interval-setting\" name=\"cluster-recheck-interval\" value=\"%s\"/></cluster_property_set>'",
             "ClearCheckInterval"    : "cibadmin --delete --xpath \"//nvpair[@name='cluster-recheck-interval']\"",
 
             "MaintenanceModeOn"    : "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-maintenance-mode-setting\" name=\"maintenance-mode\" value=\"true\"/></cluster_property_set>'",
             "MaintenanceModeOff"    : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
 
             "StandbyCmd"      : "crm_attribute -Vq  -U %s -n standby -l forever -v %s 2>/dev/null",
             "StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
         }
         self.search = {
             "Pat:DC_IDLE"      : "pacemaker-controld.*State transition.*-> S_IDLE",
             
             # This won't work if we have multiple partitions
             "Pat:Local_started" : "%s\W.*The local CRM is operational",
             "Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
             "Pat:DC_started"    : r"%s\W.*State transition.*-> S_IDLE",
             "Pat:We_stopped"    : "%s\W.*OVERRIDE THIS PATTERN",
             "Pat:They_stopped"  : "%s\W.*LOST:.* %s ",
             "Pat:They_dead"     : "node %s.*: is dead",
             "Pat:TransitionComplete" : "Transition status: Complete: complete",
 
             "Pat:Fencing_start" : "(Initiating remote operation|Requesting peer fencing ).* (for|of) %s",
             "Pat:Fencing_ok"    : r"pacemaker-fenced.*:\s*Operation .* of %s by .* for .*@.*: OK",
             "Pat:Fencing_recover"    : r"schedulerd.*: Recover %s",
 
             "Pat:RscOpOK"       : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
             "Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
             "Pat:NodeFenced"    : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
             "Pat:FenceOpOK"     : "Operation .* for host '%s' with device .* returned: 0",
         }
 
     def get_component(self, key):
         if key in self.components:
             return self.components[key]
         print("Unknown component '%s' for %s" % (key, self.name))
         return []
 
     def get_patterns(self, key):
         if key == "BadNews":
             return self.BadNews
         elif key == "BadNewsIgnore":
             return self.ignore
         elif key == "Commands":
             return self.commands
         elif key == "Search":
             return self.search
         elif key == "Components":
             return self.components
         print("Unknown pattern group '%s' for %s" % (key, self.name))
         return None
 
     def __getitem__(self, key):
         if key == "Name":
             return self.name
         elif key in self.commands:
             return self.commands[key]
         elif key in self.search:
             return self.search[key]
         else:
             print("Unknown template '%s' for %s" % (key, self.name))
             return None
 
 
 class crm_corosync(BasePatterns):
     '''
     Patterns for Corosync version 2 cluster manager class
     '''
 
     def __init__(self, name):
         BasePatterns.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "service corosync start && service pacemaker start",
             "StopCmd"        : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
 
             "EpochCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "PartitionCmd"    : "crm_node -p",
         })
 
         self.search.update({
             # Close enough ... "Corosync Cluster Engine exiting normally" isn't
             # printed reliably.
             "Pat:We_stopped"   : "%s\W.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
             "Pat:They_dead"    : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
 
             "Pat:ChildExit"    : r"\[[0-9]+\] exited with status [0-9]+ \(",
             "Pat:ChildKilled"  : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated with signal 9",
             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
 
             "Pat:InfraUp"      : "%s\W.*corosync.*Initializing transport",
             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
         })
 
         self.ignore = self.ignore + [
             r"crm_mon:",
             r"crmadmin:",
             r"update_trace_data",
             r"async_notify:.*strange, client not found",
             r"Parse error: Ignoring unknown option .*nodename",
             r"error.*: Operation 'reboot' .* with device 'FencingFail' returned:",
             r"getinfo response error: 1$",
-            "sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
-            r"sbd.* pcmk:\s*error:.*Connection to cib_ro failed",
-            r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* closed .I/O condition=17",
+            r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
+            r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
         ]
 
         self.BadNews = [
             r"error:",
             r"crit:",
             r"ERROR:",
             r"CRIT:",
             r"Shutting down...NOW",
             r"Timer I_TERMINATE just popped",
             r"input=I_ERROR",
             r"input=I_FAIL",
             r"input=I_INTEGRATED cause=C_TIMER_POPPED",
             r"input=I_FINALIZED cause=C_TIMER_POPPED",
             r"input=I_ERROR",
             r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
             r"schedulerd.*Attempting recovery of resource",
             r"is taking more than 2x its timeout",
             r"Confirm not received from",
             r"Welcome reply not received from",
             r"Attempting to schedule .* after a stop",
             r"Resource .* was active at shutdown",
             r"duplicate entries for call_id",
             r"Search terminated:",
             r":global_timer_callback",
             r"Faking parameter digest creation",
             r"Parameters to .* action changed:",
             r"Parameters to .* changed",
             r"\[[0-9]+\] terminated with signal [0-9]+ \(",
             r"schedulerd:.*Recover .*\(.* -\> .*\)",
             r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
             r"Peer is not part of our cluster",
             r"We appear to be in an election loop",
             r"Unknown node -> we will not deliver message",
             r"(Blackbox dump requested|Problem detected)",
             r"pacemakerd.*Could not connect to Cluster Configuration Database API",
             r"Receiving messages from a node we think is dead",
             r"share the same cluster nodeid",
             r"share the same name",
 
             #r"crm_ipc_send:.*Request .* failed",
             #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
 
             # Not inherently bad, but worth tracking
             #r"No need to invoke the TE",
             #r"ping.*: DEBUG: Updated connected = 0",
             #r"Digest mis-match:",
             r"pacemaker-controld:.*Transition failed: terminated",
             r"Local CIB .* differs from .*:",
             r"warn.*:\s*Continuing but .* will NOT be used",
             r"warn.*:\s*Cluster configuration file .* is corrupt",
             #r"Executing .* fencing operation",
             r"Election storm",
             r"stalled the FSA with pending inputs",
         ]
 
         self.components["common-ignore"] = [
                     "Pending action:",
                     "error: crm_log_message_adv:",
                     r"resource( was|s were) active at shutdown",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "Sending message to CIB service FAILED",
                     "apply_xml_diff:.*Diff application failed!",
                     r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
                     "unconfirmed_actions:.*Waiting on .* unconfirmed actions",
                     "cib_native_msgready:.*Message pending on command channel",
                     r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
                     "verify_stopped:.*Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
                     "error: attrd_connection_destroy:.*Lost connection to attrd",
                     r".*:\s*Executing .* fencing operation \(.*\) on ",
                     r".*:\s*Requesting fencing \([^)]+\) of node ",
                     r"(Blackbox dump requested|Problem detected)",
 #                    "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
 #                    "error: process_pe_message: Transition .* ERRORs found during PE processing",
             ]
         
         self.components["corosync-ignore"] = [
             r"error:.*Connection to the CPG API failed: Library error",
             r"\[[0-9]+\] exited with status [0-9]+ \(",
             r"pacemaker-based.*error:.*Corosync connection lost",
             r"pacemaker-fenced.*error:.*Corosync connection terminated",
             r"pacemaker-controld.*State transition .* S_RECOVERY",
             r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
             r"pacemaker-controld.*error:.*Could not recover from internal error",
             r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
             r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
             r"crit: Fencing daemon connection failed",
             ]
 
         self.components["corosync"] = [
             r"pacemakerd.*error:.*Connection destroyed",
             r"attrd.*:\s*(crit|error):.*Lost connection to (Corosync|CIB) service",
             r"pacemaker-fenced.*:\s*(Corosync connection terminated|Shutting down)",
             r"pacemaker-based.*:\s*Corosync connection lost!\s+Exiting.",
             r"pacemaker-controld.*:\s*(connection terminated|Disconnected from Corosync)",
             r"schedulerd.*Scheduling Node .* for STONITH",
             r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* for .*:\s*OK",
         ]
 
         self.components["pacemaker-based"] = [
             r"State transition .* S_RECOVERY",
             r"Respawning failed child process: (pacemaker-attrd|pacemaker-controld)",
             r"Connection to cib_.* (failed|closed)",
             r"pacemaker-controld.*:.*Connection to the CIB terminated...",
             r"attrd.*:.*(Lost connection to CIB service|Connection to the CIB terminated)",
             r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
             r"attrd\[[0-9]+\] exited with status 102 \(",
             r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
             r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
             r"pacemaker-controld.*Could not recover from internal error",
         ]
         self.components["pacemaker-based-ignore"] = [
             r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
         ]
 
         self.components["pacemaker-execd"] = [
             r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)",
             r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
             r"pacemaker-controld.*State transition .* S_RECOVERY",
             r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
             r"pacemaker-controld.*Could not recover from internal error",
             r"pacemakerd.*pacemaker-execd.* terminated with signal 9",
             r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
             r"pacemakerd.*Respawning failed child process: pacemaker-execd",
             r"pacemakerd.*Respawning failed child process: pacemaker-controld",
         ]
         self.components["pacemaker-execd-ignore"] = []
 
         self.components["pacemaker-controld"] = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 # Only if the node wasn't the DC:  "State transition S_IDLE",
                     "State transition .* -> S_IDLE",
                     ]
         self.components["pacemaker-controld-ignore"] = []
 
         self.components["pacemaker-attrd"] = []
         self.components["pacemaker-attrd-ignore"] = []
 
         self.components["pacemaker-schedulerd"] = [
                     "State transition .* S_RECOVERY",
                     r"Respawning failed child process: pacemaker-controld",
                     r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
                     "Connection to pengine failed",
                     "Connection to pengine.* closed",
                     r"Connection to the scheduler failed",
                     "pacemaker-controld.*I_ERROR.*save_cib_contents",
                     r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                     "pacemaker-controld.*Could not recover from internal error",
                     ]
         self.components["pacemaker-schedulerd-ignore"] = []
 
         self.components["pacemaker-fenced"] = [
             r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
             r"Fencing daemon connection failed",
             r"pacemaker-controld.*:\s*warn.*:\s*Callback already present",
         ]
         self.components["pacemaker-fenced-ignore"] = [
             r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
             r"crit:.*Fencing daemon connection failed",
             r"error:.*Sign-in failed: triggered a retry",
             r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
             r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
         ]
         self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
 
 
 class crm_corosync_docker(crm_corosync):
     '''
     Patterns for Corosync version 2 cluster manager running in Docker containers
     '''
     def __init__(self, name):
         crm_corosync.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "pcmk_start",
             "StopCmd"        : "pcmk_stop",
         })
 
 
 class PatternSelector(object):
 
     def __init__(self, name=None):
         self.name = name
         self.base = BasePatterns("crm-base")
 
         if not name:
             crm_corosync("crm-corosync")
         elif name == "crm-corosync":
             crm_corosync(name)
         elif name == "crm-corosync-docker":
             crm_corosync_docker(name)
 
     def get_variant(self, variant):
         if variant in patternvariants:
             return patternvariants[variant]
         print("defaulting to crm-base for %s" % variant)
         return self.base
 
     def get_patterns(self, variant, kind):
         return self.get_variant(variant).get_patterns(kind)
 
     def get_template(self, variant, key):
         v = self.get_variant(variant)
         return v[key]
 
     def get_component(self, variant, kind):
         return self.get_variant(variant).get_component(kind)
 
     def __getitem__(self, key):
         return self.get_template(self.name, key)
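
 # A minimal usage sketch (hypothetical node names): templates are plain
 # strings; entries containing "%s" placeholders are filled in by callers:
 #
 #   patterns = PatternSelector("crm-corosync")
 #   start_cmd = patterns["StartCmd"]
 #   stop_msg = patterns["Pat:They_stopped"] % ("node1", "node2")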
 
 # python cts/patterns.py -k crm-corosync -t StartCmd
 if __name__ == '__main__':
 
     pdir = os.path.dirname(sys.path[0])
     sys.path.insert(0, pdir) # So that things work from the source directory

     kind = None
     template = None

     skipthis = None
     args = sys.argv[1:]
     for i in range(0, len(args)):
         if skipthis:
             skipthis = None
             continue

         elif args[i] == "-k" or args[i] == "--kind":
             skipthis = 1
             kind = args[i+1]

         elif args[i] == "-t" or args[i] == "--template":
             skipthis = 1
             template = args[i+1]

         else:
             print("Illegal argument " + args[i])
 
 
     print(PatternSelector(kind)[template])
diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c
index ce9f0ec3dc..3627ca19eb 100644
--- a/lib/common/mainloop.c
+++ b/lib/common/mainloop.c
@@ -1,1244 +1,1245 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <stdlib.h>
 #include <signal.h>
 #include <errno.h>
 
 #include <sys/wait.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/mainloop.h>
 #include <crm/common/ipcs.h>
 
 #include <qb/qbarray.h>
 
 struct mainloop_child_s {
     pid_t pid;
     char *desc;
     unsigned timerid;
     gboolean timeout;
     void *privatedata;
 
     enum mainloop_child_flags flags;
 
     /* Called when a process dies */
     void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode);
 };
 
 struct trigger_s {
     GSource source;
     gboolean running;
     gboolean trigger;
     void *user_data;
     guint id;
 
 };
 
 static gboolean
 crm_trigger_prepare(GSource * source, gint * timeout)
 {
     crm_trigger_t *trig = (crm_trigger_t *) source;
 
     /* cluster-glue's FD and IPC related sources make use of
      * g_source_add_poll() but do not set a timeout in their prepare
      * functions
      *
      * This means mainloop's poll() will block until an event occurs for
      * one of these sources - any /other/ type of source, such as this
      * one or g_idle_*, that doesn't use g_source_add_poll() is S-O-L
      * and won't be processed until something fd-based happens.
      *
      * Luckily the timeout we can set here affects all sources and
      * puts an upper limit on how long poll() can take.
      *
      * So unconditionally set a small-ish timeout (not so small that
      * we're in constant motion), which acts as an upper bound on how
      * long signal handling might be delayed.
      */
     *timeout = 500;             /* Timeout in ms */
 
     return trig->trigger;
 }
 
 static gboolean
 crm_trigger_check(GSource * source)
 {
     crm_trigger_t *trig = (crm_trigger_t *) source;
 
     return trig->trigger;
 }
 
 static gboolean
 crm_trigger_dispatch(GSource * source, GSourceFunc callback, gpointer userdata)
 {
     int rc = TRUE;
     crm_trigger_t *trig = (crm_trigger_t *) source;
 
     if (trig->running) {
         /* Wait until the existing job is complete before starting the next one */
         return TRUE;
     }
     trig->trigger = FALSE;
 
     if (callback) {
         rc = callback(trig->user_data);
         if (rc < 0) {
             crm_trace("Trigger handler %p not yet complete", trig);
             trig->running = TRUE;
             rc = TRUE;
         }
     }
     return rc;
 }
 
 static void
 crm_trigger_finalize(GSource * source)
 {
     crm_trace("Trigger %p destroyed", source);
 }
 
 #if 0
 struct _GSourceCopy
 {
   gpointer callback_data;
   GSourceCallbackFuncs *callback_funcs;
 
   const GSourceFuncs *source_funcs;
   guint ref_count;
 
   GMainContext *context;
 
   gint priority;
   guint flags;
   guint source_id;
 
   GSList *poll_fds;
   
   GSource *prev;
   GSource *next;
 
   char    *name;
 
   void *priv;
 };
 
 static int
 g_source_refcount(GSource * source)
 {
     /* Duplicating the contents of private header files is a necessary evil */
     if (source) {
         struct _GSourceCopy *evil = (struct _GSourceCopy*)source;
         return evil->ref_count;
     }
     return 0;
 }
 #else
 static int g_source_refcount(GSource * source)
 {
     return 0;
 }
 #endif
 
 static GSourceFuncs crm_trigger_funcs = {
     crm_trigger_prepare,
     crm_trigger_check,
     crm_trigger_dispatch,
     crm_trigger_finalize,
 };
 
 static crm_trigger_t *
 mainloop_setup_trigger(GSource * source, int priority, int (*dispatch) (gpointer user_data),
                        gpointer userdata)
 {
     crm_trigger_t *trigger = NULL;
 
     trigger = (crm_trigger_t *) source;
 
     trigger->id = 0;
     trigger->trigger = FALSE;
     trigger->user_data = userdata;
 
     if (dispatch) {
         g_source_set_callback(source, dispatch, trigger, NULL);
     }
 
     g_source_set_priority(source, priority);
     g_source_set_can_recurse(source, FALSE);
 
     crm_trace("Setup %p with ref-count=%u", source, g_source_refcount(source));
     trigger->id = g_source_attach(source, NULL);
     crm_trace("Attached %p with ref-count=%u", source, g_source_refcount(source));
 
     return trigger;
 }
 
 void
 mainloop_trigger_complete(crm_trigger_t * trig)
 {
     crm_trace("Trigger handler %p complete", trig);
     trig->running = FALSE;
 }
 
 /* If dispatch returns:
  *  -1: Job running but not complete
  *   0: Remove the trigger from mainloop
  *   1: Leave the trigger in mainloop
  */
 crm_trigger_t *
 mainloop_add_trigger(int priority, int (*dispatch) (gpointer user_data), gpointer userdata)
 {
     GSource *source = NULL;
 
     CRM_ASSERT(sizeof(crm_trigger_t) > sizeof(GSource));
     source = g_source_new(&crm_trigger_funcs, sizeof(crm_trigger_t));
     CRM_ASSERT(source != NULL);
 
     return mainloop_setup_trigger(source, priority, dispatch, userdata);
 }
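
 /* A minimal usage sketch (hypothetical callback): a one-shot job whose
  * dispatch function returns 0, so the trigger is removed from mainloop
  * after it fires once:
  *
  *   static int my_dispatch(gpointer user_data) { do_work(); return 0; }
  *   ...
  *   crm_trigger_t *t = mainloop_add_trigger(G_PRIORITY_HIGH, my_dispatch, NULL);
  *   mainloop_set_trigger(t);
  */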
 
 void
 mainloop_set_trigger(crm_trigger_t * source)
 {
     if(source) {
         source->trigger = TRUE;
     }
 }
 
 gboolean
 mainloop_destroy_trigger(crm_trigger_t * source)
 {
     GSource *gs = NULL;
 
     if(source == NULL) {
         return TRUE;
     }
 
     gs = (GSource *)source;
 
     if(g_source_refcount(gs) > 2) {
         crm_info("Trigger %p is still referenced %u times", gs, g_source_refcount(gs));
     }
 
     g_source_destroy(gs); /* Remove from mainloop, ref_count-- */
     g_source_unref(gs); /* The caller no longer carries a reference to source
                          *
                          * At this point the source should be free'd,
                          * unless we're currently processing said
                          * source, in which case mainloop holds an
                          * additional reference and it will be free'd
                          * once our processing completes
                          */
     return TRUE;
 }
 
 typedef struct signal_s {
     crm_trigger_t trigger;      /* must be first */
     void (*handler) (int sig);
     int signal;
 
 } crm_signal_t;
 
 static crm_signal_t *crm_signals[NSIG];
 
 static gboolean
 crm_signal_dispatch(GSource * source, GSourceFunc callback, gpointer userdata)
 {
     crm_signal_t *sig = (crm_signal_t *) source;
 
     if(sig->signal != SIGCHLD) {
         crm_notice("Caught '%s' signal "CRM_XS" %d (%s handler)",
                    strsignal(sig->signal), sig->signal,
                    (sig->handler? "invoking" : "no"));
     }
 
     sig->trigger.trigger = FALSE;
     if (sig->handler) {
         sig->handler(sig->signal);
     }
     return TRUE;
 }
 
 static void
 mainloop_signal_handler(int sig)
 {
     if (sig > 0 && sig < NSIG && crm_signals[sig] != NULL) {
         mainloop_set_trigger((crm_trigger_t *) crm_signals[sig]);
     }
 }
 
 static GSourceFuncs crm_signal_funcs = {
     crm_trigger_prepare,
     crm_trigger_check,
     crm_signal_dispatch,
     crm_trigger_finalize,
 };
 
 gboolean
 crm_signal(int sig, void (*dispatch) (int sig))
 {
     sigset_t mask;
     struct sigaction sa;
     struct sigaction old;
 
     if (sigemptyset(&mask) < 0) {
         crm_perror(LOG_ERR, "Call to sigemptyset failed");
         return FALSE;
     }
 
     memset(&sa, 0, sizeof(struct sigaction));
     sa.sa_handler = dispatch;
     sa.sa_flags = SA_RESTART;
     sa.sa_mask = mask;
 
     if (sigaction(sig, &sa, &old) < 0) {
         crm_perror(LOG_ERR, "Could not install signal handler for signal %d", sig);
         return FALSE;
     }
 
     return TRUE;
 }
 
 static void
 mainloop_destroy_signal_entry(int sig)
 {
     crm_signal_t *tmp = crm_signals[sig];
 
     crm_signals[sig] = NULL;
 
     crm_trace("Destroying signal %d", sig);
     mainloop_destroy_trigger((crm_trigger_t *) tmp);
 }
 
 gboolean
 mainloop_add_signal(int sig, void (*dispatch) (int sig))
 {
     GSource *source = NULL;
     int priority = G_PRIORITY_HIGH - 1;
 
     if (sig == SIGTERM) {
         /* TERM is higher priority than other signals,
          *   signals are higher priority than other ipc.
          * Yes, minus: smaller is "higher"
          */
         priority--;
     }
 
     if (sig >= NSIG || sig < 0) {
         crm_err("Signal %d is out of range", sig);
         return FALSE;
 
     } else if (crm_signals[sig] != NULL && crm_signals[sig]->handler == dispatch) {
         crm_trace("Signal handler for %d is already installed", sig);
         return TRUE;
 
     } else if (crm_signals[sig] != NULL) {
         crm_err("Different signal handler for %d is already installed", sig);
         return FALSE;
     }
 
     CRM_ASSERT(sizeof(crm_signal_t) > sizeof(GSource));
     source = g_source_new(&crm_signal_funcs, sizeof(crm_signal_t));
 
     crm_signals[sig] = (crm_signal_t *) mainloop_setup_trigger(source, priority, NULL, NULL);
     CRM_ASSERT(crm_signals[sig] != NULL);
 
     crm_signals[sig]->handler = dispatch;
     crm_signals[sig]->signal = sig;
 
     if (crm_signal(sig, mainloop_signal_handler) == FALSE) {
         mainloop_destroy_signal_entry(sig);
         return FALSE;
     }
 #if 0
     /* If we want signals to interrupt mainloop's poll(), instead of waiting for
      * the timeout, then we should call siginterrupt() below
      *
      * For now, just enforce a low timeout
      */
     if (siginterrupt(sig, 1) < 0) {
         crm_perror(LOG_INFO, "Could not enable system call interruptions for signal %d", sig);
     }
 #endif
 
     return TRUE;
 }
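
 /* A usage sketch (hypothetical handler, assuming a GMainLoop *loop exists):
  * route SIGTERM through mainloop so the handler runs during normal
  * dispatch rather than in asynchronous signal context:
  *
  *   static void shutdown_cb(int sig) { g_main_loop_quit(loop); }
  *   ...
  *   mainloop_add_signal(SIGTERM, shutdown_cb);
  */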
 
 gboolean
 mainloop_destroy_signal(int sig)
 {
     if (sig >= NSIG || sig < 0) {
         crm_err("Signal %d is out of range", sig);
         return FALSE;
 
     } else if (crm_signal(sig, NULL) == FALSE) {
         crm_perror(LOG_ERR, "Could not uninstall signal handler for signal %d", sig);
         return FALSE;
 
     } else if (crm_signals[sig] == NULL) {
         return TRUE;
     }
     mainloop_destroy_signal_entry(sig);
     return TRUE;
 }
 
 static qb_array_t *gio_map = NULL;
 
 void
 mainloop_cleanup(void) 
 {
     if (gio_map) {
         qb_array_free(gio_map);
     }
 
     for (int sig = 0; sig < NSIG; ++sig) {
         mainloop_destroy_signal_entry(sig);
     }
 }
 
 /*
  * libqb...
  */
 struct gio_to_qb_poll {
     int32_t is_used;
     guint source;
     int32_t events;
     void *data;
     qb_ipcs_dispatch_fn_t fn;
     enum qb_loop_priority p;
 };
 
 static gboolean
 gio_read_socket(GIOChannel * gio, GIOCondition condition, gpointer data)
 {
     struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data;
     gint fd = g_io_channel_unix_get_fd(gio);
 
     crm_trace("%p.%d %d", data, fd, condition);
 
     /* If this assert gets hit, then there is a race condition between
      * when we destroy an fd and when mainloop actually gives it up */
     CRM_ASSERT(adaptor->is_used > 0);
 
     return (adaptor->fn(fd, condition, adaptor->data) == 0);
 }
 
 static void
 gio_poll_destroy(gpointer data)
 {
     struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data;
 
     adaptor->is_used--;
     CRM_ASSERT(adaptor->is_used >= 0);
 
     if (adaptor->is_used == 0) {
         crm_trace("Marking adaptor %p unused", adaptor);
         adaptor->source = 0;
     }
 }
 
 static int32_t
 gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts,
                          void *data, qb_ipcs_dispatch_fn_t fn, int32_t add)
 {
     struct gio_to_qb_poll *adaptor;
     GIOChannel *channel;
     int32_t res = 0;
 
     res = qb_array_index(gio_map, fd, (void **)&adaptor);
     if (res < 0) {
         crm_err("Array lookup failed for fd=%d: %d", fd, res);
         return res;
     }
 
     crm_trace("Adding fd=%d to mainloop as adaptor %p", fd, adaptor);
 
     if (add && adaptor->source) {
         crm_err("Adaptor for descriptor %d is still in-use", fd);
         return -EEXIST;
     }
     if (!add && !adaptor->is_used) {
         crm_err("Adaptor for descriptor %d is not in-use", fd);
         return -ENOENT;
     }
 
     /* channel is created with ref_count = 1 */
     channel = g_io_channel_unix_new(fd);
     if (!channel) {
         crm_err("No memory left to add fd=%d", fd);
         return -ENOMEM;
     }
 
     if (adaptor->source) {
         g_source_remove(adaptor->source);
         adaptor->source = 0;
     }
 
     /* Unlike the poll() API, glib doesn't tell us about HUPs by default */
     evts |= (G_IO_HUP | G_IO_NVAL | G_IO_ERR);
 
     adaptor->fn = fn;
     adaptor->events = evts;
     adaptor->data = data;
     adaptor->p = p;
     adaptor->is_used++;
     adaptor->source =
         g_io_add_watch_full(channel, G_PRIORITY_DEFAULT, evts, gio_read_socket, adaptor,
                             gio_poll_destroy);
 
     /* Now that mainloop holds a reference to channel, thanks to
      * g_io_add_watch_full(), drop ours from g_io_channel_unix_new().
      *
      * This means that channel will be free'd by:
      * g_main_context_dispatch()
      *  -> g_source_destroy_internal()
      *      -> g_source_callback_unref()
      * shortly after gio_poll_destroy() completes
      */
     g_io_channel_unref(channel);
 
     crm_trace("Added to mainloop with gsource id=%d", adaptor->source);
     if (adaptor->source > 0) {
         return 0;
     }
 
     return -EINVAL;
 }
 
 static int32_t
 gio_poll_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t evts,
                       void *data, qb_ipcs_dispatch_fn_t fn)
 {
     return gio_poll_dispatch_update(p, fd, evts, data, fn, QB_TRUE);
 }
 
 static int32_t
 gio_poll_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t evts,
                       void *data, qb_ipcs_dispatch_fn_t fn)
 {
     return gio_poll_dispatch_update(p, fd, evts, data, fn, QB_FALSE);
 }
 
 static int32_t
 gio_poll_dispatch_del(int32_t fd)
 {
     struct gio_to_qb_poll *adaptor;
 
     crm_trace("Looking for fd=%d", fd);
     if (qb_array_index(gio_map, fd, (void **)&adaptor) == 0) {
         if (adaptor->source) {
             g_source_remove(adaptor->source);
             adaptor->source = 0;
         }
     }
     return 0;
 }
 
 struct qb_ipcs_poll_handlers gio_poll_funcs = {
     .job_add = NULL,
     .dispatch_add = gio_poll_dispatch_add,
     .dispatch_mod = gio_poll_dispatch_mod,
     .dispatch_del = gio_poll_dispatch_del,
 };
 
 static enum qb_ipc_type
 pick_ipc_type(enum qb_ipc_type requested)
 {
     const char *env = getenv("PCMK_ipc_type");
 
     if (env && strcmp("shared-mem", env) == 0) {
         return QB_IPC_SHM;
     } else if (env && strcmp("socket", env) == 0) {
         return QB_IPC_SOCKET;
     } else if (env && strcmp("posix", env) == 0) {
         return QB_IPC_POSIX_MQ;
     } else if (env && strcmp("sysv", env) == 0) {
         return QB_IPC_SYSV_MQ;
     } else if (requested == QB_IPC_NATIVE) {
         /* We prefer shared memory because the server never blocks on
          * send. If only part of a message fits into the socket, libqb
          * needs to block until the remainder can be sent too;
          * otherwise the client would wait forever for the remaining
          * bytes.
          */
         return QB_IPC_SHM;
     }
     return requested;
 }
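
 /* For example, running a daemon with PCMK_ipc_type=socket in its
  * environment forces QB_IPC_SOCKET here, even if the caller requested
  * QB_IPC_NATIVE. */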
 
 qb_ipcs_service_t *
 mainloop_add_ipc_server(const char *name, enum qb_ipc_type type,
                         struct qb_ipcs_service_handlers * callbacks)
 {
     int rc = 0;
     qb_ipcs_service_t *server = NULL;
 
     if (gio_map == NULL) {
         gio_map = qb_array_create_2(64, sizeof(struct gio_to_qb_poll), 1);
     }
 
     crm_client_init();
     server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks);
 
 #ifdef HAVE_IPCS_GET_BUFFER_SIZE
     /* All clients should use at least ipc_buffer_max as their buffer size */
     qb_ipcs_enforce_buffer_size(server, crm_ipc_default_buffer_size());
 #endif
 
     qb_ipcs_poll_handlers_set(server, &gio_poll_funcs);
 
     rc = qb_ipcs_run(server);
     if (rc < 0) {
         crm_err("Could not start %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc);
         return NULL;
     }
 
     return server;
 }
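
 /* A usage sketch (hypothetical names): define qb_ipcs_service_handlers
  * for connect/dispatch/disconnect, then:
  *
  *   qb_ipcs_service_t *srv = mainloop_add_ipc_server("my-service",
  *                                                    QB_IPC_NATIVE,
  *                                                    &my_callbacks);
  *   if (srv == NULL) {
  *       crm_err("IPC server could not be started");
  *   }
  */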
 
 void
 mainloop_del_ipc_server(qb_ipcs_service_t * server)
 {
     if (server) {
         qb_ipcs_destroy(server);
     }
 }
 
 struct mainloop_io_s {
     char *name;
     void *userdata;
 
     int fd;
     guint source;
     crm_ipc_t *ipc;
     GIOChannel *channel;
 
     int (*dispatch_fn_ipc) (const char *buffer, ssize_t length, gpointer userdata);
     int (*dispatch_fn_io) (gpointer userdata);
     void (*destroy_fn) (gpointer userdata);
 
 };
 
 static gboolean
 mainloop_gio_callback(GIOChannel * gio, GIOCondition condition, gpointer data)
 {
     gboolean keep = TRUE;
     mainloop_io_t *client = data;
 
     CRM_ASSERT(client->fd == g_io_channel_unix_get_fd(gio));
 
     if (condition & G_IO_IN) {
         if (client->ipc) {
             long rc = 0;
             int max = 10;
 
             do {
                 rc = crm_ipc_read(client->ipc);
                 if (rc <= 0) {
                     crm_trace("Message acquisition from %s[%p] failed: %s (%ld)",
                               client->name, client, pcmk_strerror(rc), rc);
 
                 } else if (client->dispatch_fn_ipc) {
                     const char *buffer = crm_ipc_buffer(client->ipc);
 
                     crm_trace("New message from %s[%p] = %ld (I/O condition=%d)", client->name, client, rc, condition);
                     if (client->dispatch_fn_ipc(buffer, rc, client->userdata) < 0) {
                         crm_trace("Connection to %s no longer required", client->name);
                         keep = FALSE;
                     }
                 }
 
             } while (keep && rc > 0 && --max > 0);
 
         } else {
             crm_trace("New message from %s[%p] %u", client->name, client, condition);
             if (client->dispatch_fn_io) {
                 if (client->dispatch_fn_io(client->userdata) < 0) {
                     crm_trace("Connection to %s no longer required", client->name);
                     keep = FALSE;
                 }
             }
         }
     }
 
     if (client->ipc && crm_ipc_connected(client->ipc) == FALSE) {
-        crm_err("Connection to %s[%p] closed (I/O condition=%d)", client->name, client, condition);
+        crm_err("Connection to %s closed " CRM_XS "client=%p condition=%d",
+                client->name, client, condition);
         keep = FALSE;
 
     } else if (condition & (G_IO_HUP | G_IO_NVAL | G_IO_ERR)) {
         crm_trace("The connection %s[%p] has been closed (I/O condition=%d)",
                   client->name, client, condition);
         keep = FALSE;
 
     } else if ((condition & G_IO_IN) == 0) {
         /*
            #define      GLIB_SYSDEF_POLLIN     =1
            #define      GLIB_SYSDEF_POLLPRI    =2
            #define      GLIB_SYSDEF_POLLOUT    =4
            #define      GLIB_SYSDEF_POLLERR    =8
            #define      GLIB_SYSDEF_POLLHUP    =16
            #define      GLIB_SYSDEF_POLLNVAL   =32
 
            typedef enum
            {
            G_IO_IN      GLIB_SYSDEF_POLLIN,
            G_IO_OUT     GLIB_SYSDEF_POLLOUT,
            G_IO_PRI     GLIB_SYSDEF_POLLPRI,
            G_IO_ERR     GLIB_SYSDEF_POLLERR,
            G_IO_HUP     GLIB_SYSDEF_POLLHUP,
            G_IO_NVAL    GLIB_SYSDEF_POLLNVAL
            } GIOCondition;
 
            A bitwise combination representing a condition to watch for on an event source.
 
            G_IO_IN      There is data to read.
            G_IO_OUT     Data can be written (without blocking).
            G_IO_PRI     There is urgent data to read.
            G_IO_ERR     Error condition.
            G_IO_HUP     Hung up (the connection has been broken, usually for pipes and sockets).
            G_IO_NVAL    Invalid request. The file descriptor is not open.
          */
         crm_err("Strange condition: %d", condition);
     }
 
     /* keep == FALSE results in mainloop_gio_destroy() being called
      * just before the source is removed from mainloop
      */
     return keep;
 }
 
 static void
 mainloop_gio_destroy(gpointer c)
 {
     mainloop_io_t *client = c;
     char *c_name = strdup(client->name);
 
     /* client->source is valid but about to be destroyed (ref_count == 0) in gmain.c
      * client->channel will still have ref_count > 0... should be == 1
      */
     crm_trace("Destroying client %s[%p]", c_name, c);
 
     if (client->ipc) {
         crm_ipc_close(client->ipc);
     }
 
     if (client->destroy_fn) {
         void (*destroy_fn) (gpointer userdata) = client->destroy_fn;
 
         client->destroy_fn = NULL;
         destroy_fn(client->userdata);
     }
 
     if (client->ipc) {
         crm_ipc_t *ipc = client->ipc;
 
         client->ipc = NULL;
         crm_ipc_destroy(ipc);
     }
 
     crm_trace("Destroyed client %s[%p]", c_name, c);
 
     free(client->name); client->name = NULL;
     free(client);
 
     free(c_name);
 }
 
 mainloop_io_t *
 mainloop_add_ipc_client(const char *name, int priority, size_t max_size, void *userdata,
                         struct ipc_client_callbacks *callbacks)
 {
     mainloop_io_t *client = NULL;
     crm_ipc_t *conn = crm_ipc_new(name, max_size);
 
     if (conn && crm_ipc_connect(conn)) {
         int32_t fd = crm_ipc_get_fd(conn);
 
         client = mainloop_add_fd(name, priority, fd, userdata, NULL);
     }
 
     if (client == NULL) {
         crm_perror(LOG_TRACE, "Connection to %s failed", name);
         if (conn) {
             crm_ipc_close(conn);
             crm_ipc_destroy(conn);
         }
         return NULL;
     }
 
     client->ipc = conn;
     client->destroy_fn = callbacks->destroy;
     client->dispatch_fn_ipc = callbacks->dispatch;
     return client;
 }
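
 /* A usage sketch (hypothetical callbacks): connect to an IPC server and
  * dispatch its messages from mainloop. The dispatch member has type
  * int (*)(const char *buffer, ssize_t length, gpointer userdata), and 0
  * is passed for max_size, assuming the default buffer size applies:
  *
  *   struct ipc_client_callbacks cb = {
  *       .dispatch = my_msg_cb,
  *       .destroy  = my_destroy_cb,
  *   };
  *   mainloop_io_t *io = mainloop_add_ipc_client("my-server",
  *                                               G_PRIORITY_DEFAULT,
  *                                               0, userdata, &cb);
  */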
 
 void
 mainloop_del_ipc_client(mainloop_io_t * client)
 {
     mainloop_del_fd(client);
 }
 
 crm_ipc_t *
 mainloop_get_ipc_client(mainloop_io_t * client)
 {
     if (client) {
         return client->ipc;
     }
     return NULL;
 }
 
 mainloop_io_t *
 mainloop_add_fd(const char *name, int priority, int fd, void *userdata,
                 struct mainloop_fd_callbacks * callbacks)
 {
     mainloop_io_t *client = NULL;
 
     if (fd >= 0) {
         client = calloc(1, sizeof(mainloop_io_t));
         if (client == NULL) {
             return NULL;
         }
         client->name = strdup(name);
         client->userdata = userdata;
 
         if (callbacks) {
             client->destroy_fn = callbacks->destroy;
             client->dispatch_fn_io = callbacks->dispatch;
         }
 
         client->fd = fd;
         client->channel = g_io_channel_unix_new(fd);
         client->source =
             g_io_add_watch_full(client->channel, priority,
                                 (G_IO_IN | G_IO_HUP | G_IO_NVAL | G_IO_ERR), mainloop_gio_callback,
                                 client, mainloop_gio_destroy);
 
         /* Now that mainloop holds a reference to channel, thanks to
          * g_io_add_watch_full(), drop ours from g_io_channel_unix_new().
          *
          * This means that channel will be free'd by:
          * g_main_context_dispatch() or g_source_remove()
          *  -> g_source_destroy_internal()
          *      -> g_source_callback_unref()
          * shortly after mainloop_gio_destroy() completes
          */
         g_io_channel_unref(client->channel);
         crm_trace("Added connection %d for %s[%p].%d", client->source, client->name, client, fd);
     } else {
         errno = EINVAL;
     }
 
     return client;
 }
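
 /* A usage sketch (hypothetical callbacks): watch an arbitrary descriptor
  * such as a pipe; a dispatch return < 0 drops the source, which in turn
  * triggers the destroy callback:
  *
  *   struct mainloop_fd_callbacks cb = {
  *       .dispatch = my_fd_ready,
  *       .destroy  = my_fd_closed,
  *   };
  *   mainloop_io_t *io = mainloop_add_fd("my-pipe", G_PRIORITY_DEFAULT,
  *                                       fd, userdata, &cb);
  */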
 
 void
 mainloop_del_fd(mainloop_io_t * client)
 {
     if (client != NULL) {
         crm_trace("Removing client %s[%p]", client->name, client);
         if (client->source) {
             /* Results in mainloop_gio_destroy() being called just
              * before the source is removed from mainloop
              */
             g_source_remove(client->source);
         }
     }
 }
 
 static GListPtr child_list = NULL;
 
 pid_t
 mainloop_child_pid(mainloop_child_t * child)
 {
     return child->pid;
 }
 
 const char *
 mainloop_child_name(mainloop_child_t * child)
 {
     return child->desc;
 }
 
 int
 mainloop_child_timeout(mainloop_child_t * child)
 {
     return child->timeout;
 }
 
 void *
 mainloop_child_userdata(mainloop_child_t * child)
 {
     return child->privatedata;
 }
 
 void
 mainloop_clear_child_userdata(mainloop_child_t * child)
 {
     child->privatedata = NULL;
 }
 
 /* good function name */
 static void
 child_free(mainloop_child_t *child)
 {
     if (child->timerid != 0) {
         crm_trace("Removing timer %d", child->timerid);
         g_source_remove(child->timerid);
         child->timerid = 0;
     }
     free(child->desc);
     free(child);
 }
 
 /* terrible function name */
 static int
 child_kill_helper(mainloop_child_t *child)
 {
     int rc;
     if (child->flags & mainloop_leave_pid_group) {
         crm_debug("Kill pid %d only. leave group intact.", child->pid);
         rc = kill(child->pid, SIGKILL);
     } else {
         crm_debug("Kill pid %d's group", child->pid);
         rc = kill(-child->pid, SIGKILL);
     }
 
     if (rc < 0) {
         if (errno != ESRCH) {
             crm_perror(LOG_ERR, "kill(%d, KILL) failed", child->pid);
         }
         return -errno;
     }
     return 0;
 }
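
 /* Note: kill() with a negative pid signals the whole process group, which
  * is how children registered without mainloop_leave_pid_group are cleaned
  * up along with their descendants. */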
 
 static gboolean
 child_timeout_callback(gpointer p)
 {
     mainloop_child_t *child = p;
     int rc = 0;
 
     child->timerid = 0;
     if (child->timeout) {
         crm_crit("%s process (PID %d) will not die!", child->desc, (int)child->pid);
         return FALSE;
     }
 
     rc = child_kill_helper(child);
     if (rc == -ESRCH) {
         /* Nothing left to do. pid doesn't exist */
         return FALSE;
     }
 
     child->timeout = TRUE;
     crm_warn("%s process (PID %d) timed out", child->desc, (int)child->pid);
 
     child->timerid = g_timeout_add(5000, child_timeout_callback, child);
     return FALSE;
 }
 
 static gboolean
 child_waitpid(mainloop_child_t *child, int flags)
 {
     int rc = 0;
     int core = 0;
     int signo = 0;
     int status = 0;
     int exitcode = 0;
 
     rc = waitpid(child->pid, &status, flags);
     if(rc == 0) {
         crm_perror(LOG_DEBUG, "wait(%d) = %d", child->pid, rc);
         return FALSE;
 
     } else if(rc != child->pid) {
         signo = SIGCHLD;
         exitcode = 1;
         status = 1;
         crm_perror(LOG_ERR, "Call to waitpid(%d) failed", child->pid);
 
     } else {
         crm_trace("Managed process %d exited: %p", child->pid, child);
 
         if (WIFEXITED(status)) {
             exitcode = WEXITSTATUS(status);
             crm_trace("Managed process %d (%s) exited with rc=%d", child->pid, child->desc, exitcode);
 
         } else if (WIFSIGNALED(status)) {
             signo = WTERMSIG(status);
             crm_trace("Managed process %d (%s) exited with signal=%d", child->pid, child->desc, signo);
         }
 #ifdef WCOREDUMP
         if (WCOREDUMP(status)) {
             core = 1;
             crm_err("Managed process %d (%s) dumped core", child->pid, child->desc);
         }
 #endif
     }
 
     if (child->callback) {
         child->callback(child, child->pid, core, signo, exitcode);
     }
     return TRUE;
 }
 
 static void
 child_death_dispatch(int signal)
 {
     GListPtr iter = child_list;
     gboolean exited;
 
     while(iter) {
         GListPtr saved = NULL;
         mainloop_child_t *child = iter->data;
         exited = child_waitpid(child, WNOHANG);
 
         saved = iter;
         iter = iter->next;
 
         if (exited == FALSE) {
             continue;
         }
         crm_trace("Removing process entry %p for %d", child, child->pid);
 
         child_list = g_list_remove_link(child_list, saved);
         g_list_free(saved);
         child_free(child);
     }
 }
 
 static gboolean
 child_signal_init(gpointer p)
 {
     crm_trace("Installed SIGCHLD handler");
     /* Do NOT use g_child_watch_add() and friends, they rely on pthreads */
     mainloop_add_signal(SIGCHLD, child_death_dispatch);
 
     /* In case they terminated before the signal handler was installed */
     child_death_dispatch(SIGCHLD);
     return FALSE;
 }
 
 int
 mainloop_child_kill(pid_t pid)
 {
     GListPtr iter;
     mainloop_child_t *child = NULL;
     mainloop_child_t *match = NULL;
     /* It is impossible to block SIGKILL, so we can call waitpid()
      * without the WNOHANG flag. */
     int waitflags = 0, rc = 0;
 
     for (iter = child_list; iter != NULL && match == NULL; iter = iter->next) {
         child = iter->data;
         if (pid == child->pid) {
             match = child;
         }
     }
 
     if (match == NULL) {
         return FALSE;
     }
 
     rc = child_kill_helper(match);
     if(rc == -ESRCH) {
         /* It's gone, but hasn't shown up in waitpid() yet
          *
          * Wait until we get SIGCHLD and let child_death_dispatch()
          * clean it up as normal (so we get the correct return
          * code/status)
          *
          * The blocking alternative would be to call:
          *    child_waitpid(match, 0);
          */
         crm_trace("Waiting for child %d to be reaped by child_death_dispatch()", match->pid);
         return TRUE;
 
     } else if(rc != 0) {
         /* If the kill failed for some other reason, set the WNOHANG
          * flag since we can't be certain what happened.
          */
         waitflags = WNOHANG;
     }
 
     if (child_waitpid(match, waitflags) == FALSE) {
         /* not much we can do if this occurs */
         return FALSE;
     }
 
     child_list = g_list_remove(child_list, match);
     child_free(match);
     return TRUE;
 }
 
 /* Create/Log a new tracked process
  * To track a process group, use -pid
  */
 void
 mainloop_child_add_with_flags(pid_t pid, int timeout, const char *desc, void *privatedata, enum mainloop_child_flags flags, 
                    void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode))
 {
     static bool need_init = TRUE;
     mainloop_child_t *child = g_new(mainloop_child_t, 1);
 
     child->pid = pid;
     child->timerid = 0;
     child->timeout = FALSE;
     child->privatedata = privatedata;
     child->callback = callback;
     child->flags = flags;
 
     if(desc) {
         child->desc = strdup(desc);
     } else {
         /* g_new() does not zero memory, so ensure desc is initialized */
         child->desc = NULL;
     }
 
     if (timeout) {
         child->timerid = g_timeout_add(timeout, child_timeout_callback, child);
     }
 
     child_list = g_list_append(child_list, child);
 
     if(need_init) {
         need_init = FALSE;
         /* SIGCHLD processing has to be invoked from mainloop.
          * We do not want it to be possible to both add a child pid
          * to mainloop, and have the pid's exit callback invoked within
          * the same callstack. */
         g_timeout_add(1, child_signal_init, NULL);
     }
 }
 
 void
 mainloop_child_add(pid_t pid, int timeout, const char *desc, void *privatedata,
                    void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode))
 {
     mainloop_child_add_with_flags(pid, timeout, desc, privatedata, 0, callback);
 }
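
 /* A usage sketch (hypothetical callback): track a forked child and get a
  * callback when it exits, escalating to SIGKILL after 10s:
  *
  *   static void child_done(mainloop_child_t *p, pid_t pid, int core,
  *                          int signo, int exitcode) { ... }
  *   ...
  *   pid_t pid = fork();
  *   if (pid == 0) { ... exec the child ... }
  *   mainloop_child_add(pid, 10000, "my-helper", userdata, child_done);
  */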
 
 struct mainloop_timer_s {
         guint id;
         guint period_ms;
         bool repeat;
         char *name;
         GSourceFunc cb;
         void *userdata;
 };
 
 struct mainloop_timer_s mainloop;
 
 static gboolean mainloop_timer_cb(gpointer user_data)
 {
     int id = 0;
     bool repeat = FALSE;
     struct mainloop_timer_s *t = user_data;
 
     CRM_ASSERT(t != NULL);
 
     id = t->id;
     t->id = 0; /* Ensure it's unset during callbacks so that
                 * mainloop_timer_running() works as expected
                 */
 
     if(t->cb) {
         crm_trace("Invoking callbacks for timer %s", t->name);
         repeat = t->repeat;
         if(t->cb(t->userdata) == FALSE) {
             crm_trace("Timer %s complete", t->name);
             repeat = FALSE;
         }
     }
 
     if(repeat) {
         /* Restore if repeating */
         t->id = id;
     }
 
     return repeat;
 }
 
 bool mainloop_timer_running(mainloop_timer_t *t)
 {
     if(t && t->id != 0) {
         return TRUE;
     }
     return FALSE;
 }
 
 void mainloop_timer_start(mainloop_timer_t *t)
 {
     mainloop_timer_stop(t);
     if(t && t->period_ms > 0) {
         crm_trace("Starting timer %s", t->name);
         t->id = g_timeout_add(t->period_ms, mainloop_timer_cb, t);
     }
 }
 
 void mainloop_timer_stop(mainloop_timer_t *t)
 {
     if(t && t->id != 0) {
         crm_trace("Stopping timer %s", t->name);
         g_source_remove(t->id);
         t->id = 0;
     }
 }
 
 guint mainloop_timer_set_period(mainloop_timer_t *t, guint period_ms)
 {
     guint last = 0;
 
     if(t) {
         last = t->period_ms;
         t->period_ms = period_ms;
     }
 
     if(t && t->id != 0 && last != t->period_ms) {
         mainloop_timer_start(t);
     }
     return last;
 }
 
 
 mainloop_timer_t *
 mainloop_timer_add(const char *name, guint period_ms, bool repeat, GSourceFunc cb, void *userdata)
 {
     mainloop_timer_t *t = calloc(1, sizeof(mainloop_timer_t));
 
     if(t) {
         if(name) {
             t->name = crm_strdup_printf("%s-%u-%d", name, period_ms, repeat);
         } else {
             t->name = crm_strdup_printf("%p-%u-%d", t, period_ms, repeat);
         }
         t->id = 0;
         t->period_ms = period_ms;
         t->repeat = repeat;
         t->cb = cb;
         t->userdata = userdata;
         crm_trace("Created timer %s with %p %p", t->name, userdata, t->userdata);
     }
     return t;
 }
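
 /* A usage sketch (hypothetical callback): a repeating 1-second timer that
  * keeps firing until its callback returns FALSE:
  *
  *   static gboolean tick(gpointer data) { return do_tick(data); }
  *   ...
  *   mainloop_timer_t *t = mainloop_timer_add("tick", 1000, TRUE, tick, data);
  *   mainloop_timer_start(t);
  *   ...
  *   mainloop_timer_del(t);   -- stops the timer and frees it
  */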
 
 void
 mainloop_timer_del(mainloop_timer_t *t)
 {
     if(t) {
         crm_trace("Destroying timer %s", t->name);
         mainloop_timer_stop(t);
         free(t->name);
         free(t);
     }
 }