diff --git a/cts/CTS.py.in b/cts/CTS.py.in
index de47e82e88..f64d53e4f7 100644
--- a/cts/CTS.py.in
+++ b/cts/CTS.py.in
@@ -1,939 +1,939 @@
 """ Main classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 # Pacemaker targets compatibility with Python 2.7 and 3.2+
 from __future__ import print_function, unicode_literals, absolute_import, division
 
 __copyright__ = "Copyright 2000-2018 Alan Robertson <alanr@unix.sh>"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import os
 import re
 import sys
 import time
 import traceback
 
 if sys.version_info > (3,):
     from collections import UserDict
 else:
     from UserDict import UserDict
 
 from cts.CTSvars     import *
 from cts.logging     import LogFactory
 from cts.watcher     import LogWatcher
 from cts.remote      import RemoteFactory, input_wrapper
 from cts.environment import EnvFactory
 from cts.patterns    import PatternSelector
 
 has_log_stats = {}
 log_stats_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_stats.sh"
 log_stats = """
 #!@BASH_PATH@
 # Tool for generating system load reports while CTS runs
 
 trap "" 1
 
 f=$1; shift
 action=$1; shift
 base=`basename $0`
 
 if [ ! -e $f ]; then
     echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f
 fi
 
 function killpid() {
     if [ -e $f.pid ]; then
        kill -9 `cat $f.pid`
        rm -f $f.pid
     fi
 }
 
 function status() {
     if [ -e $f.pid ]; then
        kill -0 `cat $f.pid`
        return $?
     else
        return 1
     fi
 }
 
 function start() {
     # Is it already running?
     if
         status
     then
         return
     fi
 
     echo Active as $$
     echo $$ > $f.pid
 
     while [ 1 = 1 ]; do
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         #top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f
         echo 0 >> $f
         sleep 5
     done
 }
 
 case $action in
     start)
         start
         ;;
     start-bg|bg)
         # Use c --ssh -- ./stats.sh file start-bg
         nohup $0 $f start >/dev/null 2>&1 </dev/null &
         ;;
     stop)
         killpid
         ;;
     delete)
         killpid
         rm -f $f
         ;;
     mark)
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         echo " $*" >> $f
         start
         ;;
     *)
         echo "Unknown action: $action."
         ;;
 esac
 """
 
 class CtsLab(object):
     '''This class defines the Lab Environment for the Cluster Test System.
     It defines those things which are expected to change from test
     environment to test environment for the same cluster manager.
 
     It is where you define the set of nodes that are in your test lab,
     what kind of reset mechanism you use, etc.
 
     This class is derived from a UserDict because we hold many
     different parameters of different kinds, and this provides
-    provide a uniform and extensible interface useful for any kind of
+    a uniform and extensible interface useful for any kind of
     communication between the user/administrator/tester and CTS.
 
     At this point in time, it is the intent of this class to model static
     configuration and/or environmental data about the environment which
     doesn't change as the tests proceed.
 
     Well-known names (keys) are an important concept in this class.
     The HasMinimalKeys member function knows the minimal set of
     well-known names for the class.
 
     The following names are standard (well-known) at this time:
 
         nodes           An array of the nodes in the cluster
         reset           A ResetMechanism object
         logger          An array of objects that log strings...
         CMclass         The type of ClusterManager we are running
                         (This is a class object, not a class instance)
         RandSeed        Random seed.  It is a triple of bytes. (optional)
 
     The CTS code ignores names it doesn't know about/need.
     The individual tests have access to this information, and it is
     perfectly acceptable to provide hints, tweaks, fine-tuning
     directions or other information to the tests through this mechanism.
     '''
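 
     # Purely illustrative usage sketch (the real CTS driver builds the
     # environment from its command-line arguments); names here are
     # hypothetical:
     #
     #     lab = CtsLab(sys.argv[1:])
     #     for node in lab["nodes"]:
     #         print(node)
     #     lab.run(some_scenario, iterations)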
 
     def __init__(self, args=None):
         self.Env = EnvFactory().getInstance(args)
         self.Scenario = None
         self.logger = LogFactory()
         self.rsh = RemoteFactory().getInstance()
 
     def dump(self):
         self.Env.dump()
 
     def has_key(self, key):
         return key in list(self.Env.keys())
 
     def __getitem__(self, key):
         return self.Env[key]
 
     def __setitem__(self, key, value):
         self.Env[key] = value
 
     def run(self, Scenario, Iterations):
         if not Scenario:
             self.logger.log("No scenario was defined")
             return 1
 
         self.logger.log("Cluster nodes: ")
         for node in self.Env["nodes"]:
             self.logger.log("    * %s" % (node))
 
         if not Scenario.SetUp():
             return 1
 
         try :
             Scenario.run(Iterations)
         except :
             self.logger.log("Exception by %s" % sys.exc_info()[0])
             self.logger.traceback(traceback)
 
             Scenario.summarize()
             Scenario.TearDown()
             return 1
 
         #ClusterManager.oprofileSave(Iterations)
         Scenario.TearDown()
 
         Scenario.summarize()
         if Scenario.Stats["failure"] > 0:
             return Scenario.Stats["failure"]
 
         elif Scenario.Stats["success"] != Iterations:
             self.logger.log("No failure count but success != requested iterations")
             return 1
 
         return 0
 
     def __CheckNode(self, node):
         "Raise a ValueError if the given node isn't valid"
 
         if not self.IsValidNode(node):
             raise ValueError("Invalid node [%s] in CheckNode" % node)
 
 class NodeStatus(object):
     def __init__(self, env):
         self.Env = env
 
     def IsNodeBooted(self, node):
         '''Return TRUE if the given node is booted (responds to pings)'''
         if self.Env["docker"]:
             return RemoteFactory().getInstance()("localhost", "docker inspect --format {{.State.Running}} %s | grep -q true" % node, silent=True) == 0
 
         return RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0
 
     def IsSshdUp(self, node):
         rc = RemoteFactory().getInstance()(node, "true", silent=True)
         return rc == 0
 
     def WaitForNodeToComeUp(self, node, Timeout=300):
         '''Return TRUE when the given node comes up, or None/FALSE on timeout'''
         timeout = Timeout
         anytimeouts = 0
         while timeout > 0:
             if self.IsNodeBooted(node) and self.IsSshdUp(node):
                 if anytimeouts:
                      # Fudge to wait for the system to finish coming up
                      time.sleep(30)
                      LogFactory().debug("Node %s now up" % node)
                 return 1
 
             time.sleep(30)
             if (not anytimeouts):
                 LogFactory().debug("Waiting for node %s to come up" % node)
 
             anytimeouts = 1
             timeout = timeout - 1
 
         LogFactory().log("%s did not come up within %d tries" % (node, Timeout))
         if self.Env["continue"] == 1:
             answer = "Y"
         else:
             try:
                 answer = input_wrapper('Continue? [nY]')
             except EOFError as e:
                 answer = "n"
         if answer and answer == "n":
             raise ValueError("%s did not come up within %d tries" % (node, Timeout))
 
     def WaitForAllNodesToComeUp(self, nodes, timeout=300):
         '''Return TRUE when all nodes come up, or FALSE if timeout'''
 
         for node in nodes:
             if not self.WaitForNodeToComeUp(node, timeout):
                 return None
         return 1
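 
     # NodeStatus is used both as ClusterManager.ns below and directly by
     # tests (e.g. StartOnebyOne in CTStests.py) to wait for rebooted nodes.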
 
 
 class ClusterManager(UserDict):
     '''The Cluster Manager class.
     This is a subclass of the Python dictionary class.
     (This is because it contains lots of {name,value} pairs,
     not because its behavior is all that similar to a
     dictionary in other ways.)
 
     This is an abstract class which implements high-level
     operations on the cluster and/or its cluster managers.
     Actual cluster manager classes are subclassed from this type.
 
     One of the things we do is track the state we think every node should
     be in.
     '''
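 
     # A concrete cluster manager is expected to supply the command
     # templates ("StartCmd", "StopCmd", "StatusCmd", ...) through its
     # PatternSelector and to override the abstract members HasQuorum()
     # and Components().  Purely illustrative, hypothetical sketch:
     #
     #     class ExampleCM(ClusterManager):
     #         def HasQuorum(self, node_list):
     #             return True
     #         def Components(self):
     #             return []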
 
     def __InitialConditions(self):
         #if os.geteuid() != 0:
         #  raise ValueError("Must Be Root!")
         None
 
     def _finalConditions(self):
         for key in list(self.keys()):
             if self[key] == None:
                 raise ValueError("Improper derivation: self[" + key +   "] must be overridden by subclass.")
 
     def __init__(self, Environment, randseed=None):
         self.Env = EnvFactory().getInstance()
         self.templates = PatternSelector(self.Env["Name"])
         self.__InitialConditions()
         self.logger = LogFactory()
         self.TestLoggingLevel=0
         self.data = {}
         self.name = self.Env["Name"]
 
         self.rsh = RemoteFactory().getInstance()
         self.ShouldBeStatus={}
         self.ns = NodeStatus(self.Env)
         self.OurNode = os.uname()[1].lower()
         self.__instance_errorstoignore = []
 
     def __getitem__(self, key):
         if key == "Name":
             return self.name
 
         print("FIXME: Getting %s from %s" % (key, repr(self)))
         if key in self.data:
             return self.data[key]
 
         return self.templates.get_patterns(self.Env["Name"], key)
 
     def __setitem__(self, key, value):
         print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
         self.data[key] = value
 
     def key_for_node(self, node):
         return node
 
     def instance_errorstoignore_clear(self):
         '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
         self.__instance_errorstoignore = []
 
     def instance_errorstoignore(self):
         '''Return list of errors which are 'normal' for a specific test instance'''
         return self.__instance_errorstoignore
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def prepare(self):
         '''Finish the Initialization process. Prepare to test...'''
 
         print(repr(self)+"prepare")
         for node in self.Env["nodes"]:
             if self.StataCM(node):
                 self.ShouldBeStatus[node] = "up"
             else:
                 self.ShouldBeStatus[node] = "down"
 
             if self.Env["experimental-tests"]:
                 self.unisolate_node(node)
 
     def upcount(self):
         '''How many nodes are up?'''
         count = 0
         for node in self.Env["nodes"]:
           if self.ShouldBeStatus[node] == "up":
             count = count + 1
         return count
 
     def install_support(self, command="install"):
         for node in self.Env["nodes"]:
             self.rsh(node, CTSvars.CRM_DAEMON_DIR + "/cts-support " + command)
 
     def install_config(self, node):
         return None
 
     def prepare_fencing_watcher(self, name):
         # If we don't have quorum now but get it as a result of starting this node,
         # then a bunch of nodes might get fenced
         upnode = None
         if self.HasQuorum(None):
             self.debug("Have quorum")
             return None
 
         if not self.templates["Pat:Fencing_start"]:
             print("No start pattern")
             return None
 
         if not self.templates["Pat:Fencing_ok"]:
             print("No ok pattern")
             return None
 
         stonith = None
         stonithPats = []
         for peer in self.Env["nodes"]:
             if self.ShouldBeStatus[peer] != "up":
                 stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
 
         stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         stonith.setwatch()
         return stonith
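 
     # The watcher returned here is handed back to fencing_cleanup() below,
     # which scans it for Fencing_ok/Fencing_start matches once the node has
     # started, and updates ShouldBeStatus for any peers that were shot.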
 
     def fencing_cleanup(self, node, stonith):
         peer_list = []
         peer_state = {}
 
         self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
 
         # If we just started a node, we may now have quorum (and permission to fence)
         if not stonith:
             self.debug("Nothing to do")
             return peer_list
 
         q = self.HasQuorum(None)
         if not q and len(self.Env["nodes"]) > 2:
             # We didn't gain quorum - we shouldn't have shot anyone
             self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
             return peer_list
 
         for n in self.Env["nodes"]:
             peer_state[n] = "unknown"
 
         # Now see if any states need to be updated
         self.debug("looking for: " + repr(stonith.regexes))
         shot = stonith.look(0)
         while shot:
             line = repr(shot)
             self.debug("Found: " + line)
             del stonith.regexes[stonith.whichmatch]
 
             # Extract node name
             for n in self.Env["nodes"]:
                 if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                     peer = n
                     peer_state[peer] = "complete"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
 
                 elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
                     # TODO: Correctly detect multiple fencing operations for the same host
                     peer = n
                     peer_state[peer] = "in-progress"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
 
             if not peer:
                 self.logger.log("ERROR: Unknown stonith match: %s" % line)
 
             elif not peer in peer_list:
                 self.debug("Found peer: " + peer)
                 peer_list.append(peer)
 
             # Get the next one
             shot = stonith.look(60)
 
         for peer in peer_list:
 
             self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
             if self.Env["at-boot"]:
                 self.ShouldBeStatus[peer] = "up"
             else:
                 self.ShouldBeStatus[peer] = "down"
 
             if peer_state[peer] == "in-progress":
                 # Wait for any in-progress operations to complete
                 shot = stonith.look(60)
                 while len(stonith.regexes) and shot:
                     line = repr(shot)
                     self.debug("Found: " + line)
                     del stonith.regexes[stonith.whichmatch]
                     shot = stonith.look(60)
 
             # Now make sure the node is alive too
             self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])
 
             # Poll until it comes up
             if self.Env["at-boot"]:
                 if not self.StataCM(peer):
                     time.sleep(self.Env["StartTime"])
 
                 if not self.StataCM(peer):
                     self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                     return None
 
         return peer_list
 
     def StartaCM(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node'''
         if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
         else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
         ret = 1
 
         if not node in self.ShouldBeStatus:
             self.ShouldBeStatus[node] = "down"
 
         if self.ShouldBeStatus[node] != "down":
             return 1
 
         patterns = []
         # Technically we should always be able to notice ourselves starting
         patterns.append(self.templates["Pat:Local_started"] % node)
         if self.upcount() == 0:
             patterns.append(self.templates["Pat:DC_started"] % node)
         else:
             patterns.append(self.templates["Pat:NonDC_started"] % node)
 
         watch = LogWatcher(
             self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
 
         self.install_config(node)
 
         self.ShouldBeStatus[node] = "any"
         if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.logger.log ("%s was already started" % (node))
             return 1
 
         stonith = self.prepare_fencing_watcher(node)
         watch.setwatch()
 
         if self.rsh(node, self.templates["StartCmd"]) != 0:
             self.logger.log ("Warn: Start command failed on node %s" % (node))
             self.fencing_cleanup(node, stonith)
             return None
 
         self.ShouldBeStatus[node] = "up"
         watch_result = watch.lookforall()
 
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if watch_result and self.cluster_stable(self.Env["DeadTime"]):
             #self.debug("Found match: "+ repr(watch_result))
             self.fencing_cleanup(node, stonith)
             return 1
 
         elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return 1
 
         self.logger.log ("Warn: Start failed for node %s" % (node))
         return None
 
     def StartaCMnoBlock(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node in non-blocking mode'''
 
         if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
         else: self.debug("Starting %s on node %s" % (self["Name"], node))
 
         self.install_config(node)
         self.rsh(node, self.templates["StartCmd"], synchronous=0)
         self.ShouldBeStatus[node] = "up"
         return 1
 
     def StopaCM(self, node, verbose=False, force=False):
 
         '''Stop the cluster manager on a given node'''
 
         if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
         else: self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         if self.ShouldBeStatus[node] != "up" and force == False:
             return 1
 
         if self.rsh(node, self.templates["StopCmd"]) == 0:
             # Make sure we can continue even if corosync leaks
             # fdata-* is the old name
             #self.rsh(node, "rm -f /dev/shm/qb-* /dev/shm/fdata-*")
             self.ShouldBeStatus[node] = "down"
             self.cluster_stable(self.Env["DeadTime"])
             return 1
         else:
             self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
 
         return None
 
     def StopaCMnoBlock(self, node):
 
         '''Stop the cluster manager on a given node in non-blocking mode'''
 
         self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         self.rsh(node, self.templates["StopCmd"], synchronous=0)
         self.ShouldBeStatus[node] = "down"
         return 1
 
     def cluster_stable(self, timeout = None):
         time.sleep(self.Env["StableTime"])
         return 1
 
     def node_stable(self, node):
         return 1
 
     def RereadCM(self, node):
 
         '''Force the cluster manager on a given node to reread its config.
            This may be a no-op on certain cluster managers.
         '''
         rc=self.rsh(node, self.templates["RereadCmd"])
         if rc == 0:
             return 1
         else:
             self.logger.log ("Could not force %s on node %s to reread its config"
             %        (self["Name"], node))
         return None
 
     def StataCM(self, node):
 
         '''Report the status of the cluster manager on a given node'''
 
         out=self.rsh(node, self.templates["StatusCmd"] % node, 1)
         ret= (str.find(out, 'stopped') == -1)
 
         try:
             if ret:
                 if self.ShouldBeStatus[node] == "down":
                     self.logger.log(
                     "Node status for %s is %s but we think it should be %s"
                     %        (node, "up", self.ShouldBeStatus[node]))
             else:
                 if self.ShouldBeStatus[node] == "up":
                     self.logger.log(
                     "Node status for %s is %s but we think it should be %s"
                     %        (node, "down", self.ShouldBeStatus[node]))
         except KeyError:        pass
 
         if ret:
             self.ShouldBeStatus[node] = "up"
         else:
             self.ShouldBeStatus[node] = "down"
         return ret
 
     def startall(self, nodelist=None, verbose=False, quick=False):
 
         '''Start the cluster manager on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
 
         for node in nodelist:
             if self.ShouldBeStatus[node] == "down":
                 self.ns.WaitForAllNodesToComeUp(nodelist, 300)
 
         if not quick:
             # This is used for "basic sanity checks", so only start one node ...
             if not self.StartaCM(node, verbose=verbose):
                 return 0
             return 1
 
         # Approximation of SimulStartList for --boot 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:DC_IDLE"])
         for node in nodelist:
             watchpats.append(self.templates["Pat:Local_started"] % node)
             watchpats.append(self.templates["Pat:InfraUp"] % node)
             watchpats.append(self.templates["Pat:PacemakerUp"] % node)
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         watch.setwatch()
 
         if not self.StartaCM(nodelist[0], verbose=verbose):
             return 0
         for node in nodelist:
             self.StartaCMnoBlock(node, verbose=verbose)
 
         watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if not self.cluster_stable():
             self.logger.log("Cluster did not stabilize")
             return 0
 
         return 1
 
     def stopall(self, nodelist=None, verbose=False, force=False):
 
         '''Stop the cluster managers on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         ret = 1
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up" or force == True:
                 if not self.StopaCM(node, verbose=verbose, force=force):
                     ret = 0
         return ret
 
     def rereadall(self, nodelist=None):
 
         '''Force the cluster managers on every node in the cluster
         to reread their config files.  We can do it on a subset of the
         cluster if nodelist is not None.
         '''
 
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 self.RereadCM(node)
 
     def statall(self, nodelist=None):
 
         '''Return the status of the cluster managers in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         result = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in nodelist:
             if self.StataCM(node):
                 result[node] = "up"
             else:
                 result[node] = "down"
         return result
 
     def isolate_node(self, target, nodes=None):
         '''isolate the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
                 if rc != 0:
                     self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                     return None
                 else:
                     self.debug("Communication cut between %s and %s" % (target, node))
         return 1
 
     def unisolate_node(self, target, nodes=None):
         '''fix the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 restored = 0
 
                 # Limit the amount of time we have asynchronous connectivity for
                 # Restore both sides as simultaneously as possible
                 self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=0)
                 self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=0)
                 self.debug("Communication restored between %s and %s" % (target, node))
 
     def reducecomm_node(self,node):
         '''reduce the communication between the nodes'''
         rc = self.rsh(node, self.templates["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not reduce the communication between the nodes from node: %s" % node)
         return None
 
     def restorecomm_node(self,node):
         '''restore the saved communication between the nodes'''
         rc = 0
         if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0 :
             rc = self.rsh(node, self.templates["RestoreCommCmd"]);
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not restore the communication between the nodes from node: %s" % node)
         return None
 
     def HasQuorum(self, node_list):
         "Return TRUE if the cluster currently has quorum"
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes
         raise ValueError("Abstract Class member (HasQuorum)")
 
     def Components(self):
         raise ValueError("Abstract Class member (Components)")
 
     def oprofileStart(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStart(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Enabling oprofile on %s" % node)
             self.rsh(node, "opcontrol --init")
             self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
             self.rsh(node, "opcontrol --start")
             self.rsh(node, "opcontrol --reset")
 
     def oprofileSave(self, test, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileSave(test, n)
 
         elif node in self.Env["oprofile"]:
             self.rsh(node, "opcontrol --dump")
             self.rsh(node, "opcontrol --save=cts.%d" % test)
             # Read back with: opreport -l session:cts.0 image:<directory>/c*
             if None:
                 self.rsh(node, "opcontrol --reset")
             else:
                 self.oprofileStop(node)
                 self.oprofileStart(node)
 
     def oprofileStop(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStop(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Stopping oprofile on %s" % node)
             self.rsh(node, "opcontrol --reset")
             self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
 
 
     def StatsExtract(self):
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if host in has_log_stats:
                 self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                 (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
 
                 fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
                 print("Extracted stats: %s" % fname)
                 fd = open(fname, "a")
                 fd.writelines(lines)
                 fd.close()
 
     def StatsMark(self, testnum):
         '''Mark the test number in the stats log'''
 
         global has_log_stats
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if not host in has_log_stats:
 
                 global log_stats
                 global log_stats_bin
                 script=log_stats
                 #script = re.sub("\\\\", "\\\\", script)
                 script = re.sub('\"', '\\\"', script)
                 script = re.sub("'", "\'", script)
                 script = re.sub("`", "\`", script)
                 script = re.sub("\$", "\\\$", script)
 
                 self.debug("Installing %s on %s" % (log_stats_bin, host))
                 self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                 has_log_stats[host] = 1
 
             # Now mark it
             self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0)
 
 
 class Resource(object):
     '''
     This is an HA resource (not a resource group).
     A resource group is just an ordered list of Resource objects.
     '''
 
     def __init__(self, cm, rsctype=None, instance=None):
         self.CM = cm
         self.ResourceType = rsctype
         self.Instance = instance
         self.needs_quorum = 1
 
     def Type(self):
         return self.ResourceType
 
     def Instance(self, nodename):
         return self.Instance
 
     def IsRunningOn(self, nodename):
         '''
         This member function returns true if our resource is running
         on the given node in the cluster.
         It is analogous to the "status" operation on SystemV init scripts and
         heartbeat scripts.  FailSafe calls it the "exclusive" operation.
         '''
         raise ValueError("Abstract Class member (IsRunningOn)")
         return None
 
     def IsWorkingCorrectly(self, nodename):
         '''
         This member function returns true if our resource is operating
         correctly on the given node in the cluster.
         OCF does not require this operation, but one might call it the
         Monitor operation, which is what FailSafe calls it.
         Remotely monitorable resources (like IP addresses) *should* be
         monitored remotely for testing.
         '''
         raise ValueError("Abstract Class member (IsWorkingCorrectly)")
         return None
 
     def Start(self, nodename):
         '''
         This member function starts or activates the resource.
         '''
         raise ValueError("Abstract Class member (Start)")
         return None
 
     def Stop(self, nodename):
         '''
         This member function stops or deactivates the resource.
         '''
         raise ValueError("Abstract Class member (Stop)")
         return None
 
     def __repr__(self):
         if (self.Instance and len(self.Instance) > 1):
                 return "{" + self.ResourceType + "::" + self.Instance + "}"
         else:
                 return "{" + self.ResourceType + "}"
 
 
 class Component(object):
     def kill(self, node):
         None
 
 
 class Process(Component):
     def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], common_ignore=[], triggersreboot=0):
         self.name = str(name)
         self.dc_only = dc_only
         self.pats = pats
         self.dc_pats = dc_pats
         self.CM = cm
         self.badnews_ignore = badnews_ignore
         self.badnews_ignore.extend(common_ignore)
         self.triggersreboot = triggersreboot
 
         if process:
             self.proc = str(process)
         else:
             self.proc = str(name)
         self.KillCmd = "killall -9 " + self.proc
 
     def kill(self, node):
         if self.CM.rsh(node, self.KillCmd) != 0:
             self.CM.log ("ERROR: Kill %s failed on node %s" % (self.name,node))
             return None
         return 1
diff --git a/cts/CTStests.py b/cts/CTStests.py
index 42f6119294..8a42b8724e 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -1,3111 +1,3111 @@
 """ Test-specific classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 # Pacemaker targets compatibility with Python 2.7 and 3.2+
 from __future__ import print_function, unicode_literals, absolute_import, division
 
 __copyright__ = """Copyright 2000, 2001 Alan Robertson <alanr@unix.sh>
 Add RecourceRecover testcase Zhao Kai <zhaokai@cn.ibm.com>
 """
 
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 #
 #        SPECIAL NOTE:
 #
 #        Tests may NOT implement any cluster-manager-specific code in them.
 #        EXTEND the ClusterManager object to provide the base capabilities
 #        the test needs if you need to do something that the current CM classes
 #        do not.  Otherwise you screw up the whole point of the object structure
 #        in CTS.
 #
 #                Thank you.
 #
 
 import os
 import re
 import time
 import subprocess
 import tempfile
 
 from stat import *
 from cts import CTS
 from cts.CTSaudits import *
 from cts.CTSvars   import *
 from cts.patterns  import PatternSelector
 from cts.logging   import LogFactory
 from cts.remote    import RemoteFactory, input_wrapper
 from cts.watcher   import LogWatcher
 from cts.environment import EnvFactory
 
 AllTestClasses = [ ]
 
 
 class CTSTest(object):
     '''
     A Cluster test.
     We implement the basic set of properties and behaviors for a generic
     cluster test.
 
     Cluster tests track their own statistics.
     We keep each of the kinds of counts we track as separate {name,value}
     pairs.
     '''
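 
     # Concrete tests (see StopTest, StartTest, FlipTest, ... below) subclass
     # CTSTest, set self.name in __init__, and implement __call__(self, node),
     # which should call self.incr("calls") and finish with either
     # self.success() or self.failure("reason").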
 
     def __init__(self, cm):
         #self.name="the unnamed test"
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
 #        if not issubclass(cm.__class__, ClusterManager):
 #            raise ValueError("Must be a ClusterManager object")
         self.CM = cm
         self.Env = EnvFactory().getInstance()
         self.rsh = RemoteFactory().getInstance()
         self.logger = LogFactory()
         self.templates = PatternSelector(cm["Name"])
         self.Audits = []
         self.timeout = 120
         self.passed = 1
         self.is_loop = 0
         self.is_unsafe = 0
         self.is_docker_unsafe = 0
         self.is_experimental = 0
         self.is_container = 0
         self.is_valgrind = 0
         self.benchmark = 0  # which tests to benchmark
         self.timer = {}  # timers
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def has_key(self, key):
         return key in self.Stats
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
 
     def __getitem__(self, key):
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead")
 
         if key in self.Stats:
             return self.Stats[key]
         return None
 
     def log_mark(self, msg):
         self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
         return
 
     def get_timer(self,key = "test"):
         try: return self.timer[key]
         except: return 0
 
     def set_timer(self,key = "test"):
         self.timer[key] = time.time()
         return self.timer[key]
 
     def log_timer(self,key = "test"):
         elapsed = 0
         if key in self.timer:
             elapsed = time.time() - self.timer[key]
             s = key == "test" and self.name or "%s:%s" % (self.name,key)
             self.debug("%s runtime: %.2f" % (s, elapsed))
             del self.timer[key]
         return elapsed
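 
     # Timers are used in matched pairs by the tests below, e.g.:
     #     self.set_timer("fence")
     #     ... perform the step being measured ...
     #     self.log_timer("fence")   # debug-logs "<name>:fence runtime: <seconds>"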
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not name in self.Stats:
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
         # Reset the test passed boolean
         if name == "calls":
             self.passed = 1
 
     def failure(self, reason="none"):
         '''Increment the failure count'''
         self.passed = 0
         self.incr("failure")
         self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
         return None
 
     def success(self):
         '''Increment the success count'''
         self.incr("success")
         return 1
 
     def skipped(self):
         '''Increment the skipped count'''
         self.incr("skipped")
         return 1
 
     def __call__(self, node):
         '''Perform the given test'''
         raise ValueError("Abstract Class member (__call__)")
         self.incr("calls")
         return self.failure()
 
     def audit(self):
         passed = 1
         if len(self.Audits) > 0:
             for audit in self.Audits:
                 if not audit():
                     self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
                     self.incr("auditfail")
                     passed = 0
         return passed
 
     def setup(self, node):
         '''Set up the given test'''
         return self.success()
 
     def teardown(self, node):
         '''Tear down the given test'''
         return self.success()
 
     def create_watch(self, patterns, timeout, name=None):
         if not name:
             name = self.name
         return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
 
     def local_badnews(self, prefix, watch, local_ignore=[]):
         errcount = 0
         if not prefix:
             prefix = "LocalBadNews:"
 
         ignorelist = []
         ignorelist.append(" CTS: ")
         ignorelist.append(prefix)
         ignorelist.extend(local_ignore)
 
         while errcount < 100:
             match = watch.look(0)
             if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.logger.log(prefix + " " + match)
                    errcount = errcount + 1
             else:
               break
         else:
             self.logger.log("Too many errors!")
 
         watch.end()
         return errcount
 
     def is_applicable(self):
         return self.is_applicable_common()
 
     def is_applicable_common(self):
         '''Return TRUE if we are applicable in the current test configuration'''
         #raise ValueError("Abstract Class member (is_applicable)")
 
         if self.is_loop and not self.Env["loop-tests"]:
             return 0
         elif self.is_unsafe and not self.Env["unsafe-tests"]:
             return 0
         elif self.is_valgrind and not self.Env["valgrind-tests"]:
             return 0
         elif self.is_experimental and not self.Env["experimental-tests"]:
             return 0
         elif self.is_docker_unsafe and self.Env["docker"]:
             return 0
         elif self.is_container and not self.Env["container-tests"]:
             return 0
         elif self.Env["benchmark"] and self.benchmark == 0:
             return 0
 
         return 1
 
     def find_ocfs2_resources(self, node):
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "o2cb" and r.parent != "NA":
                     self.debug("Found o2cb: %s" % self.r_o2cb)
                     self.r_o2cb = r.parent
             if re.search("^Constraint", line):
                 c = AuditConstraint(self.CM, line)
                 if c.type == "rsc_colocation" and c.target == self.r_o2cb:
                     self.r_ocfs2.append(c.rsc)
 
         self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
         return len(self.r_ocfs2)
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         return 1
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
 
 class StopTest(CTSTest):
     '''Stop (deactivate) the cluster manager on a node'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stop"
 
     def __call__(self, node):
         '''Perform the 'stop' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] != "up":
             return self.skipped()
 
         patterns = []
         # Technically we should always be able to notice ourselves stopping
         patterns.append(self.templates["Pat:We_stopped"] % node)
 
         # Any active node needs to notice this one left
         # (note that this won't work if we have multiple partitions)
         for other in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[other] == "up" and other != node:
                 patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
                 #self.debug("Checking %s will notice %s left"%(other, node))
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         if node == self.CM.OurNode:
             self.incr("us")
         else:
             if self.CM.upcount() <= 1:
                 self.incr("all")
             else:
                 self.incr("them")
 
         self.CM.StopaCM(node)
         watch_result = watch.lookforall()
 
         failreason = None
         UnmatchedList = "||"
         if watch.unmatched:
             (rc, output) = self.rsh(node, "/bin/ps axf", None)
             for line in output:
                 self.debug(line)
 
             (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump", None)
             for line in output:
                 self.debug(line)
 
             for regex in watch.unmatched:
                 self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex))
                 UnmatchedList +=  regex + "||";
                 failreason = "Missing shutdown pattern"
 
         self.CM.cluster_stable(self.Env["DeadTime"])
 
         if not watch.unmatched or self.CM.upcount() == 0:
             return self.success()
 
         if len(watch.unmatched) >= self.CM.upcount():
             return self.failure("no match against (%s)" % UnmatchedList)
 
         if failreason == None:
             return self.success()
         else:
             return self.failure(failreason)
 #
 # We don't register StopTest because it's better when called by
 # another test...
 #
 
 
 class StartTest(CTSTest):
     '''Start (activate) the cluster manager on a node'''
     def __init__(self, cm, debug=None):
         CTSTest.__init__(self,cm)
         self.name = "start"
         self.debug = debug
 
     def __call__(self, node):
         '''Perform the 'start' test. '''
         self.incr("calls")
 
         if self.CM.upcount() == 0:
             self.incr("us")
         else:
             self.incr("them")
 
         if self.CM.ShouldBeStatus[node] != "down":
             return self.skipped()
         elif self.CM.StartaCM(node):
             return self.success()
         else:
             return self.failure("Startup %s on node %s failed"
                                 % (self.Env["Name"], node))
 
 #
 # We don't register StartTest because it's better when called by
 # another test...
 #
 
 
 class FlipTest(CTSTest):
     '''If it's running, stop it.  If it's stopped, start it.
        Overthrow the status quo...
     '''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Flip"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, node):
         '''Perform the 'Flip' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] == "up":
             self.incr("stopped")
             ret = self.stop(node)
             type = "up->down"
             # Give the cluster time to recognize it's gone...
             time.sleep(self.Env["StableTime"])
         elif self.CM.ShouldBeStatus[node] == "down":
             self.incr("started")
             ret = self.start(node)
             type = "down->up"
         else:
             return self.skipped()
 
         self.incr(type)
         if ret:
             return self.success()
         else:
             return self.failure("%s failure" % type)
 
 #        Register FlipTest as a good test to run
 AllTestClasses.append(FlipTest)
 
 
 class RestartTest(CTSTest):
     '''Stop and restart a node'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Restart"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         '''Perform the 'restart' test. '''
         self.incr("calls")
 
         self.incr("node:" + node)
 
         ret1 = 1
         if self.CM.StataCM(node):
             self.incr("WasStopped")
             if not self.start(node):
                 return self.failure("start (setup) failure: "+node)
 
         self.set_timer()
         if not self.stop(node):
             return self.failure("stop failure: "+node)
         if not self.start(node):
             return self.failure("start failure: "+node)
         return self.success()
 
 #        Register RestartTest as a good test to run
 AllTestClasses.append(RestartTest)
 
 
 class StonithdTest(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stonithd"
         self.startall = SimulStartLite(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         self.incr("calls")
         if len(self.Env["nodes"]) < 2:
             return self.skipped()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         is_dc = self.CM.is_node_dc(node)
 
         watchpats = []
         watchpats.append(self.templates["Pat:FenceOpOK"] % node)
         watchpats.append(self.templates["Pat:NodeFenced"] % node)
 
         if self.Env["at-boot"] == 0:
             self.debug("Expecting %s to stay down" % node)
             self.CM.ShouldBeStatus[node] = "down"
         else:
             self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"]))
             watchpats.append("%s.* S_STARTING -> S_PENDING" % node)
             watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node)
 
         watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         origin = self.Env.RandomGen.choice(self.Env["nodes"])
 
         rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
 
         if rc == 194:
             # 194 - 256 = -62 = Timer expired
             #
             # Look for the patterns, usually this means the required
             # device was running on the node to be fenced - or that
             # the required devices were in the process of being loaded
             # and/or moved
             #
             # Effectively the node committed suicide so there will be
             # no confirmation, but pacemaker should be watching and
             # fence the node again
 
             self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
 
         elif origin != node and rc != 0:
             self.debug("Waiting for the cluster to recover")
             self.CM.cluster_stable()
 
             self.debug("Waiting for fenced node to come back up")
             self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
             self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
 
         elif origin == node and rc != 255:
             # 255 == broken pipe, ie. the node was fenced as expected
             self.logger.log("Locally originated fencing returned %d" % rc)
 
         self.set_timer("fence")
         matched = watch.lookforall()
         self.log_timer("fence")
         self.set_timer("reform")
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting for fenced node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected patterns")
         elif not is_stable:
             return self.failure("Cluster did not become stable")
 
         self.log_timer("reform")
         return self.success()
 
     def errorstoignore(self):
         return [
             self.templates["Pat:Fencing_start"] % ".*",
             self.templates["Pat:Fencing_ok"] % ".*",
             r"error.*: Resource .*stonith::.* is active on 2 nodes attempting recovery",
             r"error.*: Operation reboot of .*by .* for stonith_admin.*: Timer expired",
         ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
 
         if "DoFencing" in list(self.Env.keys()):
             return self.Env["DoFencing"]
 
         return 1
 
 AllTestClasses.append(StonithdTest)
 
 
 class StartOnebyOne(CTSTest):
     '''Start all the nodes ~ one by one'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StartOnebyOne"
         self.stopall = SimulStopLite(cm)
         self.start = StartTest(cm)
         self.ns = CTS.NodeStatus(cm.Env)
 
     def __call__(self, dummy):
         '''Perform the 'StartOnebyOne' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Test setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.start(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to start: " + repr(failed))
 
         return self.success()
 
 #        Register StartOnebyOne as a good test to run
 AllTestClasses.append(StartOnebyOne)
 
 
 class SimulStart(CTSTest):
     '''Start all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStart"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStart' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.startall(None):
             return self.failure("Startall failed")
 
         return self.success()
 
 #        Register SimulStart as a good test to run
 AllTestClasses.append(SimulStart)
 
 
 class SimulStop(CTSTest):
     '''Stop all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStop"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStop' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.stopall(None):
             return self.failure("Stopall failed")
 
         return self.success()
 
 #     Register SimulStop as a good test to run
 AllTestClasses.append(SimulStop)
 
 
 class StopOnebyOne(CTSTest):
     '''Stop all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StopOnebyOne"
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, dummy):
         '''Perform the 'StopOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.stop(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to stop: " + repr(failed))
 
         return self.success()
 
 #     Register StopOnebyOne as a good test to run
 AllTestClasses.append(StopOnebyOne)
 
 
 class RestartOnebyOne(CTSTest):
     '''Restart all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RestartOnebyOne"
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'RestartOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         did_fail = []
         self.set_timer()
         self.restart = RestartTest(self.CM)
         for node in self.Env["nodes"]:
             if not self.restart(node):
                 did_fail.append(node)
 
         if did_fail:
             return self.failure("Could not restart %d nodes: %s"
                                 % (len(did_fail), repr(did_fail)))
         return self.success()
 
 #     Register StopOnebyOne as a good test to run
 AllTestClasses.append(RestartOnebyOne)
 
 
 class PartialStart(CTSTest):
     '''Start a node - but tell it to stop before it finishes starting up'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "PartialStart"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
         self.stop = StopTest(cm)
         #self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'PartialStart' test. '''
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
 #   FIXME!  This should use the CM class to get the pattern
 #       then it would be applicable in general
         watchpats = []
         watchpats.append("pacemaker-controld.*Connecting to cluster infrastructure")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.CM.StartaCMnoBlock(node)
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             return self.failure("Setup of %s failed" % node)
 
         ret = self.stop(node)
         if not ret:
             return self.failure("%s did not stop in time" % node)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
 
         # We might do some fencing in the 2-node case if we make it up far enough
         return [
             r"Executing reboot fencing operation",
             r"Requesting fencing \([^)]+\) of node ",
         ]
 
 #     Register PartialStart as a good test to run
 AllTestClasses.append(PartialStart)
 
 
 class StandbyTest(CTSTest):
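     '''Put a node into standby mode and verify that no resources run on it,
        then bring it back to active mode'''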
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Standby"
         self.benchmark = 1
 
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
 
     # make sure the node is active
     # set the node to standby mode
     # check resources, no resources should be running on the node
     # set the node to active mode
     # check resources, resources should have been migrated back (SHOULD THEY?)
 
     def __call__(self, node):
 
         self.incr("calls")
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         self.debug("Make sure node %s is active" % node)
         if self.CM.StandbyStatus(node) != "off":
             if not self.CM.SetStandbyMode(node, "off"):
                 return self.failure("can't set node %s to active mode" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
 
         self.debug("Getting resources running on node %s" % node)
         rsc_on_node = self.CM.active_resources(node)
 
         watchpats = []
         watchpats.append(r"State transition .* -> S_POLICY_ENGINE")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.debug("Setting node %s to standby mode" % node)
         if not self.CM.SetStandbyMode(node, "on"):
             return self.failure("can't set node %s to standby mode" % node)
 
         self.set_timer("on")
 
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             self.CM.SetStandbyMode(node, "off")
             return self.failure("cluster didn't react to standby change on %s" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "on":
             return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
         self.log_timer("on")
 
         self.debug("Checking resources")
         bad_run = self.CM.active_resources(node)
         if len(bad_run) > 0:
             rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
             self.debug("Setting node %s to active mode" % node)
             self.CM.SetStandbyMode(node, "off")
             return rc
 
         self.debug("Setting node %s to active mode" % node)
         if not self.CM.SetStandbyMode(node, "off"):
             return self.failure("can't set node %s to active mode" % node)
 
         self.set_timer("off")
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
         self.log_timer("off")
 
         return self.success()
 
 AllTestClasses.append(StandbyTest)
 
 
 class ValgrindTest(CTSTest):
     '''Check for memory leaks'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Valgrind"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_valgrind = 1
         self.is_loop = 1
 
     def setup(self, node):
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind,
         # and clear any valgrind logs from previous runs. For now, we rely on
         # the user to do this manually.
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         return self.success()
 
     def teardown(self, node):
         # Return all nodes to normal
         # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         return self.success()
 
     def find_leaks(self):
         # Check for leaks
         # (no longer used but kept in case feature is restored)
         leaked = []
         self.stop = StopTest(self.CM)
 
         for node in self.Env["nodes"]:
             rc = self.stop(node)
             if not rc:
                 self.failure("Couldn't shut down %s" % node)
 
             rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0)
             if rc != 1:
                 leaked.append(node)
                 self.failure("Valgrind errors detected on %s" % node)
                 (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None)
                 for line in output:
                     self.logger.log(line)
                 (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None)
                 for line in output:
                     self.debug(line)
 
         self.rsh(node, "rm -f %s" % self.logger.logPat, None)
         return leaked
 
     def __call__(self, node):
         #leaked = self.find_leaks()
         #if len(leaked) > 0:
         #    return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*",
             r"pacemaker-based.*: .* avoid confusing Valgrind",
             r"HA_VALGRIND_ENABLED",
         ]
 
 
 class StandbyLoopTest(ValgrindTest):
     '''Check for memory leaks by putting a node in and out of standby for an hour'''
     # @TODO This is not a useful test for memory leaks
     def __init__(self, cm):
         ValgrindTest.__init__(self,cm)
         self.name = "StandbyLoop"
 
     def __call__(self, node):
 
         lpc = 0
         delay = 2
         failed = 0
         done = time.time() + self.Env["loop-minutes"] * 60
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "on"):
                 self.failure("can't set node %s to standby mode" % node)
                 failed = lpc
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "off"):
                 self.failure("can't set node %s to active mode" % node)
                 failed = lpc
 
         leaked = self.find_leaks()
         if failed:
             return self.failure("Iteration %d failed" % failed)
         elif len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
 #AllTestClasses.append(StandbyLoopTest)
 
 
 class BandwidthTest(CTSTest):
 #        Tests should not be cluster-manager-specific
 #        If you need to find out cluster manager configuration to do this, then
 #        it should be added to the generic cluster manager API.
     '''Test the bandwidth which the cluster uses'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Bandwidth"
         self.start = StartTest(cm)
         self.__setitem__("min",0)
         self.__setitem__("max",0)
         self.__setitem__("totalbandwidth",0)
         (handle, self.tempfile) = tempfile.mkstemp(".cts")
         os.close(handle)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, node):
         '''Perform the Bandwidth test'''
         self.incr("calls")
 
         if self.CM.upcount() < 1:
             return self.skipped()
 
         Path = self.CM.InternalCommConfig()
         if "ip" not in Path["mediatype"]:
              return self.skipped()
 
         port = Path["port"][0]
         port = int(port)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Test setup failed")
         time.sleep(5)  # We get extra messages right after startup.
 
         fstmpfile = "/var/run/band_estimate"
         dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
         %                (port, fstmpfile)
 
         rc = self.rsh(node, dumpcmd)
         if rc == 0:
             farfile = "root@%s:%s" % (node, fstmpfile)
             self.rsh.cp(farfile, self.tempfile)
             Bandwidth = self.countbandwidth(self.tempfile)
             if not Bandwidth:
                 self.logger.log("Could not compute bandwidth.")
                 return self.success()
             intband = int(Bandwidth + 0.5)
             self.logger.log("...bandwidth: %d bits/sec" % intband)
             self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
             if self.Stats["min"] == 0:
                 self.Stats["min"] = Bandwidth
             if Bandwidth > self.Stats["max"]:
                 self.Stats["max"] = Bandwidth
             if Bandwidth < self.Stats["min"]:
                 self.Stats["min"] = Bandwidth
             self.rsh(node, "rm -f %s" % fstmpfile)
             os.unlink(self.tempfile)
             return self.success()
         else:
             return self.failure("no response from tcpdump command [%d]!" % rc)
 
     def countbandwidth(self, file):
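         # Estimate bandwidth from the captured tcpdump output: count roughly
         # 100 UDP packets, sum their reported lengths, and divide by the
         # elapsed capture time to get bits/sec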
         fp = open(file, "r")
         fp.seek(0)
         count = 0
         sum = 0
         while 1:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count + 1
                 linesplit = line.split(" ")
                 for j in range(len(linesplit)-1):
                     if linesplit[j] == "udp": break
                     if linesplit[j] == "length:": break
 
                 try:
                     sum = sum + int(linesplit[j+1])
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
                 T1 = linesplit[0]
                 timesplit = T1.split(":")
                 time2split = timesplit[2].split(".")
                 time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
                 break
 
         while count < 100:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count+1
                 linessplit = line.split(" ")
                 for j in range(len(linessplit)-1):
                     if linessplit[j] == "udp": break
                     if linessplit[j] == "length:": break
                 try:
                     sum = int(linessplit[j+1]) + sum
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
 
         T2 = linessplit[0]
         timesplit = T2.split(":")
         time2split = timesplit[2].split(".")
         time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
         elapsed = time2 - time1
         if elapsed <= 0:
             return 0
         return int((sum*8)/elapsed)
 
     def is_applicable(self):
         '''BandwidthTest never applicable'''
         return 0
 
 AllTestClasses.append(BandwidthTest)
 
 
 ###################################################################
 class MaintenanceMode(CTSTest):
 ###################################################################
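     '''Fail a dummy resource while maintenance mode is on, and verify that it
        is not recovered (and resources stay unmanaged) until maintenance mode
        is turned off again'''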
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "MaintenanceMode"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         #self.is_unsafe = 1
         self.benchmark = 1
         self.action = "asyncmon"
         self.interval = 0
         self.rid = "maintenanceDummy"
 
     def toggleMaintenanceMode(self, node, action):
         pats = []
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # fail the resource right after turning Maintenance mode on
         # verify it is not recovered until maintenance mode is turned off
         if action == "On":
             pats.append(r"schedulerd.*:\s+warning:.*Processing failed %s of %s on" % (self.action, self.rid))
         else:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
             pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.debug("Turning maintenance mode %s" % action)
         self.rsh(node, self.templates["MaintenanceMode%s" % (action)])
         if (action == "On"):
             self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover%s" % (action))
         watch.lookforall()
         self.log_timer("recover%s" % (action))
         if watch.unmatched:
             self.debug("Failed to find patterns when turning maintenance mode %s" % action)
             return repr(watch.unmatched)
 
         return ""
 
     def insertMaintenanceDummy(self, node):
         pats = []
         pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid)))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.AddDummyRsc(node, self.rid)
 
         self.set_timer("addDummy")
         watch.lookforall()
         self.log_timer("addDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when adding maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def removeMaintenanceDummy(self, node):
         pats = []
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
         self.CM.RemoveDummyRsc(node, self.rid)
 
         self.set_timer("removeDummy")
         watch.lookforall()
         self.log_timer("removeDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when removing maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def managedRscList(self, node):
         rscList = []
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.managed():
                     rscList.append(tmp.id)
 
         return rscList
 
     def verifyResources(self, node, rscList, managed):
         managedList = list(rscList)
         managed_str = "managed"
         if not managed:
             managed_str = "unmanaged"
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if managed and not tmp.managed():
                     continue
                 elif not managed and tmp.managed():
                     continue
                 elif managedList.count(tmp.id):
                     managedList.remove(tmp.id)
 
         if len(managedList) == 0:
             self.debug("Found all %s resources on %s" % (managed_str, node))
             return True
 
         self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
         return False
 
     def __call__(self, node):
         '''Perform the 'MaintenanceMode' test. '''
         self.incr("calls")
         verify_managed = False
         verify_unmanaged = False
         failPat = ""
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         # get a list of all the managed resources. We use this list
         # after enabling maintenance mode to verify all managed resources
         # become un-managed.  After maintenance mode is turned off, we use
         # this list to verify all the resources become managed again.
         managedResources = self.managedRscList(node)
         if len(managedResources) == 0:
             self.logger.log("No managed resources on %s" % node)
             return self.skipped()
 
         # insert a fake resource we can fail during maintenance mode
         # so we can verify recovery does not take place until after maintenance
         # mode is disabled.
         failPat = failPat + self.insertMaintenanceDummy(node)
 
         # toggle maintenance mode ON, then fail dummy resource.
         failPat = failPat + self.toggleMaintenanceMode(node, "On")
 
         # verify all the resources are now unmanaged
         if self.verifyResources(node, managedResources, False):
             verify_unmanaged = True
 
         # Toggle maintenance mode  OFF, verify dummy is recovered.
         failPat = failPat + self.toggleMaintenanceMode(node, "Off")
 
         # verify all the resources are now managed again
         if self.verifyResources(node, managedResources, True):
             verify_managed = True
 
         # Remove our maintenance dummy resource.
         failPat = failPat + self.removeMaintenanceDummy(node)
 
         self.CM.cluster_stable()
 
         if failPat != "":
             return self.failure("Unmatched patterns: %s" % (failPat))
         elif verify_unmanaged is False:
             return self.failure("Failed to verify resources became unmanaged during maintenance mode")
         elif verify_managed is False:
             return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for %s" % self.rid,
             r"schedulerd.*: Recover %s\s*\(.*\)" % self.rid,
             r"Unknown operation: fail",
             self.templates["Pat:RscOpOK"] % (self.action, self.rid),
             r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
         ]
 
 AllTestClasses.append(MaintenanceMode)
 
 
 class ResourceRecover(CTSTest):
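     '''Fail a random active resource and verify that the cluster recovers it'''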
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ResourceRecover"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         self.rid = None
         self.rid_alt = None
         #self.is_unsafe = 1
         self.benchmark = 1
 
         # these are the values used for the new LRM API call
         self.action = "asyncmon"
         self.interval = 0
 
     def __call__(self, node):
         '''Perform the 'ResourceRecover' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         resourcelist = self.CM.active_resources(node)
         # if there are no active resources, skip this test
         if len(resourcelist) == 0:
             self.logger.log("No active resources on %s" % node)
             return self.skipped()
 
         self.rid = self.Env.RandomGen.choice(resourcelist)
         self.rid_alt = self.rid
 
         rsc = None
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.id == self.rid:
                     rsc = tmp
                     # Handle anonymous clones that get renamed
                     self.rid = rsc.clone_id
                     break
 
         if not rsc:
             return self.failure("Could not find %s in the resource list" % self.rid)
 
         self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
 
         pats = []
         pats.append(r"schedulerd.*:\s+warning:.*Processing failed %s of (%s|%s) on" % (self.action,
             rsc.id, rsc.clone_id))
 
         if rsc.managed():
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
             if rsc.unique():
                 pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
             else:
                 # Anonymous clones may get restarted with a different clone number
                 pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover")
         watch.lookforall()
         self.log_timer("recover")
 
         self.CM.cluster_stable()
         recovered = self.CM.ResourceLocation(self.rid)
 
         if watch.unmatched:
             return self.failure("Patterns not found: %s" % repr(watch.unmatched))
 
         elif rsc.unique() and len(recovered) > 1:
             return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
 
         elif len(recovered) > 0:
             self.debug("%s is running on: %s" % (self.rid, repr(recovered)))
 
         elif rsc.managed():
             return self.failure("%s was not recovered and is inactive" % self.rid)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for %s" % self.rid,
             r"schedulerd.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt),
             r"Unknown operation: fail",
             self.templates["Pat:RscOpOK"] % (self.action, self.rid),
             r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
         ]
 
 AllTestClasses.append(ResourceRecover)
 
 
 class ComponentFail(CTSTest):
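     '''Kill a random cluster daemon on a node and verify that the cluster
        either respawns it or fences the node, and becomes stable again'''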
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ComponentFail"
         # TODO make this work correctly in docker.
         self.is_docker_unsafe = 1
         self.startall = SimulStartLite(cm)
         self.complist = cm.Components()
         self.patterns = []
         self.okerrpatterns = []
         self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'ComponentFail' test. '''
         self.incr("calls")
         self.patterns = []
         self.okerrpatterns = []
 
         # start all nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.CM.cluster_stable(self.Env["StableTime"]):
             return self.failure("Setup failed - unstable")
 
         node_is_dc = self.CM.is_node_dc(node, None)
 
         # select a component to kill
         chosen = self.Env.RandomGen.choice(self.complist)
         while chosen.dc_only == 1 and node_is_dc == 0:
             chosen = self.Env.RandomGen.choice(self.complist)
 
         self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
         self.incr(chosen.name)
 
         if chosen.name != "corosync":
             self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
             self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
 
         self.patterns.extend(chosen.pats)
         if node_is_dc:
           self.patterns.extend(chosen.dc_pats)
 
         if chosen.name == "pacemaker-fenced":
             # Ignore actions for STONITH resources
             (rc, lines) = self.rsh(node, "crm_resource -c", None)
             for line in lines:
                 if re.search("^Resource", line):
                     r = AuditResource(self.CM, line)
                     if r.rclass == "stonith":
                         self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id)
 
         # Give the watch a copy of the patterns; badnews_ignore is appended
         # only to self.patterns so that errorstoignore() picks it up
         tmpPats = []
         tmpPats.extend(self.patterns)
         self.patterns.extend(chosen.badnews_ignore)
 
         # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
         stonithPats = []
         stonithPats.append(self.templates["Pat:Fencing_ok"] % node)
         stonith = self.create_watch(stonithPats, 0)
         stonith.setwatch()
 
         # set the watch for stable
         watch = self.create_watch(
             tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         # kill the component
         chosen.kill(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting for any fenced node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         self.CM.cluster_stable(self.Env["StartTime"])
 
         self.debug("Checking if %s was shot" % node)
         shot = stonith.look(60)
         if shot:
             self.debug("Found: " + repr(shot))
             self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
 
             if self.Env["at-boot"] == 0:
                 self.CM.ShouldBeStatus[node] = "down"
 
             # If fencing occurred, chances are many (if not all) the expected logs
             # will not be sent - or will be lost when the node reboots
             return self.success()
 
         # check for logs indicating a graceful recovery
         matched = watch.lookforall(allow_multiple_matches=1)
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected %s patterns" % chosen.name)
         elif not is_stable:
             return self.failure("Cluster did not become stable after killing %s" % chosen.name)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Note that okerrpatterns refers to the last time we ran this test
         # The good news is that this works fine for us...
         self.okerrpatterns.extend(self.patterns)
         return self.okerrpatterns
 
 AllTestClasses.append(ComponentFail)
 
 
 class SplitBrainTest(CTSTest):
     '''Test split-brain: when the path between two nodes breaks,
        check whether both nodes take over the resources'''
     def __init__(self,cm):
         CTSTest.__init__(self,cm)
         self.name = "SplitBrain"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.is_experimental = 1
 
     def isolate_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Creating partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             if not self.CM.isolate_node(node, other_nodes):
                 self.logger.log("Could not isolate %s" % node)
                 return 0
 
         return 1
 
     def heal_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Healing partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             self.CM.unisolate_node(node, other_nodes)
 
     def __call__(self, node):
         '''Perform split-brain test'''
         self.incr("calls")
         self.passed = 1
         partitions = {}
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         while 1:
             # Retry until we get multiple partitions
             partitions = {}
             p_max = len(self.Env["nodes"])
             for node in self.Env["nodes"]:
                 p = self.Env.RandomGen.randint(1, p_max)
                 if not p in partitions:
                     partitions[p] = []
                 partitions[p].append(node)
             p_max = len(list(partitions.keys()))
             if p_max > 1:
                 break
             # else, try again
 
         self.debug("Created %d partitions" % p_max)
         for key in list(partitions.keys()):
             self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
 
         # Disabling STONITH to reduce test complexity for now
         self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
 
         for key in list(partitions.keys()):
             self.isolate_partition(partitions[key])
 
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != p_max:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Expected partitions were not created")
 
         # Target number of partitions formed - wait for stability
         if not self.CM.cluster_stable():
             self.failure("Partitioned cluster not stable")
 
         # Now audit the cluster state
         self.CM.partitions_expected = p_max
         if not self.audit():
             self.failure("Audits failed")
         self.CM.partitions_expected = 1
 
         # And heal them again
         for key in list(partitions.keys()):
             self.heal_partition(partitions[key])
 
         # Wait for a single partition to form
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != 1:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not reform")
 
         # Wait for it to have the right number of members
         count = 30
         while count > 0:
             members = []
 
             partitions = self.CM.find_partitions()
             if len(partitions) > 0:
                 members = partitions[0].split()
 
             if len(members) != len(self.Env["nodes"]):
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not completely reform")
 
         # Wait up to 20 minutes - the delay is preferable to
         # trying to continue in a messed-up state
         if not self.CM.cluster_stable(1200):
             self.failure("Reformed cluster not stable")
             if self.Env["continue"] == 1:
                 answer = "Y"
             else:
                 try:
                     answer = input_wrapper('Continue? [nY]')
                 except EOFError as e:
                     answer = "n" 
             if answer and answer == "n":
                 raise ValueError("Reformed cluster not stable")
 
         # Turn fencing back on
         if self.Env["DoFencing"]:
             self.rsh(node, "crm_attribute -V -D -n stonith-enabled")
 
         self.CM.cluster_stable()
 
         if self.passed:
             return self.success()
         return self.failure("See previous errors")
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return [
             r"Another DC detected:",
             r"(ERROR|error).*: .*Application of an update diff failed",
             r"pacemaker-controld.*:.*not in our membership list",
             r"CRIT:.*node.*returning after partition",
         ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         return len(self.Env["nodes"]) > 2
 
 AllTestClasses.append(SplitBrainTest)
 
 
 class Reattach(CTSTest):
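     '''Disable resource management, restart the cluster, and verify that no
        resources are stopped or started before management is re-enabled'''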
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Reattach"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
         self.is_unsafe = 0 # Handled by canrunnow()
 
     def _is_managed(self, node):
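         # Query the cluster-wide is-managed resource default, treating an
         # unset value as "true"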
         is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1)
         is_managed = is_managed[:-1] # Strip off the newline
         return is_managed == "true"
 
     def _set_unmanaged(self, node):
         self.debug("Disable resource management")
         self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
 
     def _set_managed(self, node):
         self.debug("Re-enable resource management")
         self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
 
     def setup(self, node):
         attempt = 0
         if not self.startall(None):
             return None
 
         # Make sure we are really _really_ stable and that all
         # resources, including those that depend on transient node
         # attributes, are started
         while not self.CM.cluster_stable(double_check=True):
             if attempt < 5:
                 attempt += 1
                 self.debug("Not stable yet, re-testing")
             else:
                 self.logger.log("Cluster is not stable")
                 return None
 
         return 1
 
     def teardown(self, node):
 
         # Make sure 'node' is up
         start = StartTest(self.CM)
         start(node)
 
         if not self._is_managed(node):
             self.logger.log("Attempting to re-enable resource management on %s" % node)
             self._set_managed(node)
             self.CM.cluster_stable()
             if not self._is_managed(node):
                 self.logger.log("Could not re-enable resource management")
                 return 0
 
         return 1
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         if self.find_ocfs2_resources(node):
             self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
             return 0
         return 1
 
     def __call__(self, node):
         self.incr("calls")
 
         pats = []
         # Conveniently, the scheduler will display this message when disabling
         # management, even if fencing is not enabled, so we can rely on it.
         managed = self.create_watch(["Delaying fencing operations"], 60)
         managed.setwatch()
 
         self._set_unmanaged(node)
 
         if not managed.lookforall():
             self.logger.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not disabled")
 
         pats = []
         pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*"))
 
         watch = self.create_watch(pats, 60, "ShutdownActivity")
         watch.setwatch()
 
         self.debug("Shutting down the cluster")
         ret = self.stopall(None)
         if not ret:
             self._set_managed(node)
             return self.failure("Couldn't shut down the cluster")
 
         self.debug("Bringing the cluster back up")
         ret = self.startall(None)
         time.sleep(5) # allow ping to update the CIB
         if not ret:
             self._set_managed(node)
             return self.failure("Couldn't restart the cluster")
 
         if self.local_badnews("ResourceActivity:", watch):
             self._set_managed(node)
             return self.failure("Resources stopped or started during cluster restart")
 
         watch = self.create_watch(pats, 60, "StartupActivity")
         watch.setwatch()
 
         # Re-enable resource management (and verify it happened).
         self._set_managed(node)
         self.CM.cluster_stable()
         if not self._is_managed(node):
             return self.failure("Could not re-enable resource management")
 
         # Ignore actions for STONITH resources
         ignore = []
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rclass == "stonith":
 
                     self.debug("Ignoring start actions for %s" % r.id)
                     ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
 
         if self.local_badnews("ResourceActivity:", watch, ignore):
             return self.failure("Resources stopped or started after resource management was re-enabled")
 
         return ret
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"resource( was|s were) active at shutdown",
         ]
 
     def is_applicable(self):
         return 1
 
 AllTestClasses.append(Reattach)
 
 
 class SpecialTest1(CTSTest):
     '''Set up a custom test to cause quorum failure issues for Andrew'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SpecialTest1"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, node):
         '''Perform the 'SpecialTest1' test for Andrew. '''
         self.incr("calls")
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Could not stop all nodes")
 
         # Test config recovery when the other nodes come up
         self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
 
         #        Start the selected node
         ret = self.restart1(node)
         if not ret:
             return self.failure("Could not start "+node)
 
         #        Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Could not start the remaining nodes")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Errors that occur as a result of the CIB being wiped
         return [
             r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
             r"error.*: Resource start-up disabled since no STONITH resources have been defined",
             r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
             r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
         ]
 
 AllTestClasses.append(SpecialTest1)
 
 
 class HAETest(CTSTest):
     '''Base class for tests involving the HAE (DLM/O2CB/OCFS2) resource stack'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "HAETest"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_loop = 1
 
     def setup(self, node):
         #  Start all nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
         return self.success()
 
     def wait_on_state(self, node, resource, expected_clones, attempts=240):
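         # Poll crm_resource until the resource is active on the expected
         # number of nodes, or we run out of attempts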
         while attempts > 0:
             active = 0
             (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
 
             # Hack until crm_resource does the right thing
             if rc == 0 and lines:
                 active = len(lines)
 
             if len(lines) == expected_clones:
                 return 1
 
             elif rc == 1:
                 self.debug("Resource %s is still inactive" % resource)
 
             elif rc == 234:
                 self.logger.log("Unknown resource %s" % resource)
                 return 0
 
             elif rc == 246:
                 self.logger.log("Cluster is inactive")
                 return 0
 
             elif rc != 0:
                 self.logger.log("Call to crm_resource failed, rc=%d" % rc)
                 return 0
 
             else:
                 self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
 
             attempts -= 1
             time.sleep(1)
 
         return 0
 
     def find_dlm(self, node):
         self.r_dlm = None
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "controld" and r.parent != "NA":
                     self.debug("Found dlm: %s" % self.r_dlm)
                     self.r_dlm = r.parent
                     return 1
         return 0
 
     def find_hae_resources(self, node):
         self.r_dlm = None
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         if self.find_dlm(node):
             self.find_ocfs2_resources(node)
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         if self.Env["Schema"] == "hae":
             return 1
         return None
 
 
 class HAERoleTest(HAETest):
     def __init__(self, cm):
         '''Lars' mount/unmount test for the HA extension. '''
         HAETest.__init__(self,cm)
         self.name = "HAERoleTest"
 
     def change_state(self, node, resource, target):
         rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s  --meta" % (resource, target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
         lpc = 0
         failed = 0
         delay = 2
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "Stopped")
             if not self.wait_on_state(node, self.r_dlm, 0):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "Started")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAERoleTest)
 
 
 class HAEStandbyTest(HAETest):
     '''Repeatedly put a node in and out of standby, verifying the HAE resources each time'''
     def __init__(self, cm):
         HAETest.__init__(self,cm)
         self.name = "HAEStandbyTest"
 
     def change_state(self, node, resource, target):
         rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
 
         lpc = 0
         failed = 0
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "true")
             if not self.wait_on_state(node, self.r_dlm, clone_max-1):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "false")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAEStandbyTest)
 
 
 class NearQuorumPointTest(CTSTest):
     '''
     This test brings larger clusters near the quorum point (50%).
     In addition, it will test doing starts and stops at the same time.
 
     Here is how I think it should work:
     - loop over the nodes and decide randomly which will be up and which
       will be down. Use a 50% probability for each of up/down.
     - figure out what to do to get into that state from the current state
     - in parallel, bring up those going up and bring down those going down.
     '''
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "NearQuorumPoint"
 
     def __call__(self, dummy):
         '''Perform the 'NearQuorumPoint' test. '''
         self.incr("calls")
         startset = []
         stopset = []
 
         stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
         #decide what to do with each node
         for node in self.Env["nodes"]:
             action = self.Env.RandomGen.choice(["start","stop"])
             #action = self.Env.RandomGen.choice(["start","stop","no change"])
             if action == "start" :
                 startset.append(node)
             elif action == "stop" :
                 stopset.append(node)
 
         self.debug("start nodes:" + repr(startset))
         self.debug("stop nodes:" + repr(stopset))
 
         #add search patterns
         watchpats = [ ]
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 #watchpats.append(self.templates["Pat:NonDC_started"] % node)
                 watchpats.append(self.templates["Pat:Local_started"] % node)
             else:
                 for stopping in stopset:
                     if self.CM.ShouldBeStatus[stopping] == "up":
                         watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
 
         if len(watchpats) == 0:
             return self.skipped()
 
         if len(startset) != 0:
             watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
 
         #begin actions
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.CM.StartaCMnoBlock(node)
 
         #get the result
         if watch.lookforall():
             self.CM.cluster_stable()
             self.CM.fencing_cleanup("NearQuorumPoint", stonith)
             return self.success()
 
         self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))
 
         #get the "bad" nodes
         upnodes = []
         for node in stopset:
             if self.CM.StataCM(node) == 1:
                 upnodes.append(node)
 
         downnodes = []
         for node in startset:
             if self.CM.StataCM(node) == 0:
                 downnodes.append(node)
 
         self.CM.fencing_cleanup("NearQuorumPoint", stonith)
         if upnodes == [] and downnodes == []:
             self.CM.cluster_stable()
 
             # Make sure they're completely down with no residue
             for node in stopset:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         if len(upnodes) > 0:
             self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))
 
         if len(downnodes) > 0:
             self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))
 
         return self.failure()
 
     def is_applicable(self):
         return 1
 
 AllTestClasses.append(NearQuorumPointTest)
 
 
 class RollingUpgradeTest(CTSTest):
     '''Perform a rolling upgrade of the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RollingUpgrade"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def setup(self, node):
         #  Stop all nodes so they can be downgraded
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.downgrade(node, None):
                 return self.failure("Couldn't downgrade %s" % node)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node, None):
                 return self.failure("Couldn't upgrade %s" % node)
 
         return self.success()
 
     def install(self, node, version, start=1, flags="--force"):
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.Env["rpm-dir"], version)
 
         self.logger.log("Installing %s on %s with %s" % (version, node, flags))
         if not self.stop(node):
             return self.failure("stop failure: "+node)
 
         rc = self.rsh(node, "mkdir -p %s" % target_dir)
         rc = self.rsh(node, "rm -f %s/*.rpm" % target_dir)
         (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
         for line in lines:
             line = line[:-1]
             rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
         rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
 
         if start and not self.start(node):
             return self.failure("start failure: "+node)
 
         return self.success()
 
     def upgrade(self, node, start=1):
         return self.install(node, self.Env["current-version"], start)
 
     def downgrade(self, node, start=1):
         return self.install(node, self.Env["previous-version"], start, "--force --nodeps")
 
     def __call__(self, node):
         '''Perform the 'Rolling Upgrade' test. '''
         self.incr("calls")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node):
                 return self.failure("Couldn't upgrade %s" % node)
 
             self.CM.cluster_stable()
 
         return self.success()
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return None
 
         if not "rpm-dir" in list(self.Env.keys()):
             return None
         if not "current-version" in list(self.Env.keys()):
             return None
         if not "previous-version" in list(self.Env.keys()):
             return None
 
         return 1
 
 #        Register RestartTest as a good test to run
 AllTestClasses.append(RollingUpgradeTest)
 
 
 class BSC_AddResource(CTSTest):
     '''Add a resource to the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "AddResource"
         self.resource_offset = 0
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
 
     def __call__(self, node):
         self.incr("calls")
         self.resource_offset = self.resource_offset + 1
 
         r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
         start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok"
 
         patterns = []
         patterns.append(start_pat % r_id)
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         ip = self.NextIP()
         if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
             return self.failure("Make resource %s failed" % r_id)
 
         failed = 0
         watch_result = watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Pattern not found: %s" % (regex))
                 failed = 1
 
         if failed:
             return self.failure("Resource pattern(s) not found")
 
         if not self.CM.cluster_stable(self.Env["DeadTime"]):
             return self.failure("Unstable cluster")
 
         return self.success()
 
     def NextIP(self):
         # Generate the next IP address after Env["IPBase"] by incrementing
         # the last field (hexadecimal for IPv6, decimal for IPv4)
         ip = self.Env["IPBase"]
         if ":" in ip:
             fields = list(ip.rpartition(":"))
             fields[2] = format(int(fields[2], 16) + 1, "x")
         else:
             fields = list(ip.rpartition('.'))
             fields[2] = str(int(fields[2]) + 1)
 
         ip = fields[0] + fields[1] + fields[2]
         self.Env["IPBase"] = ip
         return ip.strip()
 
     def make_ip_resource(self, node, id, rclass, type, ip):
         self.logger.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
         rsc_xml="""
 <primitive id="%s" class="%s" type="%s"  provider="heartbeat">
     <instance_attributes id="%s"><attributes>
         <nvpair id="%s" name="ip" value="%s"/>
     </attributes></instance_attributes>
 </primitive>""" % (id, rclass, type, id, id, ip)
 
         node_constraint = """
       <rsc_location id="run_%s" rsc="%s">
         <rule id="pref_run_%s" score="100">
           <expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
         </rule>
       </rsc_location>""" % (id, id, id, id, node)
 
         rc = 0
         (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
         if rc != 0:
             self.logger.log("Constraint creation failed: %d" % rc)
             return None
 
         (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
         if rc != 0:
             self.logger.log("Resource creation failed: %d" % rc)
             return None
 
         return 1
 
     def is_applicable(self):
         if self.Env["DoBSC"]:
             return 1
         return None
 
 AllTestClasses.append(BSC_AddResource)
 
 
 class SimulStopLite(CTSTest):
     '''Stop any active nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStopLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStopLite' setup work. '''
         self.incr("calls")
 
         self.debug("Setup: " + self.name)
 
         #     We ignore the "node" parameter...
         watchpats = [ ]
 
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.incr("WasStarted")
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
 
         if len(watchpats) == 0:
             return self.success()
 
         #     Stop all the nodes - at about the same time...
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
         self.set_timer()
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
         if watch.lookforall():
             # Make sure they're completely down, with nothing left behind
             for node in self.Env["nodes"]:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         did_fail = 0
         up_nodes = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 1:
                 did_fail = 1
                 up_nodes.append(node)
 
         if did_fail:
             return self.failure("Active nodes exist: " + repr(up_nodes))
 
         self.logger.log("Warn: All nodes stopped but CTS didnt detect: "
                     + repr(watch.unmatched))
 
         return self.failure("Missing log message: "+repr(watch.unmatched))
 
     def is_applicable(self):
         '''SimulStopLite is a setup test and never applicable'''
         return 0
 
 
 class SimulStartLite(CTSTest):
     '''Start any stopped nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStartLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStartLite' setup work. '''
         self.incr("calls")
         self.debug("Setup: " + self.name)
 
         #        We ignore the "node" parameter...
         node_list = []
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.incr("WasStopped")
                 node_list.append(node)
 
         self.set_timer()
         while len(node_list) > 0:
             # Repeat until all nodes come up
             watchpats = [ ]
 
             uppat = self.templates["Pat:NonDC_started"]
             if self.CM.upcount() == 0:
                 uppat = self.templates["Pat:Local_started"]
 
             watchpats.append(self.templates["Pat:DC_IDLE"])
             for node in node_list:
                 watchpats.append(uppat % node)
                 watchpats.append(self.templates["Pat:InfraUp"] % node)
                 watchpats.append(self.templates["Pat:PacemakerUp"] % node)
 
             #   Start all the nodes - at about the same time...
             watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
             watch.setwatch()
 
             stonith = self.CM.prepare_fencing_watcher(self.name)
 
             for node in node_list:
                 self.CM.StartaCMnoBlock(node)
 
             watch.lookforall()
 
             node_list = self.CM.fencing_cleanup(self.name, stonith)
 
             if node_list is None:
                 return self.failure("Cluster did not stabilize")
 
             # Remove node_list messages from watch.unmatched
             for node in node_list:
                 self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
                 if watch.unmatched:
                     try:
                         watch.unmatched.remove(uppat % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (uppat % node))
                     try:
                         watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
                     try:
                         watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
 
             if watch.unmatched:
                 for regex in watch.unmatched:
                     self.logger.log ("Warn: Startup pattern not found: %s" %(regex))
 
             if not self.CM.cluster_stable():
                 return self.failure("Cluster did not stabilize")
 
         did_fail = 0
         unstable = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 0:
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstarted nodes exist: " + repr(unstable))
 
         unstable = []
         for node in self.Env["nodes"]:
             if not self.CM.node_stable(node):
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstable cluster nodes exist: " + repr(unstable))
 
         return self.success()
 
     def is_applicable(self):
         '''SimulStartLite is a setup test and never applicable'''
         return 0
 
 
 def TestList(cm, audits):
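     '''Return instances of all applicable test classes, with the given audits attached.'''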
     result = []
     for testclass in AllTestClasses:
         bound_test = testclass(cm)
         if bound_test.is_applicable():
             bound_test.Audits = audits
             result.append(bound_test)
     return result
 
 
 class RemoteLXC(CTSTest):
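     '''Start remote nodes implemented as LXC containers, add resources to them,
        then clean everything up (see "Container testing" in the CTS README).'''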
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteLXC"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.num_containers = 2
         self.is_container = 1
         self.is_docker_unsafe = 1
         self.failed = 0
         self.fail_string = ""
 
     def start_lxc_simple(self, node):
 
         # restore any artifacts lying around from a previous test.
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
 
         # generate the containers, put them in the config, add some resources to them
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1"))
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2"))
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms"))
         pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
         self.set_timer("remoteSimpleInit")
         watch.lookforall()
         self.log_timer("remoteSimpleInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def cleanup_lxc_simple(self, node):
 
         pats = [ ]
         # if the test failed, attempt to clean up the cib and libvirt environment
         # as best we can
         if self.failed == 1:
             # restore libvirt and cib
             self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
             return
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1"))
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
         self.set_timer("remoteSimpleCleanup")
         watch.lookforall()
         self.log_timer("remoteSimpleCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         # cleanup libvirt
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
 
     def __call__(self, node):
         '''Perform the 'RemoteLXC' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
         if rc == 1:
             self.log("Environment test for lxc support failed.")
             return self.skipped()
 
         self.start_lxc_simple(node)
         self.cleanup_lxc_simple(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for ping",
             r"schedulerd.*: Recover (ping|lxc-ms|container)\s*\(.*\)",
             # The orphaned lxc-ms resource causes an expected transition error
             # that is a result of the scheduler not having knowledge that the
             # promotable resource used to be a clone. As a result, it looks like that 
             # resource is running in multiple locations when it shouldn't... But in
             # this instance we know why this error is occurring and that it is expected.
             r"Calculated [Tt]ransition .*pe-error",
             r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
             r"Unknown operation: fail",
             r"VirtualDomain.*ERROR: Unable to determine emulator",
         ]
 
 AllTestClasses.append(RemoteLXC)
 
 
 class RemoteDriver(CTSTest):
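     '''Base class for Pacemaker Remote tests: stop the cluster on one node, start
        pacemaker_remote there, and add an ocf:pacemaker:remote connection resource
        to turn it into a remote node, then tear the configuration back down.
        Not run directly (__call__ always fails); see RemoteBasic and friends.'''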
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = self.__class__.__name__
         self.is_docker_unsafe = 1
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
         self.remote_rsc = "remote-rsc"
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
         self.reset()
 
     def reset(self):
         self.pcmk_started = 0
         self.failed = False
         self.fail_string = ""
         self.remote_node_added = 0
         self.remote_rsc_added = 0
         self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False])
 
     def fail(self, msg):
         """ Mark test as failed. """
 
         self.failed = True
 
         # Always log the failure.
         self.logger.log(msg)
 
         # Use first failure as test status, as it's likely to be most useful.
         if not self.fail_string:
             self.fail_string = msg
 
     def get_othernode(self, node):
         for othernode in self.Env["nodes"]:
             if othernode == node:
                 # we don't want to try to use the CIB on the node we just shut down;
                 # find a cluster node that is not our soon-to-be remote node.
                 continue
             else:
                 return othernode
 
     def del_rsc(self, node, rsc):
         othernode = self.get_othernode(node)
         rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
         if rc != 0:
             self.fail("Removal of resource '%s' failed" % rsc)
 
     def add_rsc(self, node, rsc_xml):
         othernode = self.get_othernode(node)
         rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
         if rc != 0:
             self.fail("resource creation failed")
 
     def add_primitive_rsc(self, node):
         rsc_xml = """
 <primitive class="ocf" id="%s" provider="heartbeat" type="Dummy">
     <operations>
       <op id="remote-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
     </operations>
     <meta_attributes id="remote-meta_attributes"/>
 </primitive>""" % (self.remote_rsc)
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_rsc_added = 1
 
     def add_connection_rsc(self, node):
         if self.remote_use_reconnect_interval:
             # use reconnect interval and make sure to set cluster-recheck-interval as well.
             rsc_xml = """
 <primitive class="ocf" id="%s" provider="pacemaker" type="remote">
     <instance_attributes id="remote-instance_attributes"/>
         <instance_attributes id="remote-instance_attributes">
           <nvpair id="remote-instance_attributes-server" name="server" value="%s"/>
           <nvpair id="remote-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
         </instance_attributes>
     <operations>
       <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
       <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="60"/>
     </operations>
 </primitive>""" % (self.remote_node, node)
             self.rsh(self.get_othernode(node), self.templates["SetCheckInterval"] % ("45s"))
         else:
             # not using reconnect interval
             rsc_xml = """
 <primitive class="ocf" id="%s" provider="pacemaker" type="remote">
     <instance_attributes id="remote-instance_attributes"/>
         <instance_attributes id="remote-instance_attributes">
           <nvpair id="remote-instance_attributes-server" name="server" value="%s"/>
         </instance_attributes>
     <operations>
       <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
       <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="120"/>
     </operations>
 </primitive>""" % (self.remote_node, node)
 
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_node_added = 1
 
     def stop_pcmk_remote(self, node):
         # disable pcmk remote
         for i in range(10):
             rc = self.rsh(node, "service pacemaker_remote stop")
             if rc != 0:
                 time.sleep(6)
             else:
                 break
 
     def start_pcmk_remote(self, node):
         for i in range(10):
             rc = self.rsh(node, "service pacemaker_remote start")
             if rc != 0:
                 time.sleep(6)
             else:
                 self.pcmk_started = 1
                 break
 
     def kill_pcmk_remote(self, node):
         """ Simulate a Pacemaker Remote daemon failure. """
 
         # We kill the process to prevent a graceful stop,
         # then stop it to prevent the OS from restarting it.
         self.rsh(node, "killall -9 pacemaker-remoted")
         self.stop_pcmk_remote(node)
 
     def start_metal(self, node):
         pcmk_started = 0
 
         # make sure the resource doesn't already exist for some reason
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
 
         if not self.stop(node):
             self.fail("Failed to shutdown cluster node %s" % node)
             return
 
         self.start_pcmk_remote(node)
 
         if self.pcmk_started == 0:
             self.fail("Failed to start pacemaker_remote on node %s" % node)
             return
 
         # Convert node to baremetal now that it has shutdown the cluster stack
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         self.add_connection_rsc(node)
 
         self.set_timer("remoteMetalInit")
         watch.lookforall()
         self.log_timer("remoteMetalInit")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
     def migrate_connection(self, node):
         if self.failed:
             return
 
         pats = [ ]
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node))
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None)
         if rc != 0:
             self.fail("failed to move remote node connection resource")
             return
 
         self.set_timer("remoteMetalMigrate")
         watch.lookforall()
         self.log_timer("remoteMetalMigrate")
 
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
     def fail_rsc(self, node):
         if self.failed:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node))
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
         watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         self.debug("causing dummy rsc to fail.")
 
         rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")
 
         self.set_timer("remoteRscFail")
         watch.lookforall()
         self.log_timer("remoteRscFail")
         if watch.unmatched:
             self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
 
     def fail_connection(self, node):
         if self.failed:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node)
         watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         # force stop the pcmk remote daemon. this will result in fencing
         self.debug("Force stopped active remote node")
         self.kill_pcmk_remote(node)
 
         self.debug("Waiting for remote node to be fenced.")
         self.set_timer("remoteMetalFence")
         watch.lookforall()
         self.log_timer("remoteMetalFence")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
         self.debug("Waiting for the remote node to come back up")
         self.CM.ns.WaitForNodeToComeUp(node, 120)
 
         pats = [ ]
         watch = self.create_watch(pats, 240)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
 
         # start the remote node again and watch it integrate back into the cluster.
         self.start_pcmk_remote(node)
         if self.pcmk_started == 0:
             self.fail("Failed to start pacemaker_remote on node %s" % node)
             return
 
         self.debug("Waiting for remote node to rejoin cluster after being fenced.")
         self.set_timer("remoteMetalRestart")
         watch.lookforall()
         self.log_timer("remoteMetalRestart")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
     def add_dummy_rsc(self, node):
         if self.failed:
             return
 
         # verify we can put a resource on the remote node
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # Add a resource that must live on remote-node
         self.add_primitive_rsc(node)
 
         # force that rsc to prefer the remote node. 
         (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None)
         if rc != 0:
             self.fail("Failed to place remote resource on remote node.")
             return
 
         self.set_timer("remoteMetalRsc")
         watch.lookforall()
         self.log_timer("remoteMetalRsc")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
     def test_attributes(self, node):
         if self.failed:
             return
 
         # This verifies permanent attributes can be set on a remote-node. It also
         # verifies the remote-node can edit its own cib node section remotely.
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
             return
 
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail("Failed to get remote-node attribute")
             return
 
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail("Failed to delete remote-node attribute")
             return
 
     def cleanup_metal(self, node):
         if self.pcmk_started == 0:
             return
 
         pats = [ ]
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc))
         if self.remote_node_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node))
 
         self.set_timer("remoteMetalCleanup")
 
         if self.remote_use_reconnect_interval:
             self.debug("Cleaning up re-check interval")
             self.rsh(self.get_othernode(node), self.templates["ClearCheckInterval"])
 
         if self.remote_rsc_added == 1:
 
             # Remove dummy resource added for remote node tests
             self.debug("Cleaning up dummy rsc put on remote node")
             self.rsh(node, "crm_resource -U -r %s" % self.remote_rsc)
             self.del_rsc(node, self.remote_rsc)
 
         if self.remote_node_added == 1:
 
             # Remove remote node's connection resource
             self.debug("Cleaning up remote node connection resource")
             self.rsh(node, "crm_resource -U -r %s" % (self.remote_node))
             self.del_rsc(node, self.remote_node)
 
         watch.lookforall()
         self.log_timer("remoteMetalCleanup")
 
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
         self.stop_pcmk_remote(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.remote_node_added == 1:
             # Remove remote node itself
             self.debug("Cleaning up node entry for remote node")
             self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node)
 
     def setup_env(self, node):
 
         self.remote_node = "remote-%s" % (node)
 
         # we are assuming if all nodes have a key, that it is
         # the right key... If any node doesn't have a remote
         # key, we regenerate it everywhere.
         if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]):
             return
 
         # create key locally
         (handle, keyfile) = tempfile.mkstemp(".cts")
         os.close(handle)
         devnull = open(os.devnull, 'wb')
         subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
             stdout=devnull, stderr=devnull)
         devnull.close()
 
         # sync key throughout the cluster
         for node in self.Env["nodes"]:
             self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker")
             self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node)
             self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
             self.rsh(node, "chmod 0640 /etc/pacemaker/authkey")
         os.unlink(keyfile)
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return False
 
         for node in self.Env["nodes"]:
             rc = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
             if rc != 0:
                 return False
         return True
 
     def start_new_test(self, node):
         self.incr("calls")
         self.reset()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("setup failed: could not start all nodes")
 
         self.setup_env(node)
         self.start_metal(node)
         self.add_dummy_rsc(node)
         return True
 
     def __call__(self, node):
         return self.failure("This base class is not meant to be called directly.")
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
-        return [ """is running on remote.*which isn't allowed""",
-                 """Connection terminated""",
-                 """Failed to send remote""",
+        return [ r"""is running on remote.*which isn't allowed""",
+                 r"""Connection terminated""",
+                 r"""Could not send remote""",
                 ]
 
 # RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses
 
 
 class RemoteBasic(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteBaremetal' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.test_attributes(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
 AllTestClasses.append(RemoteBasic)
 
 class RemoteStonithd(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteStonithd' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.fail_connection(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         if not RemoteDriver.is_applicable(self):
             return False
 
         if "DoFencing" in list(self.Env.keys()):
             return self.Env["DoFencing"]
 
         return True
 
     def errorstoignore(self):
         ignore_pats = [
             r"Lost connection to Pacemaker Remote node",
             r"Software caused connection abort",
             r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
             r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
             r"schedulerd.*:\s+Recover remote-.*\s*\(.*\)",
             r"Calculated [Tt]ransition .*pe-error",
             r"error.*: Resource .*ocf::.* is active on 2 nodes attempting recovery",
         ]
 
         ignore_pats.extend(RemoteDriver.errorstoignore(self))
         return ignore_pats
 
 AllTestClasses.append(RemoteStonithd)
 
 
 class RemoteMigrate(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteMigrate' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.migrate_connection(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
 AllTestClasses.append(RemoteMigrate)
 
 
 class RemoteRscFailure(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteRscFailure' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         # This is an important step. We are migrating the connection
         # before failing the resource. This verifies that the migration
         # has properly maintained control over the remote-node.
         self.migrate_connection(node)
 
         self.fail_rsc(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         ignore_pats = [
             r"schedulerd.*: Recover remote-rsc\s*\(.*\)",
             r"Dummy.*: No process state file found",
         ]
 
         ignore_pats.extend(RemoteDriver.errorstoignore(self))
         return ignore_pats
 
 AllTestClasses.append(RemoteRscFailure)
 
 # vim:ts=4:sw=4:et:
diff --git a/cts/README.md b/cts/README.md
index 95603b046c..659b69fbc9 100644
--- a/cts/README.md
+++ b/cts/README.md
@@ -1,326 +1,344 @@
 # Pacemaker Cluster Test Suite (CTS)
 
 ## Purpose
 
 Pacemaker's CTS is primarily for developers and packagers of the Pacemaker
 source code, but it can be useful for users who wish to see how their cluster
 will react to various situations.
 
 CTS consists of two main parts: a set of regression tests for verifying the
 functionality of particular Pacemaker components, and a cluster exerciser for
 intensively testing the behavior of an entire working cluster.
 
 The primary regression test front end is cts-regression in this directory. Run
 it with the --help option to see its usage. The regression tests can be run on
 any single cluster node. The cluster should be stopped on that node when
 running the tests.
 
 The rest of this document focuses on the cluster exerciser. The cluster
 exerciser runs a randomized series of predefined tests on the cluster. CTS can
 be run against a pre-existing cluster configuration or overwrite the existing
 configuration with a test configuration.
 
 
+## Helpers
+
+Some of the component regression tests and the cluster exerciser require
+certain helpers to be installed as root. This includes a dummy LSB init script,
+dummy systemd service, etc. The tests will install these as needed and
+uninstall them when done.
+
+This means that the cluster configuration created by the cluster exerciser
+will generate failures if started manually after the exerciser exits. However,
+the helper installer can be run manually to make the configuration usable, if
+you want to do your own further testing with it:
+
+    /usr/libexec/pacemaker/cts-support install
+
+As you might expect, you can also remove the helpers with:
+
+    /usr/libexec/pacemaker/cts-support uninstall
+
 ## Requirements
 
 * Three or more machines (one test exerciser and two or more test cluster
   machines).
 
 * The test cluster machines should be on the same subnet and have journalling
   filesystems (ext3, ext4, xfs, etc.) for all of their filesystems other than
   /boot. You also need a number of free IP addresses on that subnet if you
   intend to test mutual IP address takeover.
 
 * The test exerciser machine doesn't need to be on the same subnet as the test
   cluster machines.  Minimal demands are made on the exerciser machine - it
   just has to stay up during the tests.
 
 * It helps a lot in tracking problems if all machines' clocks are closely
   synchronized. NTP does this automatically, but you can do it by hand if you
   want.
 
 * The exerciser needs to be able to ssh over to the cluster nodes as root
   without a password challenge. Configure ssh accordingly (see the Mini-HOWTO
   at the end of this document for more details).
 
 * The exerciser needs to be able to resolve the machine names of the
   test cluster - either by DNS or by /etc/hosts.
 
 * CTS is not guaranteed to run on all platforms that pacemaker itself does.
   It calls commands such as service that may not be provided by all OSes.
 
 
 ## Preparation
 
 Install Pacemaker (including CTS) on all machines. These scripts are
 coordinated with particular versions of Pacemaker, so you need the same version
 of CTS as the rest of Pacemaker, and you need the same version of
 pacemaker and CTS on both the test exerciser and the test cluster machines.
 
 You can install CTS from source, although many distributions provide
 packages that include it (e.g. pacemaker-cts or pacemaker-dev).
 Typically, packages will install CTS as /usr/share/pacemaker/tests/cts.
 
 Configure cluster communications (Corosync) on the
 cluster machines and verify everything works.
 
 NOTE: Do not run the cluster on the test exerciser machine.
 
 NOTE: Wherever machine names are mentioned in these configuration files,
 they must match the machines' `uname -n` name.  This may or may not match
 the machines' FQDN (fully qualified domain name) - it depends on how
 you (and your OS) have named the machines.
 
 
 ## Run CTS
 
 Now assuming you did all this, what you need to do is run CTSlab.py:
 
     python ./CTSlab.py [options] number-of-tests-to-run
 
 You must specify which nodes are part of the cluster with --nodes, e.g.:
 
     --nodes "pcmk-1 pcmk-2 pcmk-3"
 
 Most people will want to save the output with --outputfile, e.g.:
 
     --outputfile ~/cts.log
 
 Unless you want to test your pre-existing cluster configuration, you also want:
 
     --clobber-cib
     --populate-resources
     --test-ip-base $IP    # e.g. --test-ip-base 192.168.9.100
 
 and configure some sort of fencing:
 
     --stonith $TYPE  # e.g. "--stonith xvm" to use fence_xvm or "--stonith ssh" to use external/ssh
 
 A complete command line might look like:
 
     python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \
         --clobber-cib --populate-resources --test-ip-base 192.168.9.100   \
         --stonith xvm 50
 
 For more options, use the --help option.
 
 NOTE: A perhaps more convenient way to assemble a command line like the above
       is to use the cluster\_test script which, at least in the source
       repository, sits in the same directory as this very file.
 
 To extract the result of a particular test, run:
 
     crm_report -T $test
 
 
 ## Optional/advanced testing
 
 ### Memory testing
 
 Pacemaker and CTS have various options for testing memory management. On the
 cluster nodes, pacemaker components will use various environment variables to
 control these options. How these variables are set varies by OS, but usually
 they are set in the /etc/sysconfig/pacemaker or /etc/default/pacemaker file.
 
 Valgrind is a program for detecting memory management problems (such as
 use-after-free errors). If you have valgrind installed, you can enable it by
 setting the following environment variables on all cluster nodes:
 
     PCMK_valgrind_enabled=pacemaker-attrd,pacemaker-based,pacemaker-controld,pacemaker-execd,pacemaker-fenced,pacemaker-schedulerd
     VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25
         --log-file=/var/lib/pacemaker/valgrind-%p
         --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions
         --gen-suppressions=all"
 
 and running CTS with these options:
 
     --valgrind-tests --valgrind-procs="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-schedulerd pacemaker-fenced"
 
 These options should only be set while specifically testing memory management,
 because they may slow down the cluster significantly, and they will disable
 writes to the CIB. If desired, you can enable valgrind on a subset of pacemaker
 components rather than all of them as listed above.
 
 Valgrind will put a text file for each process in the location specified by
 valgrind's --log-file option. For explanations of the messages valgrind
 generates, see http://valgrind.org/docs/manual/mc-manual.html
 
 Separately, if you are using the GNU C library, the G\_SLICE, MALLOC\_PERTURB\_,
 and MALLOC\_CHECK\_ environment variables can be set to affect the library's
 memory management functions.
 
 When using valgrind, G\_SLICE should be set to "always-malloc", which helps
 valgrind track memory by always using the malloc() and free() routines
 directly. When not using valgrind, G\_SLICE can be left unset, or set to
 "debug-blocks", which enables the C library to catch many memory errors
 but may impact performance.
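 
 Example (when testing with valgrind, set in the same file as the other
 Pacemaker environment variables):
 
     G_SLICE=always-malloc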
 
 If the MALLOC\_PERTURB\_ environment variable is set to an 8-bit integer, the C
 library will initialize all newly allocated bytes of memory to the integer
 value, and will set all newly freed bytes of memory to the bitwise inverse of
 the integer value. This helps catch uses of uninitialized or freed memory
 blocks that might otherwise go unnoticed. Example:
 
     MALLOC_PERTURB_=221
 
 If the MALLOC\_CHECK\_ environment variable is set, the C library will check for
 certain heap corruption errors. The most useful value in testing is 3, which
 will cause the library to print a message to stderr and abort execution.
 Example:
 
     MALLOC_CHECK_=3
 
 Valgrind should be enabled for either all nodes or none, but the C library
 variables may be set differently on different nodes.
 
 
 ### Remote node testing
 
 If the pacemaker-remoted daemon is installed on all cluster nodes, CTS will
 enable remote node tests.
 
 The remote node tests choose a random node, stop the cluster on it, start
 pacemaker-remoted on it, and add an ocf:pacemaker:remote resource to turn it
 into a remote node. When the test is done, CTS will turn the node back into
 a cluster node.
 
 To avoid conflicts, CTS will rename the node, prefixing the original node name
 with "remote-". For example, "pcmk-1" will become "remote-pcmk-1".
 
 The name change may require special stonith configuration, if the fence agent
 expects the node name to be the same as its hostname. A common approach is to
 specify the "remote-" names in pcmk\_host\_list. If you use pcmk\_host\_list=all,
 CTS will expand that to all cluster nodes and their "remote-" names.
 You may additionally need a pcmk\_host\_map argument to map the "remote-" names
 to the hostnames. Example:
 
     --stonith xvm --stonith-args \
     pcmk_host_list=all,pcmk_host_map=remote-pcmk-1:pcmk-1;remote-pcmk-2:pcmk-2
 
 ### Remote node testing with valgrind
 
 When running the remote node tests, the pacemaker components on the cluster
 nodes can be run under valgrind as described in the "Memory testing" section.
 However, pacemaker-remoted cannot be run under valgrind that way, because it is
 started by the OS's regular boot system and not by pacemaker.
 
 Details vary by system, but the goal is to set the VALGRIND\_OPTS environment
 variable and then start pacemaker-remoted by prefixing it with the path to
 valgrind.
 
 The init script and systemd service file provided with pacemaker-remoted will
 load the pacemaker environment variables from the same location used by other
 pacemaker components, so VALGRIND\_OPTS will be set correctly if using one of
 those.
 
 For an OS using systemd, you can override the ExecStart parameter to run
 valgrind. For example:
 
     mkdir /etc/systemd/system/pacemaker_remote.service.d
     cat >/etc/systemd/system/pacemaker_remote.service.d/valgrind.conf <<EOF
     [Service]
     ExecStart=
     ExecStart=/usr/bin/valgrind /usr/sbin/pacemaker-remoted
     EOF
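 
 After creating or changing the override, you will likely need to reload systemd
 and restart the service for it to take effect:
 
     systemctl daemon-reload
     systemctl restart pacemaker_remote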
 
 ### Container testing
 
 If the --container-tests option is given to CTS, it will enable
 testing of LXC resources (currently only the RemoteLXC test,
 which starts a remote node using an LXC container).
 
 The container tests have additional package dependencies (see the toplevel
 README). Also, SELinux must be enabled (in either permissive or enforcing mode),
 libvirtd must be enabled and running, and root must be able to ssh without a
 password between all cluster nodes (not just from the test machine). Before
 running the tests, you can verify your environment with:
 
     /usr/share/pacemaker/tests/cts/lxc_autogen.sh -v
 
 LXC tests will create two containers with hardcoded parameters: a NAT'ed bridge
 named virbr0 using the IP network 192.168.123.0/24 will be created on the
 cluster node hosting the containers; the host will be assigned
 52:54:00:A8:12:35 as the MAC address and 192.168.123.1 as the IP address.
 Each container will be assigned a random MAC address starting with 52:54:,
 the IP address 192.168.123.11 or 192.168.123.12, the hostname lxc1 or lxc2
 (which will be added to the host's /etc/hosts file), and 196MB RAM.
 
 The test will revert all of the configuration when it is done.
 
 
 ## Mini-HOWTO: Allow passwordless remote SSH connections
 
 The CTS scripts run "ssh -l root" so you don't have to do any of your testing
 logged in as root on the test machine. Here is how to allow such connections
 without requiring a password to be entered each time:
 
 * On your test exerciser, create an SSH key if you do not already have one.
   Most commonly, SSH keys will be in your ~/.ssh directory, with the
   private key file not having an extension, and the public key file
   named the same with the extension ".pub" (for example, ~/.ssh/id\_rsa.pub).
 
   If you don't already have a key, you can create one with:
 
       ssh-keygen -t rsa
 
 * From your test exerciser, authorize your SSH public key for root on all test
   machines (both the exerciser and the cluster test machines):
 
       ssh-copy-id -i ~/.ssh/id_rsa.pub root@$MACHINE
 
   You will probably have to provide your password, and possibly say
   "yes" to some questions about accepting the identity of the test machines.
 
   The above assumes you have an RSA SSH key in the specified location;
   if you have some other type of key (DSA, ECDSA, etc.), use its file name
   in the -i option above.
 
 * To test, try this command from the exerciser machine for each
   of your cluster machines, and for the exerciser machine itself.
 
       ssh -l root $MACHINE
 
   If this works without prompting for a password, you're in business.
   If not, look at the documentation for your version of ssh.
 
 
 ## Notes on maintenance
 
 ### Tests for scheduler
 
 The source `*.xml` files should preferably be kept in sync with the newest
 major schema version (major only, which is enough), unless justified
 otherwise (e.g. a test exercises a feature backed only by the special
 `pacemaker-next` schema version), since these tests are not meant to double
 as schema upgrade tests (unless some cases are expressly designated as such).
 
 Currently, unless something goes wrong, the procedure for upgrading these
 tests en masse is as easy as:
 
     cd "$(git rev-parse --show-toplevel)/cts"  # if not already
     pushd "$(git rev-parse --show-toplevel)/xml"
     ./regression.sh cts_scheduler -G
     popd
     git add --interactive .
     git commit -m 'XML: upgrade-M.N.xsl: apply on scheduler CTS test cases'
     git reset HEAD && git checkout .  # if some differences still remain
     ./cts-scheduler  # absolutely vital to check nothing got broken!
 
 Now, sadly, there's no proven automated way to minimize instances like this:
 
     <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache">
     </primitive>
 
 that may be left behind, into the more canonical:
 
     <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache"/>
 
 so manual editing is needed; alternatively, passing `--format` or `--c14n`
 to `xmllint` may be of help (as long as it introduces no other changes).
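 
 For instance, something along these lines (a sketch using a placeholder file
 name) can rewrite a test case in place:
 
     # verify afterwards that only formatting changed
     xmllint --format foo.xml > foo.xml.new && mv foo.xml.new foo.xml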
 
 If the overall process gets stuck anywhere, apply common sense. The initial
 part of the above recipe can be repeated at any time to verify that nothing
 is left to upgrade, which is the desired state.  Note that the `regression.sh`
 script implicitly validates both the input and the output whenever an upgrade
 takes place, so no separate revalidation is needed in the happy case.
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index ac5e3c23fa..8b6b2ac9b7 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1295 +1,1296 @@
 #!@BASH_PATH@
 #
 # Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 USAGE_TEXT="Usage: cts-scheduler [<options>]
 Options:
  --help                 Display this text, then exit
  -V, --verbose          Display any differences from expected output
  --run TEST             Run only single specified test
  --update               Update expected results with actual results
  -b, --binary PATH      Specify path to crm_simulate
  -i, --io-dir PATH      Specify path to regression test data directory
  -v, --valgrind         Run all commands under valgrind
  --valgrind-dhat        Run all commands under valgrind with heap analyzer
  --valgrind-skip-output If running under valgrind, don't display output
  --testcmd-options      Additional options for command under test"
 
 SBINDIR="@sbindir@"
 BUILDDIR="@abs_top_builddir@"
 CRM_SCHEMA_DIRECTORY="@CRM_SCHEMA_DIRECTORY@"
 
 # If readlink supports -e (i.e. GNU), use it
 readlink -e / >/dev/null 2>/dev/null
 if [ $? -eq 0 ]; then
     test_home="$(dirname "$(readlink -e "$0")")"
 else
     test_home="$(dirname "$0")"
 fi
 
 io_dir="$test_home/scheduler"
 failed="$test_home/.regression.failed.diff"
 test_binary=
 testcmd_options=
 
 single_test=
 verbose=0
 num_failed=0
 num_tests=0
 VALGRIND_CMD=""
 VALGRIND_OPTS="-q
     --gen-suppressions=all
     --log-file=%q{valgrind_output}
     --time-stamp=yes
     --trace-children=no
     --show-reachable=no
     --leak-check=full
     --num-callers=20
     --suppressions=$test_home/valgrind-pcmk.suppressions"
 VALGRIND_DHAT_OPTS="--tool=exp-dhat
     --log-file=%q{valgrind_output}
     --time-stamp=yes
     --trace-children=no
     --show-top-n=100
     --num-callers=4"
 diff_opts="--ignore-all-space --ignore-blank-lines -u -N"
 
 # These constants must track crm_exit_t values
 CRM_EX_OK=0
 CRM_EX_ERROR=1
 CRM_EX_NOT_INSTALLED=5
 CRM_EX_USAGE=64
 CRM_EX_NOINPUT=66
 
 EXITCODE=$CRM_EX_OK
 
 function info() {
     printf "$*\n"
 }
 
 function error() {
     printf "      * ERROR:   $*\n"
 }
 
 function failed() {
     printf "      * FAILED:  $*\n"
 }
 
 function show_test() {
     name=$1; shift
     printf "  Test %-25s $*\n" "$name:"
 }
 
 # Normalize scheduler output for comparison
 normalize() {
     for NORMALIZE_FILE in "$@"; do
         # sed -i is not portable :-(
         sed -e 's/crm_feature_set="[^"]*"//' \
             -e 's/batch-limit="[0-9]*"//'    \
             "$NORMALIZE_FILE" > "${NORMALIZE_FILE}.$$"
         mv -- "${NORMALIZE_FILE}.$$" "$NORMALIZE_FILE"
     done
 }
 
 info "Test home is:\t$test_home"
 
 create_mode="false"
 while [ $# -gt 0 ] ; do
     case "$1" in
         -V|--verbose)
             verbose=1
             shift
             ;;
         -v|--valgrind)
             export G_SLICE=always-malloc
             VALGRIND_CMD="valgrind $VALGRIND_OPTS"
             shift
             ;;
         --valgrind-dhat)
             VALGRIND_CMD="valgrind $VALGRIND_DHAT_OPTS"
             shift
             ;;
         --valgrind-skip-output)
             VALGRIND_SKIP_OUTPUT=1
             shift
             ;;
         --update)
             create_mode="true"
             shift
             ;;
         --run)
             single_test=$(basename "$2" ".xml")
             shift 2
             break # any remaining arguments will be passed to test command
             ;;
         -b|--binary)
             test_binary="$2"
             shift 2
             ;;
         -i|--io-dir)
             io_dir="$2"
             shift 2
             ;;
         --help)
             echo "$USAGE_TEXT"
             exit $CRM_EX_OK
             ;;
         --testcmd-options)
             testcmd_options=$2
             shift 2
             ;;
         *)
             error "unknown option: $1"
             exit $CRM_EX_USAGE
             ;;
     esac
 done
 
 if [ -z "$PCMK_schema_directory" ]; then
     if [ -d "$BUILDDIR/xml" ]; then
         export PCMK_schema_directory="$BUILDDIR/xml"
     elif [ -d "$CRM_SCHEMA_DIRECTORY" ]; then
         export PCMK_schema_directory="$CRM_SCHEMA_DIRECTORY"
     fi
 fi
 
 if [ -z "$test_binary" ]; then
     if [ -x "$BUILDDIR/tools/crm_simulate" ]; then
         test_binary="$BUILDDIR/tools/crm_simulate"
     elif [ -x "$SBINDIR/crm_simulate" ]; then
         test_binary="$SBINDIR/crm_simulate"
     fi
 fi
 if [ ! -x "$test_binary" ]; then
     error "Test binary $test_binary not found"
     exit $CRM_EX_NOT_INSTALLED
 fi
 
 info "Test binary is:\t$test_binary"
 if [ -n "$PCMK_schema_directory" ]; then
     info "Schema home is:\t$PCMK_schema_directory"
 fi
 if [ "x$VALGRIND_CMD" != "x" ]; then
     info "Activating memory testing with valgrind";
 fi
 
 info " "
 
 test_cmd="$VALGRIND_CMD $test_binary $testcmd_options"
 #echo $test_cmd
 
 if [ "$(whoami)" != "root" ]; then
     declare -x CIB_shadow_dir=/tmp
 fi
 
 # do_test <base-name> <description> [--rc <expected-rc>] [extra crm_simulate options]
 # Run one scheduler regression test and compare its outputs (transition graph,
 # dot file, allocation scores, summary, stderr) against stored expected results.
 do_test() {
     did_fail=0
     expected_rc=0
     num_tests=$(( $num_tests + 1 ))
 
     base=$1; shift
     name=$1; shift
 
     input=$io_dir/${base}.xml
     output=$io_dir/${base}.out
     expected=$io_dir/${base}.exp
 
     dot_expected=$io_dir/${base}.dot
     dot_output=$io_dir/${base}.pe.dot
 
     scores=$io_dir/${base}.scores
     score_output=$io_dir/${base}.scores.pe
 
     stderr_expected=$io_dir/${base}.stderr
     stderr_output=$io_dir/${base}.stderr.pe
 
     summary=$io_dir/${base}.summary
     summary_output=$io_dir/${base}.summary.pe
 
     valgrind_output=$io_dir/${base}.valgrind
     export valgrind_output
 
     if [ "x$1" = "x--rc" ]; then
         expected_rc=$2
         shift; shift;
     fi
 
     show_test "$base" "$name"
 
     if [ ! -f $input ]; then
         error "No input";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         return $CRM_EX_NOINPUT;
     fi
 
     if [ "$create_mode" != "true" ] && [ ! -f "$expected" ]; then
         error "no stored output";
         return $CRM_EX_NOINPUT;
     fi
 
 #    ../admin/crm_verify -X $input
     if [ ! -z "$single_test" ]; then
         echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -S" "$@"
         CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \
             -G "$output" -S "$@" 2>&1 | tee "$summary_output"
     else
         CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -S &> "$summary_output"
     fi
 
     CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \
         -G "$output" -SQ -s "$@" 2> "$stderr_output" > "$score_output"
     rc=$?
 
     if [ $rc -ne $expected_rc ]; then
         failed "Test returned: $rc";
         did_fail=1
         echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -SQ -s" "$@"
     fi
 
     if [ -z "$VALGRIND_SKIP_OUTPUT" ]; then
         if [ -s "${valgrind_output}" ]; then
             error "Valgrind reported errors";
             did_fail=1
             cat ${valgrind_output}
         fi
         rm -f ${valgrind_output}
     fi
 
     if [ -s core ]; then
         error "Core-file detected: core.${base}";
         did_fail=1
         rm -f $test_home/core.$base
         mv core $test_home/core.$base
     fi
 
     if [ -e "$stderr_expected" ]; then
 
         diff $diff_opts $stderr_expected $stderr_output >/dev/null
         rc2=$?
         if [ $rc2 -ne 0 ]; then
             failed "stderr changed";
             diff $diff_opts $stderr_expected $stderr_output 2>/dev/null >> $failed
             echo "" >> $failed
             did_fail=1
         fi
 
     elif [ -s "$stderr_output" ]; then
         error "Output was written to stderr"
         did_fail=1
         cat $stderr_output
     fi
     rm -f $stderr_output
 
     if [ ! -s $output ]; then
         error "No graph produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm -f $output
         return $CRM_EX_ERROR;
     fi
 
     if [ ! -s $dot_output ]; then
         error "No dot-file summary produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm -f $output
         return $CRM_EX_ERROR;
     else
         echo "digraph \"g\" {" > $dot_output.sort
         LC_ALL=POSIX sort -u $dot_output | grep -v -e '^}$' -e digraph >> $dot_output.sort
         echo "}" >> $dot_output.sort
         mv -f $dot_output.sort $dot_output
     fi
 
     if [ ! -s $score_output ]; then
         error "No allocation scores produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm $output
         return $CRM_EX_ERROR;
     else
         LC_ALL=POSIX sort $score_output > $score_output.sorted
         mv -f $score_output.sorted $score_output
     fi
 
     if [ "$create_mode" = "true" ]; then
         cp "$output" "$expected"
         cp "$dot_output" "$dot_expected"
         cp "$score_output" "$scores"
         cp "$summary_output" "$summary"
         info "  Updated expected outputs"
     fi
 
     diff $diff_opts $summary $summary_output >/dev/null
     rc2=$?
     if [ $rc2 -ne 0 ]; then
         failed "summary changed";
         diff $diff_opts $summary $summary_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
 
     diff $diff_opts $dot_expected $dot_output >/dev/null
     rc=$?
     if [ $rc -ne 0 ]; then
         failed "dot-file summary changed";
         diff $diff_opts $dot_expected $dot_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     else
         rm -f $dot_output
     fi
 
     normalize "$expected" "$output"
     diff $diff_opts $expected $output >/dev/null
     rc2=$?
     if [ $rc2 -ne 0 ]; then
         failed "xml-file changed";
         diff $diff_opts $expected $output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
 
     diff $diff_opts $scores $score_output >/dev/null
     rc=$?
     if [ $rc -ne 0 ]; then
         failed "scores-file changed";
         diff $diff_opts $scores $score_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
     rm -f $output $score_output $summary_output
     if [ $did_fail -eq 1 ]; then
         num_failed=$(( $num_failed + 1 ))
         return $CRM_EX_ERROR
     fi
     return $CRM_EX_OK
 }
 
 function test_results {
     if [ $num_failed -ne 0 ]; then
         if [ -s "$failed" ]; then
             if [ $verbose -eq 1 ]; then
                 error "Results of $num_failed failed tests (out of $num_tests):"
                 cat $failed
             else
                 error "Results of $num_failed failed tests (out of $num_tests) are in $failed"
                 error "Use -V to display them after running the tests"
             fi
         else
             error "$num_failed (of $num_tests) tests failed (no diff results)"
             rm $failed
         fi
         EXITCODE=$CRM_EX_ERROR
     fi
 }
 
 # zero out the error log
 true > $failed
 
 if [ -n "$single_test" ]; then
     do_test "$single_test" "Single shot" "$@"
     TEST_RC=$?
     cat "$failed"
     exit $TEST_RC
 fi
 
 DO_VERSIONED_TESTS=0
 
 info Performing the following tests from $io_dir
 echo ""
 
 do_test simple1 "Offline     "
 do_test simple2 "Start       "
 do_test simple3 "Start 2     "
 do_test simple4 "Start Failed"
 do_test simple6 "Stop Start  "
 do_test simple7 "Shutdown    "
 #do_test simple8 "Stonith     "
 #do_test simple9 "Lower version"
 #do_test simple10 "Higher version"
 do_test simple11 "Priority (ne)"
 do_test simple12 "Priority (eq)"
 do_test simple8 "Stickiness"
 
 echo ""
 do_test group1 "Group                   "
 do_test group2 "Group + Native          "
 do_test group3 "Group + Group           "
 do_test group4 "Group + Native (nothing)"
 do_test group5 "Group + Native (move)   "
 do_test group6 "Group + Group (move)    "
 do_test group7 "Group colocation"
 do_test group13 "Group colocation (cant run)"
 do_test group8 "Group anti-colocation"
 do_test group9 "Group recovery"
 do_test group10 "Group partial recovery"
 do_test group11 "Group target_role"
 do_test group14 "Group stop (graph terminated)"
 do_test group15 "Negative group colocation"
 do_test bug-1573 "Partial stop of a group with two children"
 do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
 do_test bug-lf-2613 "Move group on failure"
 do_test bug-lf-2619 "Move group on clone failure"
 do_test group-fail "Ensure stop order is preserved for partially active groups"
 do_test group-unmanaged "No need to restart r115 because r114 is unmanaged"
 do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails"
 do_test group-dependents "Account for the location preferences of things colocated with a group"
 
 echo ""
 do_test rsc_dep1 "Must not     "
 do_test rsc_dep3 "Must         "
 do_test rsc_dep5 "Must not 3   "
 do_test rsc_dep7 "Must 3       "
 do_test rsc_dep10 "Must (but cant)"
 do_test rsc_dep2  "Must (running) "
 do_test rsc_dep8  "Must (running : alt) "
 do_test rsc_dep4  "Must (running + move)"
 do_test asymmetric "Asymmetric - require explicit location constraints"
 
 echo ""
 do_test orphan-0 "Orphan ignore"
 do_test orphan-1 "Orphan stop"
 do_test orphan-2 "Orphan stop, remove failcount"
 
 echo ""
 do_test params-0 "Params: No change"
 do_test params-1 "Params: Changed"
 do_test params-2 "Params: Resource definition"
 do_test params-4 "Params: Reload"
 do_test params-5 "Params: Restart based on probe digest"
 do_test novell-251689 "Resource definition change + target_role=stopped"
 do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
 do_test params-6 "Params: Detect reload in previously migrated resource"
 do_test nvpair-id-ref "Support id-ref in nvpair with optional name"
 do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed"
 do_test reload-becomes-restart "Cancel reload if restart becomes required"
 
 echo ""
 do_test target-0 "Target Role : baseline"
 do_test target-1 "Target Role : master"
 do_test target-2 "Target Role : invalid"
 
 echo ""
 do_test base-score "Set a node's default score for all nodes"
 
 echo ""
 do_test date-1 "Dates" -t "2005-020"
 do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
 do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
 do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" 
 do_test probe-0 "Probe (anon clone)"
 do_test probe-1 "Pending Probe"
 do_test probe-2 "Correctly re-probe cloned groups"
 do_test probe-3 "Probe (pending node)"
 do_test probe-4 "Probe (pending node + stopped resource)"
 do_test standby "Standby"
 do_test comments "Comments"
 
 echo ""
 do_test one-or-more-0 "Everything starts"
 do_test one-or-more-1 "Nothing starts because of A"
 do_test one-or-more-2 "D can start because of C"
 do_test one-or-more-3 "D cannot start because of B and C"
 do_test one-or-more-4 "D cannot start because of target-role"
 do_test one-or-more-5 "Start A and F even though C and D are stopped"
 do_test one-or-more-6 "Leave A running even though B is stopped"
 do_test one-or-more-7 "Leave A running even though C is stopped"
 do_test bug-5140-require-all-false "Allow basegrp:0 to stop"
 do_test clone-require-all-1 "clone B starts node 3 and 4"
 do_test clone-require-all-2 "clone B remains stopped everywhere"
 do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere"
 do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining."
 do_test clone-require-all-5 "clone B starts on node 1 3 and 4"
 do_test clone-require-all-6 "clone B remains active after shutting down instances of A"
 do_test clone-require-all-7 "clone A and B both start at the same time. all instances of A start before B."
 do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B"
 do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B"
 do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another."
 do_test one-or-more-unrunnable-instances "Avoid dependencies on instances that won't ever be started"
 
 echo ""
 do_test order1 "Order start 1     "
 do_test order2 "Order start 2     "
 do_test order3 "Order stop        "
 do_test order4 "Order (multiple)  "
 do_test order5 "Order (move)  "
 do_test order6 "Order (move w/ restart)  "
 do_test order7 "Order (mandatory)  "
 do_test order-optional "Order (score=0)  "
 do_test order-required "Order (score=INFINITY)  "
 do_test bug-lf-2171 "Prevent group start when clone is stopped"
 do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
 do_test order-sets "Ordering for resource sets"
 do_test order-serialize "Serialize resources without inhibiting migration"
 do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
 do_test clone-order-primitive "Order clone start after a primitive"
 do_test clone-order-16instances "Verify ordering of 16 cloned resources"
 do_test order-optional-keyword "Order (optional keyword)"
 do_test order-mandatory "Order (mandatory keyword)"
 do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
 do_test ordered-set-basic-startup "Constraint set with default order settings."
 do_test ordered-set-natural "Allow natural set ordering"
 do_test order-wrong-kind "Order (error)"
 
 echo ""
 do_test coloc-loop "Colocation - loop"
 do_test coloc-many-one "Colocation - many-to-one"
 do_test coloc-list "Colocation - many-to-one with list"
 do_test coloc-group "Colocation - groups"
 do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
 do_test coloc-attr "Colocation based on node attributes"
 do_test coloc-negative-group "Negative colocation with a group"
 do_test coloc-intra-set "Intra-set colocation"
 do_test bug-lf-2435 "Colocation sets with a negative score"
 do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop"
 do_test coloc_fp_logic "Verify floating point calculations in colocation are working"
 do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc."
 do_test colo_slave_w_native  "cl#5070 - Verify promotion order is affected when colocating slave to native rsc."
 do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node"
 do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations"
 do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations"
 do_test enforce-colo1 "Always enforce B with A INFINITY."
 do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)"
 
 echo ""
 do_test rsc-sets-seq-true "Resource Sets - sequential=false"
 do_test rsc-sets-seq-false "Resource Sets - sequential=true"
 do_test rsc-sets-clone "Resource Sets - Clone"
 do_test rsc-sets-master "Resource Sets - Master"
 do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
 
 #echo ""
 #do_test agent1 "version: lt (empty)"
 #do_test agent2 "version: eq "
 #do_test agent3 "version: gt "
 
 echo ""
 do_test attrs1 "string: eq (and)     "
 do_test attrs2 "string: lt / gt (and)"
 do_test attrs3 "string: ne (or)      "
 do_test attrs4 "string: exists       "
 do_test attrs5 "string: not_exists   "
 do_test attrs6 "is_dc: true          "
 do_test attrs7 "is_dc: false         "
 do_test attrs8 "score_attribute      "
 do_test per-node-attrs "Per node resource parameters"
 
 echo ""
 do_test mon-rsc-1 "Schedule Monitor - start"
 do_test mon-rsc-2 "Schedule Monitor - move "
 do_test mon-rsc-3 "Schedule Monitor - pending start     "
 do_test mon-rsc-4 "Schedule Monitor - move/pending start"
 
 echo ""
 do_test rec-rsc-0 "Resource Recover - no start     "
 do_test rec-rsc-1 "Resource Recover - start        "
 do_test rec-rsc-2 "Resource Recover - monitor      "
 do_test rec-rsc-3 "Resource Recover - stop - ignore"
 do_test rec-rsc-4 "Resource Recover - stop - block "
 do_test rec-rsc-5 "Resource Recover - stop - fence "
 do_test rec-rsc-6 "Resource Recover - multiple - restart"
 do_test rec-rsc-7 "Resource Recover - multiple - stop   "
 do_test rec-rsc-8 "Resource Recover - multiple - block  "
 do_test rec-rsc-9 "Resource Recover - group/group"
 do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor"
 do_test stop-failure-no-quorum "Stop failure without quorum"
 do_test stop-failure-no-fencing "Stop failure without fencing available"
 do_test stop-failure-with-fencing "Stop failure with fencing available"
 do_test multiple-active-block-group "Support of multiple-active=block for resource groups"
 do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed"
 
 echo ""
 do_test quorum-1 "No quorum - ignore"
 do_test quorum-2 "No quorum - freeze"
 do_test quorum-3 "No quorum - stop  "
 do_test quorum-4 "No quorum - start anyway"
 do_test quorum-5 "No quorum - start anyway (group)"
 do_test quorum-6 "No quorum - start anyway (clone)"
 do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze"
 do_test suicide-needed-inquorate "no-quorum-policy=suicide: suicide necessary"
 do_test suicide-not-needed-initial-quorum "no-quorum-policy=suicide: suicide not necessary at initial quorum"
 do_test suicide-not-needed-never-quorate "no-quorum-policy=suicide: suicide not necessary if never quorate"
 do_test suicide-not-needed-quorate "no-quorum-policy=suicide: suicide not necessary if quorate"
 
 echo ""
 do_test rec-node-1 "Node Recover - Startup   - no fence"
 do_test rec-node-2 "Node Recover - Startup   - fence   "
 do_test rec-node-3 "Node Recover - HA down   - no fence"
 do_test rec-node-4 "Node Recover - HA down   - fence   "
 do_test rec-node-5 "Node Recover - CRM down  - no fence"
 do_test rec-node-6 "Node Recover - CRM down  - fence   "
 do_test rec-node-7 "Node Recover - no quorum - ignore  "
 do_test rec-node-8 "Node Recover - no quorum - freeze  "
 do_test rec-node-9 "Node Recover - no quorum - stop    "
 do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
 do_test rec-node-11 "Node Recover - CRM down w/ group - fence   "
 do_test rec-node-12 "Node Recover - nothing active - fence   "
 do_test rec-node-13 "Node Recover - failed resource + shutdown - fence   "
 do_test rec-node-15 "Node Recover - unknown lrm section"
 do_test rec-node-14 "Serialize all stonith's"
 
 echo ""
 do_test multi1 "Multiple Active (stop/start)"
 
 echo ""
 do_test migrate-begin     "Normal migration"
 do_test migrate-success   "Completed migration"
 do_test migrate-partial-1 "Completed migration, missing stop on source"
 do_test migrate-partial-2 "Successful migrate_to only"
 do_test migrate-partial-3 "Successful migrate_to only, target down"
 do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
 do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership"
 
 do_test migrate-fail-2 "Failed migrate_from"
 do_test migrate-fail-3 "Failed migrate_from + stop on source"
 do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
 
 do_test migrate-fail-6 "Failed migrate_to"
 do_test migrate-fail-7 "Failed migrate_to + stop on source"
 do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
 
 do_test migrate-stop "Migration in a stopping stack"
 do_test migrate-start "Migration in a starting stack"
 do_test migrate-stop_start "Migration in a restarting stack"
 do_test migrate-stop-complex "Migration in a complex stopping stack"
 do_test migrate-start-complex "Migration in a complex starting stack"
 do_test migrate-stop-start-complex "Migration in a complex moving stack"
 do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
 
 do_test migrate-1 "Migrate (migrate)"
 do_test migrate-2 "Migrate (stable)"
 do_test migrate-3 "Migrate (failed migrate_to)"
 do_test migrate-4 "Migrate (failed migrate_from)"
 do_test novell-252693 "Migration in a stopping stack"
 do_test novell-252693-2 "Migration in a starting stack"
 do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
 do_test bug-1820 "Migration in a group"
 do_test bug-1820-1 "Non-migration in a group"
 do_test migrate-5 "Primitive migration with a clone"
 do_test migrate-fencing "Migration after Fencing"
 do_test migrate-both-vms "Migrate two VMs that have no colocation"
 do_test migration-behind-migrating-remote "Migrate resource behind migrating remote connection"
 
 do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B."
 do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B"
 do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both"
 do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable"
 do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B. move both, a not migratable"
 do_test 6-migrate-group "Advanced migrate logic, migrate a group"
 do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false"
 do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping"
 do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping"
 do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A"
 do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping"
 
 do_test a-promote-then-b-migrate "A promote then B start. migrate B"
 do_test a-demote-then-b-migrate "A demote then B stop. migrate B"
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
     do_test migrate-versioned "Disable migration for versioned resources"
 fi
 
 #echo ""
 #do_test complex1 "Complex "
 
 do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*"
 
 echo ""
 do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
 do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
 do_test clone-anon-failcount "Merge failcounts for anonymous clones"
 do_test force-anon-clone-max "Update clone-max properly when forcing a clone to be anonymous"
 do_test anon-instance-pending "Assign anonymous clone instance numbers properly when action pending"
 do_test inc0 "Incarnation start"
 do_test inc1 "Incarnation start order"
 do_test inc2 "Incarnation silent restart, stop, move"
 do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
 do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
 do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
 do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
 do_test inc7 "Clone colocation"
 do_test inc8 "Clone anti-colocation"
 do_test inc9 "Non-unique clone"
 do_test inc10 "Non-unique clone (stop)"
 do_test inc11 "Primitive colocation with clones"
 do_test inc12 "Clone shutdown"
 do_test cloned-group "Make sure only the correct number of cloned groups are started"
 do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder"
 do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved"
 do_test clone-max-zero "Orphan processing with clone-max=0"
 do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
 do_test bug-lf-2160 "Don't shuffle clones due to colocation"
 do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
 do_test bug-lf-2153 "Clone ordering constraints"
 do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
 do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
 do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
 do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
 do_test clone-order-instance "Ordering with specific clone instances"
 do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
 do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups"
 do_test bug-lf-2544 "Balanced clone placement"
 do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
 do_test bug-lf-2574 "Avoid clone shuffle"
 do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
 do_test bug-cl-5168 "Don't shuffle clones"
 do_test bug-cl-5170 "Prevent clone from starting with on-fail=block"
 do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block"
 do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"
 do_test rebalance-unique-clones "Rebalance unique clone instances with no stickiness"
 do_test clone-requires-quorum-recovery "Clone with requires=quorum on failed node needing recovery"
 do_test clone-requires-quorum "Clone with requires=quorum with presumed-inactive instance on failed node"
 
 echo ""
 do_test cloned_start_one  "order first clone then clone... first clone_min=2"
 do_test cloned_start_two  "order first clone then clone... first clone_min=2"
 do_test cloned_stop_one   "order first clone then clone... first clone_min=2"
 do_test cloned_stop_two   "order first clone then clone... first clone_min=2"
 do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_one  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_two  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
 do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_all  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_one  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_two  "order first clone then primitive... first clone_min=2"
 
 echo ""
 do_test unfence-startup "Clean unfencing"
 do_test unfence-definition "Unfencing when the agent changes"
 do_test unfence-parameters "Unfencing when the agent parameters change"
 do_test unfence-device "Unfencing when a cluster has only fence devices"
 
 echo ""
 do_test master-0 "Stopped -> Slave"
 do_test master-1 "Stopped -> Promote"
 do_test master-2 "Stopped -> Promote : notify"
 do_test master-3 "Stopped -> Promote : master location"
 do_test master-4 "Started -> Promote : master location"
 do_test master-5 "Promoted -> Promoted"
 do_test master-6 "Promoted -> Promoted (2)"
 do_test master-7 "Promoted -> Fenced"
 do_test master-8 "Promoted -> Fenced -> Moved"
 do_test master-9 "Stopped + Promotable + No quorum"
 do_test master-10 "Stopped -> Promotable : notify with monitor"
 do_test master-11 "Stopped -> Promote : colocation"
 do_test novell-239082 "Demote/Promote ordering"
 do_test novell-239087 "Stable master placement"
 do_test master-12 "Promotion based solely on rsc_location constraints"
 do_test master-13 "Include preferences of colocated resources when placing master"
 do_test master-demote "Ordering when actions depend on demoting a slave resource"
 do_test master-ordering "Prevent resources from starting that need a master"
 do_test bug-1765 "Master-Master Colocation (don't stop the slaves)"
 do_test master-group "Promotion of cloned groups"
 do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
 do_test master-failed-demote "Don't retry failed demote actions"
 do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)"
 do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
 do_test master-reattach "Re-attach to a running master"
 do_test master-allow-start "Don't include master score if it would prevent allocation"
 do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
 do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
 do_test master-role "Prevent target-role from promoting more than master-max instances"
 do_test bug-lf-2358 "Master-Master anti-colocation"
 do_test master-promotion-constraint "Mandatory master colocation constraints"
 do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
 do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
 do_test master-demote-2 "Demote does not clear past failure"
 do_test master-move "Move master based on failure of colocated group"
 do_test master-probed-score "Observe the promotion score of probed resources"
 do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
 do_test colocation_constraint_stops_slave  "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
 do_test order_constraint_stops_master      "cl#5054 - Ensure master is demoted when stopped by order constraint"
 do_test order_constraint_stops_slave       "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
 do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
 do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive"
 do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score"
 do_test master-demote-block "Block promotion if demote fails with on-fail=block"
 do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host"
 do_test master-stop "Stop instances due to location constraint with role=Started"
 do_test master-partially-demoted-group "Allow partially demoted group to finish demoting"
 do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced"
 do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted"
 do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering"
 do_test master-notify "Master promotion with notifies"
 do_test master-score-startup "Use permanent master scores without LRM history"
 do_test failed-demote-recovery "Recover resource in slave role after demote fails"
 do_test failed-demote-recovery-master "Recover resource in master role after demote fails"
 
 echo ""
 do_test history-1 "Correctly parse stateful-1 resource state"
 
 echo ""
 do_test managed-0 "Managed (reference)"
 do_test managed-1 "Not managed - down "
 do_test managed-2 "Not managed - up   "
 do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
 do_test bug-5028-detach "Ensure detach still works"
 do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
 do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any dependent resource is unmanaged"
 do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged"
 do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any dependent resource in a group is unmanaged"
 do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any dependent resource in the middle of a group is unmanaged"
 do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged"
 
 echo ""
 do_test interleave-0 "Interleave (reference)"
 do_test interleave-1 "coloc - not interleaved"
 do_test interleave-2 "coloc - interleaved   "
 do_test interleave-3 "coloc - interleaved (2)"
 do_test interleave-pseudo-stop "Interleaved clone during stonith"
 do_test interleave-stop "Interleaved clone during stop"
 do_test interleave-restart "Interleaved clone during dependency restart"
 
 echo ""
 do_test notify-0 "Notify reference"
 do_test notify-1 "Notify simple"
 do_test notify-2 "Notify simple, confirm"
 do_test notify-3 "Notify move, confirm"
 do_test novell-239079 "Notification priority"
 #do_test notify-2 "Notify - 764"
 do_test notifs-for-unrunnable "Don't schedule notifications for an unrunnable action"
 do_test route-remote-notify "Route remote notify actions through correct cluster node"
+do_test notify-behind-stopping-remote "Don't schedule notifications behind stopped remote"
 
 echo ""
 do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
 do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
 do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
 do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
 do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
 do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
 do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
 do_test 829 "OSDL #829"
 do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
 do_test 994-2 "OSDL #994 - with a dependent resource"
 do_test 1360 "OSDL #1360 - Clone stickiness"
 do_test 1484 "OSDL #1484 - on_fail=stop"
 do_test 1494 "OSDL #1494 - Clone stability"
 do_test unrunnable-1 "Unrunnable"
 do_test unrunnable-2 "Unrunnable 2"
 do_test stonith-0 "Stonith loop - 1"
 do_test stonith-1 "Stonith loop - 2"
 do_test stonith-2 "Stonith loop - 3"
 do_test stonith-3 "Stonith startup"
 do_test stonith-4 "Stonith node state"
 do_test bug-1572-1 "Recovery of groups depending on master/slave"
 do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
 do_test bug-1685 "Depends-on-master ordering"
 do_test bug-1822 "Don't promote partially active groups"
 do_test bug-pm-11 "New resource added to a m/s group"
 do_test bug-pm-12 "Recover only the failed portion of a cloned group"
 do_test bug-n-387749 "Don't shuffle clone instances"
 do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
 do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
 do_test bug-lf-1920 "Correctly handle probes that find active resources"
 do_test bnc-515172 "Location constraint with multiple expressions"
 do_test colocate-primitive-with-clone "Optional colocation with a clone"
 do_test use-after-free-merge "Use-after-free in native_merge_weights"
 do_test bug-lf-2551 "STONITH ordering for stop"
 do_test bug-lf-2606 "Stonith implies demote"
 do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
 do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
 do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false"
 do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetrical=false"
 do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetrical=false"
 do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetrical=false, and C is stopped that nothing starts."
 do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetrical=false"
 do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetrical=false."
 do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetrical=false."
 do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetrical=false."
 do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetrical=false."
 do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
 do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources."
 do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
 do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
 do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
 do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
 do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed."
 do_test failcount "Ensure failcounts are correctly expired"
 do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present"
 do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent"
 do_test on-fail-ignore "Ensure on-fail=ignore works even beyond migration-threshold"
 do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
 do_test monitor-onfail-stop    "bug-5058 - Monitor failure with on-fail set to stop"
 do_test bug-5059 "No need to restart p_stateful1:*"
 do_test bug-5069-op-enabled  "Test on-fail=ignore with failure when monitor is enabled."
 do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled."
 do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections"
 do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block"
 do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources"
 do_test asymmetrical-order-restart "Respect asymmetrical ordering when restarting dependent resource"
 do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing"
 do_test order-expired-failure "Order failcount cleanup after remote fencing"
 
 do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc."
 do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith."
 do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group"
 do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group"
 do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)."
 do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)"
 do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group."
 do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs."
 do_test multiply-active-stonith "Multiply active stonith"
 do_test probe-timeout "cl#5099 - Default probe timeout"
 
 do_test concurrent-fencing "Allow performing fencing operations in parallel"
 
 echo ""
 do_test systemhealth1  "System Health ()               #1"
 do_test systemhealth2  "System Health ()               #2"
 do_test systemhealth3  "System Health ()               #3"
 do_test systemhealthn1 "System Health (None)           #1"
 do_test systemhealthn2 "System Health (None)           #2"
 do_test systemhealthn3 "System Health (None)           #3"
 do_test systemhealthm1 "System Health (Migrate On Red) #1"
 do_test systemhealthm2 "System Health (Migrate On Red) #2"
 do_test systemhealthm3 "System Health (Migrate On Red) #3"
 do_test systemhealtho1 "System Health (Only Green)     #1"
 do_test systemhealtho2 "System Health (Only Green)     #2"
 do_test systemhealtho3 "System Health (Only Green)     #3"
 do_test systemhealthp1 "System Health (Progressive)    #1"
 do_test systemhealthp2 "System Health (Progressive)    #2"
 do_test systemhealthp3 "System Health (Progressive)    #3"
 
 echo ""
 do_test utilization "Placement Strategy - utilization"
 do_test minimal     "Placement Strategy - minimal"
 do_test balanced    "Placement Strategy - balanced"
 
 echo ""
 do_test placement-stickiness "Optimized Placement Strategy - stickiness"
 do_test placement-priority   "Optimized Placement Strategy - priority"
 do_test placement-location   "Optimized Placement Strategy - location"
 do_test placement-capacity   "Optimized Placement Strategy - capacity"
 
 echo ""
 do_test utilization-order1 "Utilization Order - Simple"
 do_test utilization-order2 "Utilization Order - Complex"
 do_test utilization-order3 "Utilization Order - Migrate"
 do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)"
 do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
 do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
 do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering"
 
 echo ""
 do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive"
 do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node"
 do_test colocated-utilization-group "Colocated Utilization - Group"
 do_test colocated-utilization-clone "Colocated Utilization - Clone"
 
 do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource"
 
 echo ""
 do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
 do_test node-maintenance-1 "cl#5128 - Node maintenance"
 do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
 do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly"
 
 do_test rsc-maintenance "Per-resource maintenance"
 
 echo ""
 do_test not-installed-agent "The resource agent is missing"
 do_test not-installed-tools "Something the resource agent needs is missing"
 
 echo ""
 do_test stopped-monitor-00 "Stopped Monitor - initial start"
 do_test stopped-monitor-01 "Stopped Monitor - failed started"
 do_test stopped-monitor-02 "Stopped Monitor - started multi-up"
 do_test stopped-monitor-03 "Stopped Monitor - stop started"
 do_test stopped-monitor-04 "Stopped Monitor - failed stop"
 do_test stopped-monitor-05 "Stopped Monitor - start unmanaged"
 do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up"
 do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up"
 do_test stopped-monitor-08 "Stopped Monitor - migrate"
 do_test stopped-monitor-09 "Stopped Monitor - unmanage started"
 do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up"
 do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started"
 do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)"
 do_test stopped-monitor-20 "Stopped Monitor - initial stop"
 do_test stopped-monitor-21 "Stopped Monitor - stopped single-up"
 do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up"
 do_test stopped-monitor-23 "Stopped Monitor - start stopped"
 do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped"
 do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up"
 do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped"
 do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)"
 do_test stopped-monitor-30 "Stopped Monitor - new node started"
 do_test stopped-monitor-31 "Stopped Monitor - new node stopped"
 
 echo ""
 # This is a combo test to check:
 # - probe timeout defaults to the minimum-interval monitor's
 # - duplicate recurring operations are ignored
 # - if timeout spec is bad, the default timeout is used
 # - failure is blocked with on-fail=block even if ISO8601 interval is specified
 # - started/stopped role monitors are started/stopped on right nodes
 do_test intervals "Recurring monitor interval handling"
 
 echo""
 do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
 do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
 do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
 do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
 do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
 do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
 do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
 do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
 do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
 do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
 do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
 do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
 
 do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
 do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
 do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
 do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
 do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
 do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
 do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
 do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
 do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
 do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
 do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
 do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
 do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
 do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
 do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
 do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
 do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
 do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
 do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
 do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
 do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
 do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
 do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
 
 do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
 do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
 do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
 do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
 do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
 do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
 do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
 do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
 do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
 do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
 do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
 do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
 do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
 do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
 do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
 do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
 do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
 do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
 do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
 do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
 do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
 do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
 do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
 
 do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
 do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
 do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
 do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
 do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
 do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
 do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
 do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
 do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
 do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
 do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
 do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
 do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
 do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
 do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
 do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
 do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
 do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
 do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
 do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
 do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
 do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
 do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
 
 do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
 do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
 do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
 do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
 do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
 do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
 do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
 do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
 do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
 do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
 do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
 do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
 
 echo ""
 do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
 do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
 do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
 do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
 do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
 
 do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
 do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
 do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
 do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
 do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
 
 do_test cluster-specific-params "Cluster-specific instance attributes based on rules"
 do_test site-specific-params "Site-specific instance attributes based on rules"
 
 echo ""
 do_test template-1 "Template - 1"
 do_test template-2 "Template - 2"
 do_test template-3 "Template - 3 (merge operations)"
 
 do_test template-coloc-1 "Template - Colocation 1"
 do_test template-coloc-2 "Template - Colocation 2"
 do_test template-coloc-3 "Template - Colocation 3"
 do_test template-order-1 "Template - Order 1"
 do_test template-order-2 "Template - Order 2"
 do_test template-order-3 "Template - Order 3"
 do_test template-ticket  "Template - Ticket"
 
 do_test template-rsc-sets-1  "Template - Resource Sets 1"
 do_test template-rsc-sets-2  "Template - Resource Sets 2"
 do_test template-rsc-sets-3  "Template - Resource Sets 3"
 do_test template-rsc-sets-4  "Template - Resource Sets 4"
 
 do_test template-clone-primitive "Cloned primitive from template"
 do_test template-clone-group     "Cloned group from template"
 
 do_test location-sets-templates "Resource sets and templates - Location"
 
 do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)"
 do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)"
 do_test tags-location      "Tags - Location"
 do_test tags-ticket        "Tags - Ticket"
 
 echo ""
 do_test container-1 "Container - initial"
 do_test container-2 "Container - monitor failed"
 do_test container-3 "Container - stop failed"
 do_test container-4 "Container - reached migration-threshold"
 do_test container-group-1 "Container in group - initial"
 do_test container-group-2 "Container in group - monitor failed"
 do_test container-group-3 "Container in group - stop failed"
 do_test container-group-4 "Container in group - reached migration-threshold"
 do_test container-is-remote-node "Place resource within container when container is remote-node"
 do_test bug-rh-1097457 "Kill user defined container/contents ordering"
 do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container"
 
 do_test bundle-order-startup "Bundle startup ordering"
 do_test bundle-order-partial-start "Bundle startup ordering when some dependencies are already running"
 do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependencies and the container are already running"
 do_test bundle-order-stop    "Bundle stop ordering"
 do_test bundle-order-partial-stop "Bundle startup ordering when some dependencies are already stopped"
 do_test bundle-order-stop-on-remote "Stop nested resource after bringing up the connection"
 
 do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted"
 do_test bundle-order-startup-clone-2 "Bundle startup with clones"
 do_test bundle-order-stop-clone "Stop bundle because clone is stopping"
 do_test bundle-nested-colocation "Colocation of nested connection resources"
 
 do_test bundle-order-fencing "Order pseudo bundle fencing after parent node fencing if both are happening"
 
 do_test bundle-probe-order-1 "order 1"
 do_test bundle-probe-order-2 "order 2"
 do_test bundle-probe-order-3 "order 3"
 do_test bundle-probe-remotes "Ensure remotes get probed too"
 do_test bundle-replicas-change "Change bundle from 1 replica to multiple"
 do_test nested-remote-recovery "Recover bundle's container hosted on remote node"
 
 echo ""
 do_test whitebox-fail1 "Fail whitebox container rsc."
 do_test whitebox-fail2 "Fail cluster connection to guest node"
 do_test whitebox-fail3 "Failed containers should not run nested on remote nodes."
 do_test whitebox-start "Start whitebox container with resources assigned to it"
 do_test whitebox-stop "Stop whitebox container with resources assigned to it"
 do_test whitebox-move "Move whitebox container with resources assigned to it"
 do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource"
 do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established"
 do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container"
 do_test whitebox-orphaned    "Properly shutdown orphaned whitebox container"
 do_test whitebox-orphan-ms   "Properly tear down orphan ms resources on remote-nodes"
 do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start."
 do_test whitebox-migrate1 "Migrate both container and connection resource"
 do_test whitebox-imply-stop-on-fence "imply stop action on container node rsc when host node is fenced"
 do_test whitebox-nested-group "Verify guest remote-node works nested in a group"
 do_test guest-node-host-dies "Verify guest node is recovered if host goes away"
 do_test guest-node-cleanup "Order guest node connection recovery after container probe"
 
 echo ""
 do_test remote-startup-probes  "Baremetal remote-node startup probes"
 do_test remote-startup         "Start up a newly discovered remote-node with no status."
 do_test remote-fence-unclean   "Fence unclean baremetal remote-node"
 do_test remote-fence-unclean2  "Fence baremetal remote-node after cluster node fails and connection can not be recovered"
 do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)"
 do_test remote-move            "Move remote-node connection resource"
 do_test remote-disable         "Disable a baremetal remote-node"
 do_test remote-probe-disable   "Probe then stop a baremetal remote-node"
 do_test remote-orphaned        "Properly shutdown orphaned connection resource"
 do_test remote-orphaned2       "verify we can handle orphaned remote connections with active resources on the remote"
 do_test remote-recover         "Recover connection resource after cluster-node fails."
 do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section"
 do_test remote-partial-migrate  "Make sure partial migrations are handled before ops on the remote node."
 do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection."
 do_test remote-recover-fail     "Make sure start failure causes fencing if rsc are active on remote."
 do_test remote-start-fail       "Make sure a start failure does not result in fencing if no active resources are on remote."
 do_test remote-unclean2         "Make sure monitor failure always results in fencing, even if no rsc are active on remote."
 do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure"
 do_test remote-recovery "Recover remote connections before attempting demotion"
 do_test remote-recover-connection "Optimistic recovery of only the connection"
 do_test remote-recover-all        "Fencing when the connection has no home"
 do_test remote-recover-no-resources   "Fencing when the connection has no home and no active resources"
 do_test remote-recover-unknown        "Fencing when the connection has no home and the remote has no operation history"
 do_test remote-reconnect-delay        "Waiting for remote reconnect interval to expire"
 do_test remote-connection-unrecoverable  "Remote connection host must be fenced, with connection unrecoverable"
 
 echo ""
 do_test resource-discovery      "Exercises resource-discovery location constraint option."
 do_test rsc-discovery-per-node  "Disable resource discovery per node"
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
     echo ""
     do_test versioned-resources     "Start resources with #ra-version rules"
     do_test restart-versioned       "Restart resources on #ra-version change"
     do_test reload-versioned        "Reload resources on #ra-version change"
 
     echo ""
     do_test versioned-operations-1  "Use #ra-version to configure operations of native resources"
     do_test versioned-operations-2  "Use #ra-version to configure operations of stonith resources"
     do_test versioned-operations-3  "Use #ra-version to configure operations of master/slave resources"
     do_test versioned-operations-4  "Use #ra-version to configure operations of groups of the resources"
 fi
 
 echo ""
 test_results
 exit $EXITCODE
diff --git a/cts/environment.py b/cts/environment.py
index 19573c31e7..0ce75136f2 100644
--- a/cts/environment.py
+++ b/cts/environment.py
@@ -1,633 +1,632 @@
 """ Test environment classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 # Pacemaker targets compatibility with Python 2.7 and 3.2+
 from __future__ import print_function, unicode_literals, absolute_import, division
 
 __copyright__ = "Copyright 2014-2018 Andrew Beekhof <andrew@beekhof.net>"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import sys, time, os, socket, random
 
 from cts.remote import *
 from cts.CTSvars import *
 
 class Environment(object):
 
     def __init__(self, args):
         self.data = {}
         self.Nodes = []
 
         self["DeadTime"] = 300
         self["StartTime"] = 300
         self["StableTime"] = 30
         self["tests"] = []
         self["IPagent"] = "IPaddr2"
         self["DoStandby"] = 1
         self["DoFencing"] = 1
         self["XmitLoss"] = "0.0"
         self["RecvLoss"] = "0.0"
         self["ClobberCIB"] = 0
         self["CIBfilename"] = None
         self["CIBResource"] = 0
         self["DoBSC"]    = 0
         self["oprofile"] = []
         self["warn-inactive"] = 0
         self["ListTests"] = 0
         self["benchmark"] = 0
         self["LogWatcher"] = "any"
         self["SyslogFacility"] = "daemon"
         self["LogFileName"] = "/var/log/messages"
         self["Schema"] = "pacemaker-3.0"
         self["Stack"] = "corosync"
         self["stonith-type"] = "external/ssh"
         self["stonith-params"] = "hostlist=all,livedangerously=yes"
         self["notification-agent"] = "/var/lib/pacemaker/notify.sh"
         self["notification-recipient"] = "/var/lib/pacemaker/notify.log"
         self["loop-minutes"] = 60
         self["valgrind-procs"] = "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd"
         self["experimental-tests"] = 0
         self["container-tests"] = 0
         self["valgrind-tests"] = 0
         self["unsafe-tests"] = 1
         self["loop-tests"] = 1
         self["scenario"] = "random"
         self["stats"] = 0
         self["docker"] = 0
         self["continue"] = 0
 
         self.RandomGen = random.Random()
         self.logger = LogFactory()
 
         self.SeedRandom()
         self.rsh = RemoteFactory().getInstance()
 
         self.target = "localhost"
 
         self.parse_args(args)
-        self.discover()
-        self.validate()
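+        # When only listing tests, skip node validation and discovery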
+        if self["ListTests"] == 0:
+            self.validate()
+            self.discover()
 
     def SeedRandom(self, seed=None):
         if not seed:
             seed = int(time.time())
 
         self["RandSeed"] = seed
         self.RandomGen.seed(str(seed))
 
     def dump(self):
         keys = []
         for key in list(self.data.keys()):
             keys.append(key)
 
         keys.sort()
         for key in keys:
             self.logger.debug("Environment["+key+"]:\t"+str(self[key]))
 
     def keys(self):
         return list(self.data.keys())
 
     def has_key(self, key):
         if key == "nodes":
             return True
 
         return key in self.data
 
     def __getitem__(self, key):
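         # 'foo in env' falls back to integer indexing here; direct callers to keys() instead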
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
 
         if key == "nodes":
             return self.Nodes
 
         elif key == "Name":
             return self.get_stack_short()
 
         elif key in self.data:
             return self.data[key]
 
         else:
             return None
 
     def __setitem__(self, key, value):
         if key == "Stack":
             self.set_stack(value)
 
         elif key == "node-limit":
             self.data[key] = value
             self.filter_nodes()
 
         elif key == "nodes":
             self.Nodes = []
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     n = node.strip()
                     if self.data["docker"] == 0:
                         socket.gethostbyname_ex(n)
 
                     self.Nodes.append(n) 
                 except:
                     self.logger.log(node+" not found in DNS... aborting")
                     raise
 
             self.filter_nodes()
 
         else:
             self.data[key] = value
 
     def RandomNode(self):
         '''Choose a random node from the cluster'''
         return self.RandomGen.choice(self["nodes"])
 
     def set_stack(self, name):
         # Normalize stack names
         if name == "corosync" or name == "cs" or name == "mcp":
             self.data["Stack"] = "corosync 2+"
 
         else:
             raise ValueError("Unknown stack: "+name)
 
     def get_stack_short(self):
         # Create the Cluster Manager object
         if not "Stack" in self.data:
             return "unknown"
 
         elif self.data["Stack"] == "corosync 2+":
             if self["docker"]:
                 return "crm-corosync-docker"
             else:
                 return "crm-corosync"
 
         else:
             LogFactory().log("Unknown stack: "+self["stack"])
             raise ValueError("Unknown stack: "+self["stack"])
 
     def detect_syslog(self):
         # Detect syslog variant
         if not "syslogd" in self.data:
             if self["have_systemd"]:
                 # Systemd
                 self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip()
             else:
                 # SYS-V
                 self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip()
 
             if not "syslogd" in self.data or not self["syslogd"]:
                 # default
                 self["syslogd"] = "rsyslog"
 
     def detect_at_boot(self):
         # Detect if the cluster starts at boot
         if not "at-boot" in self.data:
             atboot = 0
 
             if self["have_systemd"]:
                 # Systemd
                 atboot = atboot or not self.rsh(self.target, "systemctl is-enabled corosync.service")
                 atboot = atboot or not self.rsh(self.target, "systemctl is-enabled pacemaker.service")
             else:
                 # SYS-V
                 atboot = atboot or not self.rsh(self.target, "chkconfig --list | grep -e corosync.*on -e pacemaker.*on")
 
             self["at-boot"] = atboot
 
     def detect_ip_offset(self):
-        # Try to determin an offset for IPaddr resources
+        # Try to determine an offset for IPaddr resources
         if self["CIBResource"] and not "IPBase" in self.data:
             network=self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip()
             self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip()
             if not self["IPBase"]:
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
             elif int(self["IPBase"].split('.')[3]) >= 240:
                 self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
                                 % (self["IPBase"], self["IPBase"].split('.')[3]))
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
     def filter_nodes(self):
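         # Trim the configured node list to at most "node-limit" entries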
         if self['node-limit'] is not None and self["node-limit"] > 0:
             if len(self["nodes"]) > self["node-limit"]:
                 self.logger.log("Limiting the number of nodes configured=%d (max=%d)"
                                 %(len(self["nodes"]), self["node-limit"]))
                 while len(self["nodes"]) > self["node-limit"]:
                     self["nodes"].pop(len(self["nodes"])-1)
 
     def validate(self):
         if len(self["nodes"]) < 1:
             print("No nodes specified!")
             sys.exit(1)
 
     def discover(self):
         self.target = random.Random().choice(self["nodes"])
 
         master = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(master)[2]:
             if ip != "127.0.0.1":
                 master = ip
                 break
         self["cts-master"] = master
 
         if not "have_systemd" in self.data:
             self["have_systemd"] = not self.rsh(self.target,
                                                 "systemctl list-units",
                                                 silent=True)
         self.detect_syslog()
         self.detect_at_boot()
         self.detect_ip_offset()
 
-        self.validate()
-
     def parse_args(self, args):
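         # skipthis flags that the next argument was consumed as the current option's value, so the loop skips it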
         skipthis=None
 
         if not args:
             args=sys.argv[1:]
 
         for i in range(0, len(args)):
             if skipthis:
                 skipthis=None
                 continue
 
             elif args[i] == "-l" or args[i] == "--limit-nodes":
                 skipthis=1
                 self["node-limit"] = int(args[i+1])
 
             elif args[i] == "-r" or args[i] == "--populate-resources":
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--outputfile":
                 skipthis=1
                 self["OutputFile"] = args[i+1]
                 LogFactory().add_file(self["OutputFile"])
 
             elif args[i] == "-L" or args[i] == "--logfile":
                 skipthis=1
                 self["LogWatcher"] = "remote"
                 self["LogAuditDisabled"] = 1
                 self["LogFileName"] = args[i+1]
 
             elif args[i] == "--ip" or args[i] == "--test-ip-base":
                 skipthis=1
                 self["IPBase"] = args[i+1]
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--oprofile":
                 skipthis=1
                 self["oprofile"] = args[i+1].split(' ')
 
             elif args[i] == "--trunc":
                 self["TruncateLog"]=1
 
             elif args[i] == "--list-tests" or args[i] == "--list" :
                 self["ListTests"]=1
 
             elif args[i] == "--benchmark":
                 self["benchmark"]=1
 
             elif args[i] == "--bsc":
                 self["DoBSC"] = 1
                 self["scenario"] = "basic-sanity"
 
             elif args[i] == "--qarsh":
                 RemoteFactory().enable_qarsh()
 
             elif args[i] == "--docker":
                 self["docker"] = 1
                 RemoteFactory().enable_docker()
             elif args[i] == "--yes" or args[i] == "-y":
                 self["continue"] = 1
             elif args[i] == "--stonith" or args[i] == "--fencing":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["DoFencing"]=1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["DoFencing"]=0
                 elif args[i+1] == "phd":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_phd_kvm"
                 elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                 elif args[i+1] == "docker":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_docker_cts"
                 elif args[i+1] == "scsi":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_scsi"
                 elif args[i+1] == "ssh" or args[i+1] == "lha":
                     self["DoStonith"]=1
                     self["stonith-type"] = "external/ssh"
                     self["stonith-params"] = "hostlist=all,livedangerously=yes"
                 elif args[i+1] == "north":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;"
                 elif args[i+1] == "south":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;"
                 elif args[i+1] == "east":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                 elif args[i+1] == "west":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;"
                 elif args[i+1] == "openstack":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_openstack"
                     
                     print("Obtaining OpenStack credentials from the current environment")
                     self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                         os.environ['OS_REGION_NAME'],
                         os.environ['OS_TENANT_NAME'],
                         os.environ['OS_AUTH_URL'],
                         os.environ['OS_USERNAME'],
                         os.environ['OS_PASSWORD']
                     )
                     
                 elif args[i+1] == "rhevm":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
                     
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
                     
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--stonith-type":
                 self["stonith-type"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--stonith-args":
                 self["stonith-params"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--standby":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["DoStandby"] = 1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["DoStandby"] = 0
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--clobber-cib" or args[i] == "-c":
                 self["ClobberCIB"] = 1
                 
             elif args[i] == "--cib-filename":
                 skipthis=1
                 self["CIBfilename"] = args[i+1]
 
             elif args[i] == "--xmit-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print("--xmit-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["XmitLoss"] = args[i+1]
 
             elif args[i] == "--recv-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print("--recv-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["RecvLoss"] = args[i+1]
 
             elif args[i] == "--choose":
                 skipthis=1
                 self["tests"].append(args[i+1])
                 self["scenario"] = "sequence"
 
             elif args[i] == "--nodes":
                 skipthis=1
                 self["nodes"] = args[i+1].split(' ')
 
             elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group":
                 skipthis=1
                 self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1])
                 LogFactory().add_file(self["OutputFile"], "CTS")
 
                 dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1])
 
                 # Hacks to make my life easier
                 if args[i+1] == "virt1":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                     self["stonith-params"] = "delay=0"
                     self["IPBase"] = " fe80::1234:56:7890:1000"
 
                 elif args[i+1] == "east16" or args[i+1] == "nsew":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                     self["IPBase"] = " fe80::1234:56:7890:2000"
 
                     if args[i+1] == "east16":
                         # Requires newer python than is available on nsew
                         self["IPagent"] = "Dummy"
 
                 elif args[i+1] == "corosync8":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
 
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
                     self["IPBase"] = " fe80::1234:56:7890:3000"
 
                 if os.path.isfile(dsh_file):
                     self["nodes"] = []
                     f = open(dsh_file, 'r')
                     for line in f:
                         l = line.strip().rstrip()
                         if not l.startswith('#'):
                             self["nodes"].append(l)
                     f.close()
 
                 else:
                     print("Unknown DSH group: %s" % args[i+1])
 
             elif args[i] == "--syslog-facility" or args[i] == "--facility":
                 skipthis=1
                 self["SyslogFacility"] = args[i+1]
                 
             elif args[i] == "--seed":
                 skipthis=1
                 self.SeedRandom(args[i+1])
 
             elif args[i] == "--warn-inactive":
                 self["warn-inactive"] = 1
 
             elif args[i] == "--schema":
                 skipthis=1
                 self["Schema"] = args[i+1]
 
             elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["at-boot"] = 1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["at-boot"] = 0
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--stack":
                 if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18":
                     self["Stack"] = "corosync"
                 elif args[i+1] == "rhel-7":
                     self["Stack"] = "corosync"
                 else:
                     self["Stack"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--once":
                 self["scenario"] = "all-once"
 
             elif args[i] == "--boot":
                 self["scenario"] = "boot"
 
             elif args[i] == "--notification-agent":
                 self["notification-agent"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--notification-recipient":
                 self["notification-recipient"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--valgrind-tests":
                 self["valgrind-tests"] = 1
 
             elif args[i] == "--valgrind-procs":
                 self["valgrind-procs"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--no-loop-tests":
                 self["loop-tests"] = 0
 
             elif args[i] == "--loop-minutes":
                 skipthis=1
                 try:
                     self["loop-minutes"]=int(args[i+1])
                 except ValueError:
                     self.usage(args[i])
 
             elif args[i] == "--no-unsafe-tests":
                 self["unsafe-tests"] = 0
 
             elif args[i] == "--experimental-tests":
                 self["experimental-tests"] = 1
 
             elif args[i] == "--container-tests":
                 self["container-tests"] = 1
 
             elif args[i] == "--set":
                 skipthis=1
                 (name, value) = args[i+1].split('=')
                 self[name] = value
                 print("Setting %s = %s" % (name, value))
                 
             elif args[i] == "--help":
                 self.usage(args[i], 0)
 
             elif args[i] == "--":
                 break
 
             else:
                 try:
                     NumIter=int(args[i])
                     self["iterations"] = NumIter
                 except ValueError:
                     self.usage(args[i])
 
     def usage(self, arg, status=1):
         if status:
             print("Illegal argument %s" % arg)
         print("usage: " + sys.argv[0] +" [options] number-of-iterations")
         print("\nCommon options: ")
         print("\t [--nodes 'node list']        list of cluster nodes separated by whitespace")
         print("\t [--group | -g 'name']        use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
         print("\t [--limit-nodes max]          only use the first 'max' cluster nodes supplied with --nodes")
         print("\t [--stack corosync]           which cluster stack is installed")
         print("\t [--list-tests]               list the valid tests")
         print("\t [--benchmark]                add the timing information")
         print("\t ")
         print("Options that CTS will usually auto-detect correctly: ")
         print("\t [--logfile path]             where should the test software look for logs from cluster nodes")
         print("\t [--syslog-facility name]     which syslog facility should the test software log to")
         print("\t [--at-boot (1|0)]            does the cluster software start at boot time")
         print("\t [--test-ip-base ip]          offset for generated IP address resources")
         print("\t ")
         print("Options for release testing: ")
         print("\t [--populate-resources | -r]  generate a sample configuration")
         print("\t [--choose name]              run only the named test")
         print("\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]")
         print("\t [--once]                     run all valid tests once")
         print("\t ")
         print("Additional (less common) options: ")
         print("\t [--clobber-cib | -c ]        erase any existing configuration")
         print("\t [--outputfile path]          optional location for the test software to write logs to")
         print("\t [--trunc]                    truncate logfile before starting")
         print("\t [--xmit-loss lost-rate(0.0-1.0)]")
         print("\t [--recv-loss lost-rate(0.0-1.0)]")
         print("\t [--standby (1 | 0 | yes | no)]")
         print("\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]")
         print("\t [--stonith-type type]")
         print("\t [--stonith-args name=value]")
         print("\t [--bsc]")
         print("\t [--notification-agent path]  script to configure for Pacemaker alerts")
         print("\t [--notification-recipient r] recipient to pass to alert script")
         print("\t [--no-loop-tests]            don't run looping/time-based tests")
         print("\t [--no-unsafe-tests]          don't run tests that are unsafe for use with ocfs2/drbd")
         print("\t [--valgrind-tests]           include tests using valgrind")
         print("\t [--experimental-tests]       include experimental tests")
         print("\t [--container-tests]          include pacemaker_remote tests that run in lxc container resources")
         print("\t [--oprofile 'node list']     list of cluster nodes to run oprofile on]")
         print("\t [--qarsh]                    use the QARSH backdoor to access nodes instead of SSH")
         print("\t [--docker]                   Indicates nodes are docker nodes.")
         print("\t [--seed random_seed]")
         print("\t [--set option=value]")
         print("\t [--yes | -y]                 continue to run cts when there is an interaction whether to continue running pacemaker-cts")
         print("\t ")
         print("\t Example: ")
         # @PYTHON@ would be better here but not worth making this file a .in
         print("\t    python " + sys.argv[0] + " -g virt1 -r --stonith ssh --schema pacemaker-2.0 500")
 
         sys.exit(status)
 
 class EnvFactory(object):
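     # Hands out a single shared Environment instance, created on the first getInstance() call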
     instance = None
     def __init__(self):
         pass
 
     def getInstance(self, args=None):
         if not EnvFactory.instance:
             EnvFactory.instance = Environment(args)
         return EnvFactory.instance
diff --git a/cts/scheduler/bundle-order-fencing.dot b/cts/scheduler/bundle-order-fencing.dot
index 62a331e758..0c18211fd8 100644
--- a/cts/scheduler/bundle-order-fencing.dot
+++ b/cts/scheduler/bundle-order-fencing.dot
@@ -1,413 +1,405 @@
 digraph "g" {
 "Cancel redis_monitor_45000 redis-bundle-1" -> "redis_promote_0 redis-bundle-1" [ style = bold]
 "Cancel redis_monitor_45000 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "Cancel redis_monitor_60000 redis-bundle-1" -> "redis_promote_0 redis-bundle-1" [ style = bold]
 "Cancel redis_monitor_60000 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "all_stopped" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-0_monitor_0 controller-1" -> "galera-bundle-0_start_0 controller-2" [ style = dashed]
 "galera-bundle-0_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-0_monitor_0 controller-2" -> "galera-bundle-0_start_0 controller-2" [ style = dashed]
 "galera-bundle-0_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-0_monitor_30000 controller-2" [ style=dashed color="red" fontcolor="black"]
 "galera-bundle-0_start_0 controller-2" -> "galera-bundle-0_monitor_30000 controller-2" [ style = dashed]
 "galera-bundle-0_start_0 controller-2" -> "galera_monitor_20000 galera-bundle-0" [ style = dashed]
 "galera-bundle-0_start_0 controller-2" -> "galera_monitor_30000 galera-bundle-0" [ style = dashed]
 "galera-bundle-0_start_0 controller-2" -> "galera_start_0 galera-bundle-0" [ style = dashed]
 "galera-bundle-0_start_0 controller-2" [ style=dashed color="red" fontcolor="black"]
 "galera-bundle-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "galera-bundle-0_stop_0 controller-0" -> "galera-bundle-0_start_0 controller-2" [ style = dashed]
 "galera-bundle-0_stop_0 controller-0" -> "galera-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "galera-bundle-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-1_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-2_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "galera-bundle-docker-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "galera-bundle-docker-0_stop_0 controller-0" -> "galera-bundle_stopped_0" [ style = bold]
 "galera-bundle-docker-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_demote_0" -> "galera-bundle-master_demoted_0" [ style = bold]
 "galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-0" [ style = bold]
 "galera-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_demoted_0" -> "galera-bundle-master_start_0" [ style = bold]
 "galera-bundle-master_demoted_0" -> "galera-bundle-master_stop_0" [ style = bold]
 "galera-bundle-master_demoted_0" -> "galera-bundle_demoted_0" [ style = bold]
 "galera-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_running_0" -> "galera-bundle_running_0" [ style = bold]
 "galera-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_start_0" -> "galera-bundle-master_running_0" [ style = bold]
 "galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-0" [ style = dashed]
 "galera-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_stop_0" -> "galera-bundle-master_stopped_0" [ style = bold]
 "galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
 "galera-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle-master_stopped_0" -> "galera-bundle-master_start_0" [ style = bold]
 "galera-bundle-master_stopped_0" -> "galera-bundle_stopped_0" [ style = bold]
 "galera-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_demote_0" -> "galera-bundle-master_demote_0" [ style = bold]
 "galera-bundle_demote_0" -> "galera-bundle_demoted_0" [ style = bold]
 "galera-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_demoted_0" -> "galera-bundle_start_0" [ style = bold]
 "galera-bundle_demoted_0" -> "galera-bundle_stop_0" [ style = bold]
 "galera-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_start_0" -> "galera-bundle-master_start_0" [ style = bold]
 "galera-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_stop_0" -> "galera-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "galera-bundle_stop_0" -> "galera-bundle-master_stop_0" [ style = bold]
 "galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
 "galera-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
 "galera-bundle_stopped_0" -> "galera-bundle_start_0" [ style = bold]
 "galera-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "galera_demote_0 galera-bundle-0" -> "galera-bundle-master_demoted_0" [ style = bold]
 "galera_demote_0 galera-bundle-0" -> "galera_monitor_20000 galera-bundle-0" [ style = dashed]
 "galera_demote_0 galera-bundle-0" -> "galera_monitor_30000 galera-bundle-0" [ style = dashed]
 "galera_demote_0 galera-bundle-0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
 "galera_demote_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "galera_monitor_20000 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "galera_monitor_30000 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "galera_start_0 galera-bundle-0" -> "galera-bundle-master_running_0" [ style = dashed]
 "galera_start_0 galera-bundle-0" -> "galera_monitor_20000 galera-bundle-0" [ style = dashed]
 "galera_start_0 galera-bundle-0" -> "galera_monitor_30000 galera-bundle-0" [ style = dashed]
 "galera_start_0 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "galera_stop_0 galera-bundle-0" -> "all_stopped" [ style = bold]
 "galera_stop_0 galera-bundle-0" -> "galera-bundle-master_stopped_0" [ style = bold]
 "galera_stop_0 galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = dashed]
 "galera_stop_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "haproxy-bundle-docker-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "haproxy-bundle-docker-0_stop_0 controller-0" -> "haproxy-bundle_stopped_0" [ style = bold]
 "haproxy-bundle-docker-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "haproxy-bundle_stop_0" -> "haproxy-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "haproxy-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
 "haproxy-bundle_stopped_0" -> "ip-10.0.0.109_stop_0 controller-0" [ style = bold]
 "haproxy-bundle_stopped_0" -> "ip-172.17.4.11_stop_0 controller-0" [ style = bold]
 "haproxy-bundle_stopped_0" -> "ip-192.168.24.7_stop_0 controller-0" [ style = bold]
 "haproxy-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "ip-10.0.0.109_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"]
 "ip-10.0.0.109_start_0 controller-1" -> "ip-10.0.0.109_monitor_10000 controller-1" [ style = bold]
 "ip-10.0.0.109_start_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "ip-10.0.0.109_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "ip-10.0.0.109_stop_0 controller-0" -> "ip-10.0.0.109_start_0 controller-1" [ style = bold]
 "ip-10.0.0.109_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "ip-172.17.4.11_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"]
 "ip-172.17.4.11_start_0 controller-1" -> "ip-172.17.4.11_monitor_10000 controller-1" [ style = bold]
 "ip-172.17.4.11_start_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "ip-172.17.4.11_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "ip-172.17.4.11_stop_0 controller-0" -> "ip-172.17.4.11_start_0 controller-1" [ style = bold]
 "ip-172.17.4.11_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "ip-192.168.24.7_monitor_10000 controller-2" [ style=bold color="green" fontcolor="black"]
 "ip-192.168.24.7_start_0 controller-2" -> "ip-192.168.24.7_monitor_10000 controller-2" [ style = bold]
 "ip-192.168.24.7_start_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "ip-192.168.24.7_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "ip-192.168.24.7_stop_0 controller-0" -> "ip-192.168.24.7_start_0 controller-2" [ style = bold]
 "ip-192.168.24.7_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-0_monitor_0 controller-1" -> "rabbitmq-bundle-0_start_0 controller-1" [ style = dashed]
 "rabbitmq-bundle-0_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "rabbitmq-bundle-0_monitor_0 controller-2" -> "rabbitmq-bundle-0_start_0 controller-1" [ style = dashed]
 "rabbitmq-bundle-0_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "rabbitmq-bundle-0_monitor_30000 controller-1" [ style=dashed color="red" fontcolor="black"]
 "rabbitmq-bundle-0_start_0 controller-1" -> "rabbitmq-bundle-0_monitor_30000 controller-1" [ style = dashed]
 "rabbitmq-bundle-0_start_0 controller-1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed]
 "rabbitmq-bundle-0_start_0 controller-1" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
 "rabbitmq-bundle-0_start_0 controller-1" [ style=dashed color="red" fontcolor="black"]
 "rabbitmq-bundle-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "rabbitmq-bundle-0_stop_0 controller-0" -> "rabbitmq-bundle-0_start_0 controller-1" [ style = dashed]
 "rabbitmq-bundle-0_stop_0 controller-0" -> "rabbitmq-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "rabbitmq-bundle-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-1_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "rabbitmq-bundle-2_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "rabbitmq-bundle-clone_confirmed-post_notify_running_0" -> "rabbitmq-bundle_running_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" -> "rabbitmq-bundle-clone_pre_notify_start_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" -> "rabbitmq-bundle_stopped_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" -> "rabbitmq-bundle-clone_post_notify_stopped_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold]
 "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_post_notify_running_0" -> "rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style = bold]
 "rabbitmq-bundle-clone_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_post_notify_stopped_0" -> "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style = bold]
 "rabbitmq-bundle-clone_post_notify_stopped_0" -> "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-1" [ style = bold]
 "rabbitmq-bundle-clone_post_notify_stopped_0" -> "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-2" [ style = bold]
 "rabbitmq-bundle-clone_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_pre_notify_start_0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style = bold]
 "rabbitmq-bundle-clone_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = bold]
 "rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" [ style = bold]
 "rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" [ style = bold]
 "rabbitmq-bundle-clone_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_running_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = bold]
 "rabbitmq-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_start_0" -> "rabbitmq-bundle-clone_running_0" [ style = bold]
 "rabbitmq-bundle-clone_start_0" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
 "rabbitmq-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_stop_0" -> "rabbitmq-bundle-clone_stopped_0" [ style = bold]
 "rabbitmq-bundle-clone_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = bold]
 "rabbitmq-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-clone_stopped_0" -> "rabbitmq-bundle-clone_post_notify_stopped_0" [ style = bold]
 "rabbitmq-bundle-clone_stopped_0" -> "rabbitmq-bundle-clone_start_0" [ style = bold]
 "rabbitmq-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle-docker-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "rabbitmq-bundle-docker-0_stop_0 controller-0" -> "rabbitmq-bundle_stopped_0" [ style = bold]
 "rabbitmq-bundle-docker-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = bold]
 "rabbitmq-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold]
 "rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "rabbitmq-bundle_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = bold]
 "rabbitmq-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq_confirmed-post_notify_stonith_0" -> "all_stopped" [ style = bold]
 "rabbitmq_confirmed-post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style = bold]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-1" -> "rabbitmq_confirmed-post_notify_stonith_0" [ style = bold]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-1" [ style=bold color="green" fontcolor="black"]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style = bold]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-2" -> "rabbitmq_confirmed-post_notify_stonith_0" [ style = bold]
 "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-2" [ style=bold color="green" fontcolor="black"]
 "rabbitmq_post_notify_stonith_0" -> "rabbitmq_confirmed-post_notify_stonith_0" [ style = bold]
 "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-1" [ style = bold]
 "rabbitmq_post_notify_stonith_0" -> "rabbitmq_post_notify_stonith_0 rabbitmq-bundle-2" [ style = bold]
 "rabbitmq_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
 "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = bold]
 "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" [ style=bold color="green" fontcolor="black"]
 "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = bold]
 "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" [ style=bold color="green" fontcolor="black"]
 "rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
 "rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed]
 "rabbitmq_start_0 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "rabbitmq_stop_0 rabbitmq-bundle-0" -> "all_stopped" [ style = bold]
 "rabbitmq_stop_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_stopped_0" [ style = bold]
 "rabbitmq_stop_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
 "rabbitmq_stop_0 rabbitmq-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-0_monitor_0 controller-1" -> "redis-bundle-0_start_0 controller-1" [ style = dashed]
 "redis-bundle-0_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "redis-bundle-0_monitor_0 controller-2" -> "redis-bundle-0_start_0 controller-1" [ style = dashed]
 "redis-bundle-0_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "redis-bundle-0_monitor_30000 controller-1" [ style=dashed color="red" fontcolor="black"]
 "redis-bundle-0_start_0 controller-1" -> "redis-bundle-0_monitor_30000 controller-1" [ style = dashed]
 "redis-bundle-0_start_0 controller-1" -> "redis_monitor_45000 redis-bundle-0" [ style = dashed]
 "redis-bundle-0_start_0 controller-1" -> "redis_monitor_60000 redis-bundle-0" [ style = dashed]
 "redis-bundle-0_start_0 controller-1" -> "redis_start_0 redis-bundle-0" [ style = dashed]
 "redis-bundle-0_start_0 controller-1" [ style=dashed color="red" fontcolor="black"]
 "redis-bundle-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "redis-bundle-0_stop_0 controller-0" -> "redis-bundle-0_start_0 controller-1" [ style = dashed]
 "redis-bundle-0_stop_0 controller-0" -> "redis-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "redis-bundle-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-1_monitor_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "redis-bundle-2_monitor_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "redis-bundle-docker-0_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "redis-bundle-docker-0_stop_0 controller-0" -> "redis-bundle_stopped_0" [ style = bold]
 "redis-bundle-docker-0_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-post_notify_demoted_0" -> "redis-bundle-master_pre_notify_promote_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_demoted_0" -> "redis-bundle-master_pre_notify_start_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_demoted_0" -> "redis-bundle-master_pre_notify_stop_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_demoted_0" -> "redis-bundle_demoted_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_demoted_0" -> "redis_monitor_20000 redis-bundle-1" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis-bundle_promoted_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis_monitor_20000 redis-bundle-1" [ style = bold]
-"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis_monitor_45000 redis-bundle-0" [ style = dashed]
-"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis_monitor_60000 redis-bundle-0" [ style = dashed]
 "redis-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle-master_pre_notify_promote_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle_running_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_stopped_0" -> "redis-bundle-master_pre_notify_promote_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_stopped_0" -> "redis-bundle-master_pre_notify_start_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_stopped_0" -> "redis-bundle_stopped_0" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_stopped_0" -> "redis_monitor_20000 redis-bundle-1" [ style = bold]
 "redis-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-pre_notify_demote_0" -> "redis-bundle-master_demote_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_demote_0" -> "redis-bundle-master_post_notify_demoted_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_promote_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_start_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_confirmed-pre_notify_stop_0" -> "redis-bundle-master_post_notify_stopped_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_stop_0" -> "redis-bundle-master_stop_0" [ style = bold]
 "redis-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_demote_0" -> "redis-bundle-master_demoted_0" [ style = bold]
 "redis-bundle-master_demote_0" -> "redis_demote_0 redis-bundle-0" [ style = bold]
 "redis-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_demoted_0" -> "redis-bundle-master_post_notify_demoted_0" [ style = bold]
 "redis-bundle-master_demoted_0" -> "redis-bundle-master_promote_0" [ style = bold]
 "redis-bundle-master_demoted_0" -> "redis-bundle-master_start_0" [ style = bold]
 "redis-bundle-master_demoted_0" -> "redis-bundle-master_stop_0" [ style = bold]
 "redis-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_post_notify_demoted_0" -> "redis-bundle-master_confirmed-post_notify_demoted_0" [ style = bold]
 "redis-bundle-master_post_notify_demoted_0" -> "redis_post_notify_demoted_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_post_notify_demoted_0" -> "redis_post_notify_demoted_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_post_notify_promoted_0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
-"redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-0" [ style = bold]
 "redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_post_notify_running_0" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold]
 "redis-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_post_notify_stopped_0" -> "redis-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
 "redis-bundle-master_post_notify_stopped_0" -> "redis_post_notify_stonith_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_post_notify_stopped_0" -> "redis_post_notify_stonith_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_pre_notify_demote_0" -> "redis-bundle-master_confirmed-pre_notify_demote_0" [ style = bold]
 "redis-bundle-master_pre_notify_demote_0" -> "redis_pre_notify_demote_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_pre_notify_demote_0" -> "redis_pre_notify_demote_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_pre_notify_promote_0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
-"redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-0" [ style = bold]
 "redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_pre_notify_start_0" -> "redis-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
 "redis-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_pre_notify_stop_0" -> "redis-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
 "redis-bundle-master_pre_notify_stop_0" -> "redis_pre_notify_stop_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_pre_notify_stop_0" -> "redis_pre_notify_stop_0 redis-bundle-2" [ style = bold]
 "redis-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_promote_0" -> "redis_promote_0 redis-bundle-1" [ style = bold]
 "redis-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_promoted_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold]
 "redis-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_running_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold]
 "redis-bundle-master_running_0" -> "redis-bundle-master_promote_0" [ style = bold]
 "redis-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_start_0" -> "redis-bundle-master_running_0" [ style = bold]
 "redis-bundle-master_start_0" -> "redis_start_0 redis-bundle-0" [ style = dashed]
 "redis-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_stop_0" -> "redis-bundle-master_stopped_0" [ style = bold]
 "redis-bundle-master_stop_0" -> "redis_stop_0 redis-bundle-0" [ style = bold]
 "redis-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle-master_stopped_0" -> "redis-bundle-master_post_notify_stopped_0" [ style = bold]
 "redis-bundle-master_stopped_0" -> "redis-bundle-master_promote_0" [ style = bold]
 "redis-bundle-master_stopped_0" -> "redis-bundle-master_start_0" [ style = bold]
 "redis-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_demote_0" -> "redis-bundle-master_demote_0" [ style = bold]
 "redis-bundle_demote_0" -> "redis-bundle_demoted_0" [ style = bold]
 "redis-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_demoted_0" -> "redis-bundle_promote_0" [ style = bold]
 "redis-bundle_demoted_0" -> "redis-bundle_start_0" [ style = bold]
 "redis-bundle_demoted_0" -> "redis-bundle_stop_0" [ style = bold]
 "redis-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_promote_0" -> "redis-bundle-master_promote_0" [ style = bold]
 "redis-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_running_0" -> "redis-bundle_promote_0" [ style = bold]
 "redis-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_start_0" -> "redis-bundle-master_start_0" [ style = bold]
 "redis-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_stop_0" -> "redis-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "redis-bundle_stop_0" -> "redis-bundle-master_stop_0" [ style = bold]
 "redis-bundle_stop_0" -> "redis_stop_0 redis-bundle-0" [ style = bold]
 "redis-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
 "redis-bundle_stopped_0" -> "redis-bundle_promote_0" [ style = bold]
 "redis-bundle_stopped_0" -> "redis-bundle_start_0" [ style = bold]
 "redis-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "redis_confirmed-post_notify_stonith_0" -> "all_stopped" [ style = bold]
 "redis_confirmed-post_notify_stonith_0" -> "redis_monitor_20000 redis-bundle-1" [ style = bold]
 "redis_confirmed-post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
 "redis_demote_0 redis-bundle-0" -> "redis-bundle-master_demoted_0" [ style = bold]
 "redis_demote_0 redis-bundle-0" -> "redis_monitor_45000 redis-bundle-0" [ style = dashed]
 "redis_demote_0 redis-bundle-0" -> "redis_monitor_60000 redis-bundle-0" [ style = dashed]
 "redis_demote_0 redis-bundle-0" -> "redis_stop_0 redis-bundle-0" [ style = bold]
 "redis_demote_0 redis-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "redis_monitor_20000 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_monitor_45000 redis-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "redis_monitor_60000 redis-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "redis_post_notify_demoted_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_demoted_0" [ style = bold]
 "redis_post_notify_demoted_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_demoted_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_demoted_0" [ style = bold]
 "redis_post_notify_demoted_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
-"redis_post_notify_promoted_0 redis-bundle-0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
-"redis_post_notify_promoted_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_promoted_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
 "redis_post_notify_promoted_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_promoted_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
 "redis_post_notify_promoted_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_stonith_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
 "redis_post_notify_stonith_0 redis-bundle-1" -> "redis_confirmed-post_notify_stonith_0" [ style = bold]
 "redis_post_notify_stonith_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_stonith_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
 "redis_post_notify_stonith_0 redis-bundle-2" -> "redis_confirmed-post_notify_stonith_0" [ style = bold]
 "redis_post_notify_stonith_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
 "redis_post_notify_stonith_0" -> "redis_confirmed-post_notify_stonith_0" [ style = bold]
 "redis_post_notify_stonith_0" -> "redis_post_notify_stonith_0 redis-bundle-1" [ style = bold]
 "redis_post_notify_stonith_0" -> "redis_post_notify_stonith_0 redis-bundle-2" [ style = bold]
 "redis_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"]
 "redis_pre_notify_demote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-pre_notify_demote_0" [ style = bold]
 "redis_pre_notify_demote_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_pre_notify_demote_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_demote_0" [ style = bold]
 "redis_pre_notify_demote_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
-"redis_pre_notify_promote_0 redis-bundle-0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
-"redis_pre_notify_promote_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
 "redis_pre_notify_promote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
 "redis_pre_notify_promote_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_pre_notify_promote_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
 "redis_pre_notify_promote_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
 "redis_pre_notify_stop_0 redis-bundle-1" -> "redis-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
 "redis_pre_notify_stop_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_pre_notify_stop_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
 "redis_pre_notify_stop_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
 "redis_promote_0 redis-bundle-1" -> "redis-bundle-master_promoted_0" [ style = bold]
 "redis_promote_0 redis-bundle-1" -> "redis_monitor_20000 redis-bundle-1" [ style = bold]
 "redis_promote_0 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
 "redis_start_0 redis-bundle-0" -> "redis-bundle-master_running_0" [ style = dashed]
 "redis_start_0 redis-bundle-0" -> "redis_monitor_45000 redis-bundle-0" [ style = dashed]
 "redis_start_0 redis-bundle-0" -> "redis_monitor_60000 redis-bundle-0" [ style = dashed]
 "redis_start_0 redis-bundle-0" [ style=dashed color="red" fontcolor="black"]
 "redis_stop_0 redis-bundle-0" -> "all_stopped" [ style = bold]
 "redis_stop_0 redis-bundle-0" -> "redis-bundle-master_stopped_0" [ style = bold]
 "redis_stop_0 redis-bundle-0" -> "redis_start_0 redis-bundle-0" [ style = dashed]
 "redis_stop_0 redis-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "stonith 'off' galera-bundle-0" -> "galera-bundle-master_stop_0" [ style = bold]
 "stonith 'off' galera-bundle-0" -> "stonith_complete" [ style = bold]
 "stonith 'off' galera-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "stonith 'off' rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold]
 "stonith 'off' rabbitmq-bundle-0" -> "rabbitmq_post_notify_stonith_0" [ style = bold]
 "stonith 'off' rabbitmq-bundle-0" -> "stonith_complete" [ style = bold]
 "stonith 'off' rabbitmq-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "stonith 'off' redis-bundle-0" -> "redis-bundle-master_stop_0" [ style = bold]
 "stonith 'off' redis-bundle-0" -> "redis_post_notify_stonith_0" [ style = bold]
 "stonith 'off' redis-bundle-0" -> "stonith_complete" [ style = bold]
 "stonith 'off' redis-bundle-0" [ style=bold color="green" fontcolor="orange"]
 "stonith 'reboot' controller-0" -> "galera-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "haproxy-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "ip-10.0.0.109_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "ip-172.17.4.11_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "ip-192.168.24.7_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "rabbitmq-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "redis-bundle-docker-0_stop_0 controller-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "stonith 'off' galera-bundle-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "stonith 'off' rabbitmq-bundle-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "stonith 'off' redis-bundle-0" [ style = bold]
 "stonith 'reboot' controller-0" -> "stonith_complete" [ style = bold]
 "stonith 'reboot' controller-0" [ style=bold color="green" fontcolor="black"]
 "stonith-fence_ipmilan-5254000dcb3f_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"]
 "stonith-fence_ipmilan-5254000dcb3f_start_0 controller-2" -> "stonith-fence_ipmilan-5254000dcb3f_monitor_60000 controller-2" [ style = bold]
 "stonith-fence_ipmilan-5254000dcb3f_start_0 controller-2" [ style=bold color="green" fontcolor="black"]
 "stonith-fence_ipmilan-5254000dcb3f_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "stonith-fence_ipmilan-5254000dcb3f_stop_0 controller-0" -> "stonith-fence_ipmilan-5254000dcb3f_start_0 controller-2" [ style = bold]
 "stonith-fence_ipmilan-5254000dcb3f_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "stonith-fence_ipmilan-5254003e8e97_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"]
 "stonith-fence_ipmilan-5254003e8e97_start_0 controller-1" -> "stonith-fence_ipmilan-5254003e8e97_monitor_60000 controller-1" [ style = bold]
 "stonith-fence_ipmilan-5254003e8e97_start_0 controller-1" [ style=bold color="green" fontcolor="black"]
 "stonith-fence_ipmilan-5254003e8e97_stop_0 controller-0" -> "all_stopped" [ style = bold]
 "stonith-fence_ipmilan-5254003e8e97_stop_0 controller-0" -> "stonith-fence_ipmilan-5254003e8e97_start_0 controller-1" [ style = bold]
 "stonith-fence_ipmilan-5254003e8e97_stop_0 controller-0" [ style=bold color="green" fontcolor="orange"]
 "stonith_complete" -> "all_stopped" [ style = bold]
 "stonith_complete" -> "galera_start_0 galera-bundle-0" [ style = dashed]
 "stonith_complete" -> "ip-10.0.0.109_start_0 controller-1" [ style = bold]
 "stonith_complete" -> "ip-172.17.4.11_start_0 controller-1" [ style = bold]
 "stonith_complete" -> "ip-192.168.24.7_start_0 controller-2" [ style = bold]
 "stonith_complete" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
 "stonith_complete" -> "redis_promote_0 redis-bundle-1" [ style = bold]
 "stonith_complete" -> "redis_start_0 redis-bundle-0" [ style = dashed]
 "stonith_complete" [ style=bold color="green" fontcolor="orange"]
 }
diff --git a/cts/scheduler/bundle-order-fencing.exp b/cts/scheduler/bundle-order-fencing.exp
index a40a18ec8c..b4a714db00 100644
--- a/cts/scheduler/bundle-order-fencing.exp
+++ b/cts/scheduler/bundle-order-fencing.exp
@@ -1,1894 +1,1862 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0" priority="1000000">
     <action_set>
       <pseudo_event id="211" operation="notified" operation_key="rabbitmq_notified_0" internal_operation_key="rabbitmq:0_confirmed-post_notify_stonith_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="210" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
       </trigger>
       <trigger>
         <rsc_op id="212" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="213" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="1" priority="1000000">
     <action_set>
       <pseudo_event id="210" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="209" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="2">
     <action_set>
       <pseudo_event id="62" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="rabbitmq-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_timeout="200000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="56" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="71" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="3">
     <action_set>
       <rsc_op id="235" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:1_pre_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
         <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="rabbitmq-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="73" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="4" priority="1000000">
     <action_set>
       <rsc_op id="212" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
         <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="rabbitmq-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="75" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="210" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="5">
     <action_set>
       <rsc_op id="236" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:2_pre_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
         <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="rabbitmq-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="73" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="6" priority="1000000">
     <action_set>
       <rsc_op id="213" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
         <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="rabbitmq-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000"  set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="75" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="210" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="7" priority="1000000">
     <action_set>
       <pseudo_event id="76" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="75" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <rsc_op id="212" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="213" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="8" priority="1000000">
     <action_set>
       <pseudo_event id="75" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="72" operation="stopped" operation_key="rabbitmq-bundle-clone_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="74" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="9">
     <action_set>
       <pseudo_event id="74" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="73" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
       </trigger>
       <trigger>
         <rsc_op id="235" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:1_pre_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="236" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:2_pre_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="10">
     <action_set>
       <pseudo_event id="73" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="11" priority="1000000">
     <action_set>
       <pseudo_event id="72" operation="stopped" operation_key="rabbitmq-bundle-clone_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="62" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="71" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="12">
     <action_set>
       <pseudo_event id="71" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="56" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="74" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="209" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="13" priority="1000000">
     <action_set>
       <pseudo_event id="70" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="69" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="14" priority="1000000">
     <action_set>
       <pseudo_event id="69" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="66" operation="running" operation_key="rabbitmq-bundle-clone_running_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="68" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="15">
     <action_set>
       <pseudo_event id="68" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="67" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="16">
     <action_set>
       <pseudo_event id="67" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="76" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="17" priority="1000000">
     <action_set>
       <pseudo_event id="66" operation="running" operation_key="rabbitmq-bundle-clone_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="65" operation="start" operation_key="rabbitmq-bundle-clone_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="18">
     <action_set>
       <pseudo_event id="65" operation="start" operation_key="rabbitmq-bundle-clone_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="54" operation="start" operation_key="rabbitmq-bundle_start_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="68" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="72" operation="stopped" operation_key="rabbitmq-bundle-clone_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="19">
     <action_set>
       <pseudo_event id="42" operation="stop" operation_key="rabbitmq-bundle-docker-0_stop_0">
         <attributes CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/rabbitmq-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3122 -v /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/rabbitmq/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /var/lib/rabbitmq:/var/lib/rabbitmq:rw -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro -v /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro -v /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro -v /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro -v /dev/log:/dev/log:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/rabbitmq-bundle-0:/var/log --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="43" operation="stop" operation_key="rabbitmq-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="56" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="20">
     <action_set>
       <pseudo_event id="43" operation="stop" operation_key="rabbitmq-bundle-0_stop_0">
         <attributes CRM_meta_container="rabbitmq-bundle-docker-0" CRM_meta_timeout="20000" addr="controller-0"  port="3122"/>
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="21">
     <action_set>
       <rsc_op id="36" operation="monitor" operation_key="rabbitmq-bundle-0_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="rabbitmq-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="rabbitmq-bundle-docker-0" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3122"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="22">
     <action_set>
       <rsc_op id="30" operation="monitor" operation_key="rabbitmq-bundle-0_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="rabbitmq-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="rabbitmq-bundle-docker-0" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3122"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="23">
     <action_set>
       <rsc_op id="37" operation="monitor" operation_key="rabbitmq-bundle-1_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="rabbitmq-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="rabbitmq-bundle-docker-1" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-1"  port="3122"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="24">
     <action_set>
       <rsc_op id="31" operation="monitor" operation_key="rabbitmq-bundle-2_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="rabbitmq-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="rabbitmq-bundle-docker-2" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-2"  port="3122"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="25">
     <action_set>
       <pseudo_event id="102" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" additional_parameters="--open-files-limit=16384" cluster_host_map="controller-0:controller-0.internalapi.localdomain;controller-1:controller-1.internalapi.localdomain;controller-2:controller-2.internalapi.localdomain"  enable_creation="true" wsrep_cluster_address="gcomm://controller-0.internalapi.localdomain,controller-1.internalapi.localdomain,controller-2.internalapi.localdomain"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="91" operation="stop" operation_key="galera-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="101" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="108" operation="stop" operation_key="galera-bundle-master_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="26">
     <action_set>
       <pseudo_event id="101" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="false" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" additional_parameters="--open-files-limit=16384" cluster_host_map="controller-0:controller-0.internalapi.localdomain;controller-1:controller-1.internalapi.localdomain;controller-2:controller-2.internalapi.localdomain"  enable_creation="true" wsrep_cluster_address="gcomm://controller-0.internalapi.localdomain,controller-1.internalapi.localdomain,controller-2.internalapi.localdomain"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="112" operation="demote" operation_key="galera-bundle-master_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="27" priority="1000000">
     <action_set>
       <pseudo_event id="113" operation="demoted" operation_key="galera-bundle-master_demoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="101" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="112" operation="demote" operation_key="galera-bundle-master_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="28">
     <action_set>
       <pseudo_event id="112" operation="demote" operation_key="galera-bundle-master_demote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="116" operation="demote" operation_key="galera-bundle_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="29" priority="1000000">
     <action_set>
       <pseudo_event id="109" operation="stopped" operation_key="galera-bundle-master_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="102" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="108" operation="stop" operation_key="galera-bundle-master_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="30">
     <action_set>
       <pseudo_event id="108" operation="stop" operation_key="galera-bundle-master_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="91" operation="stop" operation_key="galera-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="113" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="208" operation="stonith" operation_key="stonith-galera-bundle-0-off" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="31" priority="1000000">
     <action_set>
       <pseudo_event id="107" operation="running" operation_key="galera-bundle-master_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="106" operation="start" operation_key="galera-bundle-master_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="32">
     <action_set>
       <pseudo_event id="106" operation="start" operation_key="galera-bundle-master_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="89" operation="start" operation_key="galera-bundle_start_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="109" operation="stopped" operation_key="galera-bundle-master_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="113" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="33">
     <action_set>
       <pseudo_event id="77" operation="stop" operation_key="galera-bundle-docker-0_stop_0">
         <attributes CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/mysql/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /var/log/mariadb:/var/log/mariadb:rw -v /dev/log:/dev/log:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="78" operation="stop" operation_key="galera-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="91" operation="stop" operation_key="galera-bundle_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="34">
     <action_set>
       <pseudo_event id="78" operation="stop" operation_key="galera-bundle-0_stop_0">
         <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_timeout="20000" addr="controller-0"  port="3123"/>
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="35">
     <action_set>
       <rsc_op id="38" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3123"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="36">
     <action_set>
       <rsc_op id="32" operation="monitor" operation_key="galera-bundle-0_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3123"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="37">
     <action_set>
       <rsc_op id="39" operation="monitor" operation_key="galera-bundle-1_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="galera-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="galera-bundle-docker-1" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-1"  port="3123"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="38">
     <action_set>
       <rsc_op id="33" operation="monitor" operation_key="galera-bundle-2_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="galera-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="galera-bundle-docker-2" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-2"  port="3123"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="39" priority="1000000">
-    <action_set>
-      <rsc_op id="240" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:0_post_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-1">
-        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
-        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <pseudo_event id="161" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="40">
-    <action_set>
-      <rsc_op id="239" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:0_pre_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-1">
-        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
-        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <pseudo_event id="159" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="41" priority="1000000">
     <action_set>
       <pseudo_event id="216" operation="notified" operation_key="redis_notified_0" internal_operation_key="redis:0_confirmed-post_notify_stonith_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="215" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0"/>
       </trigger>
       <trigger>
         <rsc_op id="217" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="218" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="42" priority="1000000">
+  <synapse id="40" priority="1000000">
     <action_set>
       <pseudo_event id="215" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="214" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="43">
+  <synapse id="41">
     <action_set>
       <pseudo_event id="141" operation="stop" operation_key="redis_stop_0" internal_operation_key="redis:0_stop_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000"  wait_last_known_master="true"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="132" operation="stop" operation_key="redis-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="140" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="151" operation="stop" operation_key="redis-bundle-master_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="44">
+  <synapse id="42">
     <action_set>
       <pseudo_event id="140" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0">
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000"  wait_last_known_master="true"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="163" operation="demote" operation_key="redis-bundle-master_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="45" priority="1000000">
+  <synapse id="43" priority="1000000">
     <action_set>
-      <rsc_op id="246" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:1_post_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+      <rsc_op id="244" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:1_post_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="demoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="demote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="167" operation="notify" operation_key="redis-bundle-master_post_notify_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="46">
+  <synapse id="44">
     <action_set>
-      <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:1_pre_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+      <rsc_op id="243" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:1_pre_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="demote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="demote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="165" operation="notify" operation_key="redis-bundle-master_pre_notify_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="47" priority="1000000">
+  <synapse id="45" priority="1000000">
     <action_set>
-      <rsc_op id="242" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+      <rsc_op id="240" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="161" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="48">
+  <synapse id="46">
     <action_set>
-      <rsc_op id="241" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+      <rsc_op id="239" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="159" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="49">
+  <synapse id="47">
     <action_set>
       <rsc_op id="237" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:1_pre_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="153" operation="notify" operation_key="redis-bundle-master_pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="50" priority="1000000">
+  <synapse id="48" priority="1000000">
     <action_set>
       <rsc_op id="217" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="155" operation="notify" operation_key="redis-bundle-master_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="215" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="51">
+  <synapse id="49">
     <action_set>
       <rsc_op id="137" operation="monitor" operation_key="redis_monitor_20000" internal_operation_key="redis:1_monitor_20000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Master" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="136" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:1_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="162" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="216" operation="notified" operation_key="redis_notified_0" internal_operation_key="redis:0_confirmed-post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="52">
+  <synapse id="50">
     <action_set>
       <rsc_op id="136" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:1_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="27" operation="cancel" operation_key="redis_monitor_60000" internal_operation_key="redis:1_monitor_60000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="28" operation="cancel" operation_key="redis_monitor_45000" internal_operation_key="redis:1_monitor_45000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <pseudo_event id="157" operation="promote" operation_key="redis-bundle-master_promote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="53">
+  <synapse id="51">
     <action_set>
       <rsc_op id="28" operation="cancel" operation_key="redis_monitor_45000" internal_operation_key="redis:1_monitor_45000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="45000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="54">
+  <synapse id="52">
     <action_set>
       <rsc_op id="27" operation="cancel" operation_key="redis_monitor_60000" internal_operation_key="redis:1_monitor_60000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
         <primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="60000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Slave" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="55" priority="1000000">
+  <synapse id="53" priority="1000000">
     <action_set>
-      <rsc_op id="248" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:2_post_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+      <rsc_op id="246" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:2_post_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="demoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="demote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="167" operation="notify" operation_key="redis-bundle-master_post_notify_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="56">
+  <synapse id="54">
     <action_set>
-      <rsc_op id="247" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:2_pre_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+      <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:2_pre_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="demote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="demote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="165" operation="notify" operation_key="redis-bundle-master_pre_notify_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="57" priority="1000000">
+  <synapse id="55" priority="1000000">
     <action_set>
-      <rsc_op id="244" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+      <rsc_op id="242" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="161" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="58">
+  <synapse id="56">
     <action_set>
-      <rsc_op id="243" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+      <rsc_op id="241" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="159" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="59">
+  <synapse id="57">
     <action_set>
       <rsc_op id="238" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:2_pre_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="153" operation="notify" operation_key="redis-bundle-master_pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="60" priority="1000000">
+  <synapse id="58" priority="1000000">
     <action_set>
       <rsc_op id="218" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
         <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
         <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 controller-2 controller-1 controller-0" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000"  wait_last_known_master="true"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="155" operation="notify" operation_key="redis-bundle-master_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="215" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="61" priority="1000000">
+  <synapse id="59" priority="1000000">
     <action_set>
       <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="demoted" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="demote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="167" operation="notify" operation_key="redis-bundle-master_post_notify_demoted_0"/>
       </trigger>
       <trigger>
-        <rsc_op id="246" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:1_post_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+        <rsc_op id="244" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:1_post_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
-        <rsc_op id="248" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:2_post_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+        <rsc_op id="246" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:2_post_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="62" priority="1000000">
+  <synapse id="60" priority="1000000">
     <action_set>
       <pseudo_event id="167" operation="notify" operation_key="redis-bundle-master_post_notify_demoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="demoted" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="demote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="164" operation="demoted" operation_key="redis-bundle-master_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="166" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="63">
+  <synapse id="61">
     <action_set>
       <pseudo_event id="166" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_demote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="demote" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="demote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="165" operation="notify" operation_key="redis-bundle-master_pre_notify_demote_0"/>
       </trigger>
       <trigger>
-        <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:1_pre_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+        <rsc_op id="243" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:1_pre_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
-        <rsc_op id="247" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:2_pre_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+        <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:2_pre_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="64">
+  <synapse id="62">
     <action_set>
       <pseudo_event id="165" operation="notify" operation_key="redis-bundle-master_pre_notify_demote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="demote" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="demote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="65" priority="1000000">
+  <synapse id="63" priority="1000000">
     <action_set>
       <pseudo_event id="164" operation="demoted" operation_key="redis-bundle-master_demoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="140" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="163" operation="demote" operation_key="redis-bundle-master_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="66">
+  <synapse id="64">
     <action_set>
       <pseudo_event id="163" operation="demote" operation_key="redis-bundle-master_demote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="166" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_demote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="171" operation="demote" operation_key="redis-bundle_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="67" priority="1000000">
+  <synapse id="65" priority="1000000">
     <action_set>
       <pseudo_event id="162" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="161" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
       </trigger>
       <trigger>
-        <rsc_op id="240" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:0_post_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-1"/>
+        <rsc_op id="240" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
-        <rsc_op id="242" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="244" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+        <rsc_op id="242" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="68" priority="1000000">
+  <synapse id="66" priority="1000000">
     <action_set>
       <pseudo_event id="161" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="158" operation="promoted" operation_key="redis-bundle-master_promoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="160" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="69">
+  <synapse id="67">
     <action_set>
       <pseudo_event id="160" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="159" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
       </trigger>
       <trigger>
-        <rsc_op id="239" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:0_pre_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="241" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+        <rsc_op id="239" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
-        <rsc_op id="243" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+        <rsc_op id="241" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="70">
+  <synapse id="68">
     <action_set>
       <pseudo_event id="159" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="150" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="71" priority="1000000">
+  <synapse id="69" priority="1000000">
     <action_set>
       <pseudo_event id="158" operation="promoted" operation_key="redis-bundle-master_promoted_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="136" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:1_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="72">
+  <synapse id="70">
     <action_set>
       <pseudo_event id="157" operation="promote" operation_key="redis-bundle-master_promote_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="146" operation="running" operation_key="redis-bundle-master_running_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="152" operation="stopped" operation_key="redis-bundle-master_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="160" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="164" operation="demoted" operation_key="redis-bundle-master_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="169" operation="promote" operation_key="redis-bundle_promote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="73" priority="1000000">
+  <synapse id="71" priority="1000000">
     <action_set>
       <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="155" operation="notify" operation_key="redis-bundle-master_post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <rsc_op id="217" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="218" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="74" priority="1000000">
+  <synapse id="72" priority="1000000">
     <action_set>
       <pseudo_event id="155" operation="notify" operation_key="redis-bundle-master_post_notify_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="152" operation="stopped" operation_key="redis-bundle-master_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="154" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="75">
+  <synapse id="73">
     <action_set>
       <pseudo_event id="154" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="153" operation="notify" operation_key="redis-bundle-master_pre_notify_stop_0"/>
       </trigger>
       <trigger>
         <rsc_op id="237" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:1_pre_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
       </trigger>
       <trigger>
         <rsc_op id="238" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:2_pre_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="76">
+  <synapse id="74">
     <action_set>
       <pseudo_event id="153" operation="notify" operation_key="redis-bundle-master_pre_notify_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="77" priority="1000000">
+  <synapse id="75" priority="1000000">
     <action_set>
       <pseudo_event id="152" operation="stopped" operation_key="redis-bundle-master_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="141" operation="stop" operation_key="redis_stop_0" internal_operation_key="redis:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="151" operation="stop" operation_key="redis-bundle-master_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="78">
+  <synapse id="76">
     <action_set>
       <pseudo_event id="151" operation="stop" operation_key="redis-bundle-master_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="132" operation="stop" operation_key="redis-bundle_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="154" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="164" operation="demoted" operation_key="redis-bundle-master_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="214" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="79" priority="1000000">
+  <synapse id="77" priority="1000000">
     <action_set>
       <pseudo_event id="150" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="149" operation="notify" operation_key="redis-bundle-master_post_notify_running_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="80" priority="1000000">
+  <synapse id="78" priority="1000000">
     <action_set>
       <pseudo_event id="149" operation="notify" operation_key="redis-bundle-master_post_notify_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="146" operation="running" operation_key="redis-bundle-master_running_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="148" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="81">
+  <synapse id="79">
     <action_set>
       <pseudo_event id="148" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="147" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="82">
+  <synapse id="80">
     <action_set>
       <pseudo_event id="147" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="83" priority="1000000">
+  <synapse id="81" priority="1000000">
     <action_set>
       <pseudo_event id="146" operation="running" operation_key="redis-bundle-master_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="145" operation="start" operation_key="redis-bundle-master_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="84">
+  <synapse id="82">
     <action_set>
       <pseudo_event id="145" operation="start" operation_key="redis-bundle-master_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="130" operation="start" operation_key="redis-bundle_start_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="148" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="152" operation="stopped" operation_key="redis-bundle-master_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="164" operation="demoted" operation_key="redis-bundle-master_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="85">
+  <synapse id="83">
     <action_set>
       <pseudo_event id="118" operation="stop" operation_key="redis-bundle-docker-0_stop_0">
         <attributes CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/redis-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3124 -v /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/redis/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /var/lib/redis:/var/lib/redis:rw -v /var/log/redis:/var/log/redis:rw -v /var/run/redis:/var/run/redis:rw -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro -v /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro -v /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro -v /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro -v /dev/log:/dev/log:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/redis-bundle-0:/var/log --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="119" operation="stop" operation_key="redis-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="132" operation="stop" operation_key="redis-bundle_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="86">
+  <synapse id="84">
     <action_set>
       <pseudo_event id="119" operation="stop" operation_key="redis-bundle-0_stop_0">
         <attributes CRM_meta_container="redis-bundle-docker-0" CRM_meta_timeout="20000" addr="controller-0"  port="3124"/>
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="87">
+  <synapse id="85">
     <action_set>
       <rsc_op id="40" operation="monitor" operation_key="redis-bundle-0_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="redis-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="redis-bundle-docker-0" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3124"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="88">
+  <synapse id="86">
     <action_set>
       <rsc_op id="34" operation="monitor" operation_key="redis-bundle-0_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="redis-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="redis-bundle-docker-0" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-0"  port="3124"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="89">
+  <synapse id="87">
     <action_set>
       <rsc_op id="41" operation="monitor" operation_key="redis-bundle-1_monitor_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="redis-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="redis-bundle-docker-1" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-1"  port="3124"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="90">
+  <synapse id="88">
     <action_set>
       <rsc_op id="35" operation="monitor" operation_key="redis-bundle-2_monitor_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="redis-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_container="redis-bundle-docker-2" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="controller-2"  port="3124"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="91">
+  <synapse id="89">
     <action_set>
       <rsc_op id="175" operation="monitor" operation_key="ip-192.168.24.7_monitor_10000" on_node="controller-2" on_node_uuid="3">
         <primitive id="ip-192.168.24.7" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32"  ip="192.168.24.7"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="174" operation="start" operation_key="ip-192.168.24.7_start_0" on_node="controller-2" on_node_uuid="3"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="92">
+  <synapse id="90">
     <action_set>
       <rsc_op id="174" operation="start" operation_key="ip-192.168.24.7_start_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="ip-192.168.24.7" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_name="start" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32"  ip="192.168.24.7"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="173" operation="stop" operation_key="ip-192.168.24.7_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="93">
+  <synapse id="91">
     <action_set>
       <pseudo_event id="173" operation="stop" operation_key="ip-192.168.24.7_stop_0">
         <attributes CRM_meta_name="stop" CRM_meta_timeout="20000" cidr_netmask="32"  ip="192.168.24.7"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="196" operation="stopped" operation_key="haproxy-bundle_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="94">
+  <synapse id="92">
     <action_set>
       <rsc_op id="178" operation="monitor" operation_key="ip-10.0.0.109_monitor_10000" on_node="controller-1" on_node_uuid="2">
         <primitive id="ip-10.0.0.109" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32"  ip="10.0.0.109"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="177" operation="start" operation_key="ip-10.0.0.109_start_0" on_node="controller-1" on_node_uuid="2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="95">
+  <synapse id="93">
     <action_set>
       <rsc_op id="177" operation="start" operation_key="ip-10.0.0.109_start_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="ip-10.0.0.109" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_name="start" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32"  ip="10.0.0.109"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="176" operation="stop" operation_key="ip-10.0.0.109_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="96">
+  <synapse id="94">
     <action_set>
       <pseudo_event id="176" operation="stop" operation_key="ip-10.0.0.109_stop_0">
         <attributes CRM_meta_name="stop" CRM_meta_timeout="20000" cidr_netmask="32"  ip="10.0.0.109"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="196" operation="stopped" operation_key="haproxy-bundle_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="97">
+  <synapse id="95">
     <action_set>
       <rsc_op id="187" operation="monitor" operation_key="ip-172.17.4.11_monitor_10000" on_node="controller-1" on_node_uuid="2">
         <primitive id="ip-172.17.4.11" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32"  ip="172.17.4.11"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="186" operation="start" operation_key="ip-172.17.4.11_start_0" on_node="controller-1" on_node_uuid="2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="98">
+  <synapse id="96">
     <action_set>
       <rsc_op id="186" operation="start" operation_key="ip-172.17.4.11_start_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="ip-172.17.4.11" class="ocf" provider="heartbeat" type="IPaddr2"/>
         <attributes CRM_meta_name="start" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32"  ip="172.17.4.11"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="185" operation="stop" operation_key="ip-172.17.4.11_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="99">
+  <synapse id="97">
     <action_set>
       <pseudo_event id="185" operation="stop" operation_key="ip-172.17.4.11_stop_0">
         <attributes CRM_meta_name="stop" CRM_meta_timeout="20000" cidr_netmask="32"  ip="172.17.4.11"/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="196" operation="stopped" operation_key="haproxy-bundle_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="100">
+  <synapse id="98">
     <action_set>
       <pseudo_event id="188" operation="stop" operation_key="haproxy-bundle-docker-0_stop_0">
         <attributes CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest" monitor_cmd="/bin/true" mount_points="" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3121 -v /var/lib/kolla/config_files/haproxy.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/haproxy/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro -v /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro -v /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro -v /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro -v /dev/log:/dev/log:rw --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="195" operation="stop" operation_key="haproxy-bundle_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="101">
+  <synapse id="99">
     <action_set>
       <rsc_op id="203" operation="monitor" operation_key="stonith-fence_ipmilan-5254003e8e97_monitor_60000" on_node="controller-1" on_node_uuid="2">
         <primitive id="stonith-fence_ipmilan-5254003e8e97" class="stonith" type="fence_ipmilan"/>
         <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6231" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-2" privlvl="administrator"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="202" operation="start" operation_key="stonith-fence_ipmilan-5254003e8e97_start_0" on_node="controller-1" on_node_uuid="2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="102">
+  <synapse id="100">
     <action_set>
       <rsc_op id="202" operation="start" operation_key="stonith-fence_ipmilan-5254003e8e97_start_0" on_node="controller-1" on_node_uuid="2">
         <primitive id="stonith-fence_ipmilan-5254003e8e97" class="stonith" type="fence_ipmilan"/>
         <attributes CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6231" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-2" privlvl="administrator"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="201" operation="stop" operation_key="stonith-fence_ipmilan-5254003e8e97_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="103">
+  <synapse id="101">
     <action_set>
       <pseudo_event id="201" operation="stop" operation_key="stonith-fence_ipmilan-5254003e8e97_stop_0">
         <attributes CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6231" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-2" privlvl="administrator"/>
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="104">
+  <synapse id="102">
     <action_set>
       <rsc_op id="206" operation="monitor" operation_key="stonith-fence_ipmilan-5254000dcb3f_monitor_60000" on_node="controller-2" on_node_uuid="3">
         <primitive id="stonith-fence_ipmilan-5254000dcb3f" class="stonith" type="fence_ipmilan"/>
         <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6230" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-1" privlvl="administrator"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="205" operation="start" operation_key="stonith-fence_ipmilan-5254000dcb3f_start_0" on_node="controller-2" on_node_uuid="3"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="105">
+  <synapse id="103">
     <action_set>
       <rsc_op id="205" operation="start" operation_key="stonith-fence_ipmilan-5254000dcb3f_start_0" on_node="controller-2" on_node_uuid="3">
         <primitive id="stonith-fence_ipmilan-5254000dcb3f" class="stonith" type="fence_ipmilan"/>
         <attributes CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6230" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-1" privlvl="administrator"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="204" operation="stop" operation_key="stonith-fence_ipmilan-5254000dcb3f_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="106">
+  <synapse id="104">
     <action_set>
       <pseudo_event id="204" operation="stop" operation_key="stonith-fence_ipmilan-5254000dcb3f_stop_0">
         <attributes CRM_meta_timeout="20000" action="reboot"  ipaddr="172.16.0.1" ipport="6230" lanplus="true" login="admin" passwd="password" pcmk_host_list="controller-1" privlvl="administrator"/>
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="107">
+  <synapse id="105">
     <action_set>
       <pseudo_event id="214" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0">
         <attributes CRM_meta_master_redis="1" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_stonith_action="off" />
         <downed>
           <node id="redis-bundle-0"/>
         </downed>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="108">
+  <synapse id="106">
     <action_set>
       <pseudo_event id="209" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0">
         <attributes CRM_meta_on_node="rabbitmq-bundle-0" CRM_meta_on_node_uuid="rabbitmq-bundle-0" CRM_meta_rmq_node_attr_last_known_rabbitmq="rabbit@controller-0" CRM_meta_stonith_action="off" />
         <downed>
           <node id="rabbitmq-bundle-0"/>
         </downed>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="109">
+  <synapse id="107">
     <action_set>
       <pseudo_event id="208" operation="stonith" operation_key="stonith-galera-bundle-0-off" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0">
         <attributes CRM_meta_on_node="galera-bundle-0" CRM_meta_on_node_uuid="galera-bundle-0" CRM_meta_stonith_action="off" />
         <downed>
           <node id="galera-bundle-0"/>
         </downed>
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="110">
+  <synapse id="108">
     <action_set>
       <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete">
         <attributes />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="208" operation="stonith" operation_key="stonith-galera-bundle-0-off" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0"/>
       </trigger>
       <trigger>
         <pseudo_event id="209" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0"/>
       </trigger>
       <trigger>
         <pseudo_event id="214" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="111" priority="1000000">
+  <synapse id="109" priority="1000000">
     <action_set>
       <pseudo_event id="196" operation="stopped" operation_key="haproxy-bundle_stopped_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="188" operation="stop" operation_key="haproxy-bundle-docker-0_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="112">
+  <synapse id="110">
     <action_set>
       <pseudo_event id="195" operation="stop" operation_key="haproxy-bundle_stop_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="113" priority="1000000">
+  <synapse id="111" priority="1000000">
     <action_set>
       <pseudo_event id="172" operation="demoted" operation_key="redis-bundle_demoted_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="168" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="171" operation="demote" operation_key="redis-bundle_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="114">
+  <synapse id="112">
     <action_set>
       <pseudo_event id="171" operation="demote" operation_key="redis-bundle_demote_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="115" priority="1000000">
+  <synapse id="113" priority="1000000">
     <action_set>
       <pseudo_event id="170" operation="promoted" operation_key="redis-bundle_promoted_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="162" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="116">
+  <synapse id="114">
     <action_set>
       <pseudo_event id="169" operation="promote" operation_key="redis-bundle_promote_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="131" operation="running" operation_key="redis-bundle_running_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="133" operation="stopped" operation_key="redis-bundle_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="172" operation="demoted" operation_key="redis-bundle_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="117" priority="1000000">
+  <synapse id="115" priority="1000000">
     <action_set>
       <pseudo_event id="133" operation="stopped" operation_key="redis-bundle_stopped_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="118" operation="stop" operation_key="redis-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="118">
+  <synapse id="116">
     <action_set>
       <pseudo_event id="132" operation="stop" operation_key="redis-bundle_stop_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="172" operation="demoted" operation_key="redis-bundle_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="119" priority="1000000">
+  <synapse id="117" priority="1000000">
     <action_set>
       <pseudo_event id="131" operation="running" operation_key="redis-bundle_running_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="150" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="120">
+  <synapse id="118">
     <action_set>
       <pseudo_event id="130" operation="start" operation_key="redis-bundle_start_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="133" operation="stopped" operation_key="redis-bundle_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="172" operation="demoted" operation_key="redis-bundle_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="121" priority="1000000">
+  <synapse id="119" priority="1000000">
     <action_set>
       <pseudo_event id="117" operation="demoted" operation_key="galera-bundle_demoted_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="113" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="116" operation="demote" operation_key="galera-bundle_demote_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="122">
+  <synapse id="120">
     <action_set>
       <pseudo_event id="116" operation="demote" operation_key="galera-bundle_demote_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="123" priority="1000000">
+  <synapse id="121" priority="1000000">
     <action_set>
       <pseudo_event id="92" operation="stopped" operation_key="galera-bundle_stopped_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="77" operation="stop" operation_key="galera-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="109" operation="stopped" operation_key="galera-bundle-master_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="124">
+  <synapse id="122">
     <action_set>
       <pseudo_event id="91" operation="stop" operation_key="galera-bundle_stop_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="117" operation="demoted" operation_key="galera-bundle_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="125" priority="1000000">
+  <synapse id="123" priority="1000000">
     <action_set>
       <pseudo_event id="90" operation="running" operation_key="galera-bundle_running_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="107" operation="running" operation_key="galera-bundle-master_running_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="126">
+  <synapse id="124">
     <action_set>
       <pseudo_event id="89" operation="start" operation_key="galera-bundle_start_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="92" operation="stopped" operation_key="galera-bundle_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="117" operation="demoted" operation_key="galera-bundle_demoted_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="127" priority="1000000">
+  <synapse id="125" priority="1000000">
     <action_set>
       <pseudo_event id="57" operation="stopped" operation_key="rabbitmq-bundle_stopped_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="42" operation="stop" operation_key="rabbitmq-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="76" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="128">
+  <synapse id="126">
     <action_set>
       <pseudo_event id="56" operation="stop" operation_key="rabbitmq-bundle_stop_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="129" priority="1000000">
+  <synapse id="127" priority="1000000">
     <action_set>
       <pseudo_event id="55" operation="running" operation_key="rabbitmq-bundle_running_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="70" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="130">
+  <synapse id="128">
     <action_set>
       <pseudo_event id="54" operation="start" operation_key="rabbitmq-bundle_start_0">
         <attributes CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="131">
+  <synapse id="129">
     <action_set>
       <pseudo_event id="29" operation="all_stopped" operation_key="all_stopped">
         <attributes />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="42" operation="stop" operation_key="rabbitmq-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="43" operation="stop" operation_key="rabbitmq-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="62" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="76" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="77" operation="stop" operation_key="galera-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="78" operation="stop" operation_key="galera-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="102" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="118" operation="stop" operation_key="redis-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="119" operation="stop" operation_key="redis-bundle-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="141" operation="stop" operation_key="redis_stop_0" internal_operation_key="redis:0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="156" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_stopped_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="173" operation="stop" operation_key="ip-192.168.24.7_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="176" operation="stop" operation_key="ip-10.0.0.109_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="185" operation="stop" operation_key="ip-172.17.4.11_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="188" operation="stop" operation_key="haproxy-bundle-docker-0_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="201" operation="stop" operation_key="stonith-fence_ipmilan-5254003e8e97_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="204" operation="stop" operation_key="stonith-fence_ipmilan-5254000dcb3f_stop_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="207" operation="stonith_complete" operation_key="stonith_complete"/>
       </trigger>
       <trigger>
         <pseudo_event id="211" operation="notified" operation_key="rabbitmq_notified_0" internal_operation_key="rabbitmq:0_confirmed-post_notify_stonith_0"/>
       </trigger>
       <trigger>
         <pseudo_event id="216" operation="notified" operation_key="redis_notified_0" internal_operation_key="redis:0_confirmed-post_notify_stonith_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="132">
+  <synapse id="130">
     <action_set>
       <crm_event id="1" operation="stonith" operation_key="stonith-controller-0-reboot" on_node="controller-0" on_node_uuid="1">
         <attributes CRM_meta_cinder_volume_role="true" CRM_meta_galera_role="true" CRM_meta_haproxy_role="true" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_rabbitmq_role="true" CRM_meta_redis_role="true" CRM_meta_rmq_node_attr_last_known_rabbitmq="rabbit@controller-0" CRM_meta_stonith_action="reboot" />
         <downed>
           <node id="1"/>
           <node id="redis-bundle-0"/>
           <node id="galera-bundle-0"/>
           <node id="rabbitmq-bundle-0"/>
         </downed>
       </crm_event>
     </action_set>
     <inputs/>
   </synapse>
 </transition_graph>
diff --git a/cts/scheduler/bundle-order-fencing.summary b/cts/scheduler/bundle-order-fencing.summary
index d398a120c6..a3dc3d441d 100644
--- a/cts/scheduler/bundle-order-fencing.summary
+++ b/cts/scheduler/bundle-order-fencing.summary
@@ -1,224 +1,222 @@
 Using the original execution date of: 2017-09-12 10:51:59Z
 
 Current cluster status:
 Node controller-0 (1): UNCLEAN (offline)
 Online: [ controller-1 controller-2 ]
 Containers: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
 
  Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]
    rabbitmq-bundle-0	(ocf::heartbeat:rabbitmq-cluster):	FAILED controller-0 (UNCLEAN)
    rabbitmq-bundle-1	(ocf::heartbeat:rabbitmq-cluster):	Started controller-1
    rabbitmq-bundle-2	(ocf::heartbeat:rabbitmq-cluster):	Started controller-2
  Docker container set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]
    galera-bundle-0	(ocf::heartbeat:galera):	FAILED Master controller-0 (UNCLEAN)
    galera-bundle-1	(ocf::heartbeat:galera):	Master controller-1
    galera-bundle-2	(ocf::heartbeat:galera):	Master controller-2
  Docker container set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]
    redis-bundle-0	(ocf::heartbeat:redis):	FAILED Master controller-0 (UNCLEAN)
    redis-bundle-1	(ocf::heartbeat:redis):	Slave controller-1
    redis-bundle-2	(ocf::heartbeat:redis):	Slave controller-2
  ip-192.168.24.7	(ocf::heartbeat:IPaddr2):	Started controller-0 (UNCLEAN)
  ip-10.0.0.109	(ocf::heartbeat:IPaddr2):	Started controller-0 (UNCLEAN)
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.19	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.19	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-0 (UNCLEAN)
  Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]
    haproxy-bundle-docker-0	(ocf::heartbeat:docker):	Started controller-0 (UNCLEAN)
    haproxy-bundle-docker-1	(ocf::heartbeat:docker):	Started controller-2
    haproxy-bundle-docker-2	(ocf::heartbeat:docker):	Started controller-1
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-2
  stonith-fence_ipmilan-525400efba5c	(stonith:fence_ipmilan):	Started controller-2
  stonith-fence_ipmilan-5254003e8e97	(stonith:fence_ipmilan):	Started controller-0 (UNCLEAN)
  stonith-fence_ipmilan-5254000dcb3f	(stonith:fence_ipmilan):	Started controller-0 (UNCLEAN)
 
 Transition Summary:
  * Fence (off) redis-bundle-0 (resource: redis-bundle-docker-0) 'guest is unclean'
  * Fence (off) rabbitmq-bundle-0 (resource: rabbitmq-bundle-docker-0) 'guest is unclean'
  * Fence (off) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
  * Fence (reboot) controller-0 'peer is no longer part of the cluster'
  * Stop       rabbitmq-bundle-docker-0               (                   controller-0 )   due to node availability
  * Stop       rabbitmq-bundle-0                      (                   controller-0 )   due to unrunnable rabbitmq-bundle-docker-0 start
  * Stop       rabbitmq:0                             (              rabbitmq-bundle-0 )   due to unrunnable rabbitmq-bundle-docker-0 start
  * Stop       galera-bundle-docker-0                 (                   controller-0 )   due to node availability
  * Stop       galera-bundle-0                        (                   controller-0 )   due to unrunnable galera-bundle-docker-0 start
  * Stop       galera:0                               (         Master galera-bundle-0 )   due to unrunnable galera-bundle-docker-0 start
  * Stop       redis-bundle-docker-0                  (                   controller-0 )   due to node availability
  * Stop       redis-bundle-0                         (                   controller-0 )   due to unrunnable redis-bundle-docker-0 start
  * Stop       redis:0                                (          Master redis-bundle-0 )   due to unrunnable redis-bundle-docker-0 start
  * Promote    redis:1                                ( Slave -> Master redis-bundle-1 )  
  * Move       ip-192.168.24.7                        (   controller-0 -> controller-2 )  
  * Move       ip-10.0.0.109                          (   controller-0 -> controller-1 )  
  * Move       ip-172.17.4.11                         (   controller-0 -> controller-1 )  
  * Stop       haproxy-bundle-docker-0                (                   controller-0 )   due to node availability
  * Move       stonith-fence_ipmilan-5254003e8e97     (   controller-0 -> controller-1 )  
  * Move       stonith-fence_ipmilan-5254000dcb3f     (   controller-0 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   rabbitmq-bundle-clone_pre_notify_stop_0
  * Pseudo action:   rabbitmq-bundle-0_stop_0
  * Resource action: rabbitmq-bundle-0 monitor on controller-2
  * Resource action: rabbitmq-bundle-0 monitor on controller-1
  * Resource action: rabbitmq-bundle-1 monitor on controller-2
  * Resource action: rabbitmq-bundle-2 monitor on controller-1
  * Pseudo action:   galera-bundle-0_stop_0
  * Resource action: galera-bundle-0 monitor on controller-2
  * Resource action: galera-bundle-0 monitor on controller-1
  * Resource action: galera-bundle-1 monitor on controller-2
  * Resource action: galera-bundle-2 monitor on controller-1
  * Resource action: redis           cancel=45000 on redis-bundle-1
  * Resource action: redis           cancel=60000 on redis-bundle-1
  * Pseudo action:   redis-bundle-master_pre_notify_demote_0
  * Pseudo action:   redis-bundle-0_stop_0
  * Resource action: redis-bundle-0  monitor on controller-2
  * Resource action: redis-bundle-0  monitor on controller-1
  * Resource action: redis-bundle-1  monitor on controller-2
  * Resource action: redis-bundle-2  monitor on controller-1
  * Pseudo action:   stonith-fence_ipmilan-5254003e8e97_stop_0
  * Pseudo action:   stonith-fence_ipmilan-5254000dcb3f_stop_0
  * Pseudo action:   haproxy-bundle_stop_0
  * Pseudo action:   redis-bundle_demote_0
  * Pseudo action:   galera-bundle_demote_0
  * Pseudo action:   rabbitmq-bundle_stop_0
  * Pseudo action:   rabbitmq-bundle_start_0
  * Fencing controller-0 (reboot)
  * Resource action: rabbitmq        notify on rabbitmq-bundle-1
  * Resource action: rabbitmq        notify on rabbitmq-bundle-2
  * Pseudo action:   rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
  * Pseudo action:   rabbitmq-bundle-docker-0_stop_0
  * Pseudo action:   galera-bundle-master_demote_0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-pre_notify_demote_0
  * Pseudo action:   redis-bundle-master_demote_0
  * Pseudo action:   haproxy-bundle-docker-0_stop_0
  * Resource action: stonith-fence_ipmilan-5254003e8e97 start on controller-1
  * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2
  * Pseudo action:   stonith-redis-bundle-0-off on redis-bundle-0
  * Pseudo action:   stonith-rabbitmq-bundle-0-off on rabbitmq-bundle-0
  * Pseudo action:   stonith-galera-bundle-0-off on galera-bundle-0
  * Pseudo action:   stonith_complete
  * Pseudo action:   haproxy-bundle_stopped_0
  * Pseudo action:   rabbitmq_post_notify_stop_0
  * Pseudo action:   rabbitmq-bundle-clone_stop_0
  * Pseudo action:   galera_demote_0
  * Pseudo action:   galera-bundle-master_demoted_0
  * Pseudo action:   redis_post_notify_stop_0
  * Pseudo action:   redis_demote_0
  * Pseudo action:   redis-bundle-master_demoted_0
  * Pseudo action:   ip-192.168.24.7_stop_0
  * Pseudo action:   ip-10.0.0.109_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1
  * Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2
  * Pseudo action:   galera-bundle_demoted_0
  * Pseudo action:   galera-bundle_stop_0
  * Pseudo action:   rabbitmq_stop_0
  * Pseudo action:   rabbitmq-bundle-clone_stopped_0
  * Pseudo action:   galera-bundle-master_stop_0
  * Pseudo action:   galera-bundle-docker-0_stop_0
  * Pseudo action:   redis-bundle-master_post_notify_demoted_0
  * Resource action: ip-192.168.24.7 start on controller-2
  * Resource action: ip-10.0.0.109   start on controller-1
  * Resource action: ip-172.17.4.11  start on controller-1
  * Pseudo action:   rabbitmq-bundle-clone_post_notify_stopped_0
  * Pseudo action:   galera_stop_0
  * Pseudo action:   galera-bundle-master_stopped_0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-post_notify_demoted_0
  * Pseudo action:   redis-bundle-master_pre_notify_stop_0
  * Resource action: ip-192.168.24.7 monitor=10000 on controller-2
  * Resource action: ip-10.0.0.109   monitor=10000 on controller-1
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-1
  * Pseudo action:   redis-bundle_demoted_0
  * Pseudo action:   redis-bundle_stop_0
  * Pseudo action:   galera-bundle_stopped_0
  * Pseudo action:   galera-bundle_start_0
  * Resource action: rabbitmq        notify on rabbitmq-bundle-1
  * Resource action: rabbitmq        notify on rabbitmq-bundle-2
  * Pseudo action:   rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
  * Pseudo action:   rabbitmq-bundle-clone_pre_notify_start_0
  * Pseudo action:   galera-bundle-master_start_0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-bundle-master_stop_0
  * Pseudo action:   redis-bundle-docker-0_stop_0
  * Pseudo action:   rabbitmq-bundle_stopped_0
  * Pseudo action:   rabbitmq_notified_0
  * Pseudo action:   rabbitmq-bundle-clone_confirmed-pre_notify_start_0
  * Pseudo action:   rabbitmq-bundle-clone_start_0
  * Pseudo action:   galera-bundle-master_running_0
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-bundle-master_stopped_0
  * Pseudo action:   galera-bundle_running_0
  * Pseudo action:   rabbitmq-bundle-clone_running_0
  * Pseudo action:   redis-bundle-master_post_notify_stopped_0
  * Pseudo action:   rabbitmq-bundle-clone_post_notify_running_0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-post_notify_stopped_0
  * Pseudo action:   redis-bundle-master_pre_notify_start_0
  * Pseudo action:   redis-bundle_stopped_0
  * Pseudo action:   redis-bundle_start_0
  * Pseudo action:   rabbitmq-bundle-clone_confirmed-post_notify_running_0
  * Pseudo action:   redis_notified_0
  * Pseudo action:   redis-bundle-master_confirmed-pre_notify_start_0
  * Pseudo action:   redis-bundle-master_start_0
  * Pseudo action:   rabbitmq-bundle_running_0
  * Pseudo action:   all_stopped
  * Pseudo action:   redis-bundle-master_running_0
  * Pseudo action:   redis-bundle-master_post_notify_running_0
  * Pseudo action:   redis-bundle-master_confirmed-post_notify_running_0
  * Pseudo action:   redis-bundle_running_0
  * Pseudo action:   redis-bundle-master_pre_notify_promote_0
  * Pseudo action:   redis-bundle_promote_0
- * Resource action: redis           notify on redis-bundle-0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-pre_notify_promote_0
  * Pseudo action:   redis-bundle-master_promote_0
  * Resource action: redis           promote on redis-bundle-1
  * Pseudo action:   redis-bundle-master_promoted_0
  * Pseudo action:   redis-bundle-master_post_notify_promoted_0
- * Resource action: redis           notify on redis-bundle-0
  * Resource action: redis           notify on redis-bundle-1
  * Resource action: redis           notify on redis-bundle-2
  * Pseudo action:   redis-bundle-master_confirmed-post_notify_promoted_0
  * Pseudo action:   redis-bundle_promoted_0
  * Resource action: redis           monitor=20000 on redis-bundle-1
 Using the original execution date of: 2017-09-12 10:51:59Z
 
 Revised cluster status:
 Online: [ controller-1 controller-2 ]
 OFFLINE: [ controller-0 ]
 Containers: [ galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
 
  Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]
    rabbitmq-bundle-0	(ocf::heartbeat:rabbitmq-cluster):	FAILED
    rabbitmq-bundle-1	(ocf::heartbeat:rabbitmq-cluster):	Started controller-1
    rabbitmq-bundle-2	(ocf::heartbeat:rabbitmq-cluster):	Started controller-2
  Docker container set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]
    galera-bundle-0	(ocf::heartbeat:galera):	FAILED Master
    galera-bundle-1	(ocf::heartbeat:galera):	Master controller-1
    galera-bundle-2	(ocf::heartbeat:galera):	Master controller-2
  Docker container set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]
    redis-bundle-0	(ocf::heartbeat:redis):	FAILED Master
    redis-bundle-1	(ocf::heartbeat:redis):	Master controller-1
    redis-bundle-2	(ocf::heartbeat:redis):	Slave controller-2
  ip-192.168.24.7	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-10.0.0.109	(ocf::heartbeat:IPaddr2):	Started controller-1
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.19	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.19	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1
  Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]
    haproxy-bundle-docker-0	(ocf::heartbeat:docker):	Stopped
    haproxy-bundle-docker-1	(ocf::heartbeat:docker):	Started controller-2
    haproxy-bundle-docker-2	(ocf::heartbeat:docker):	Started controller-1
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-2
  stonith-fence_ipmilan-525400efba5c	(stonith:fence_ipmilan):	Started controller-2
  stonith-fence_ipmilan-5254003e8e97	(stonith:fence_ipmilan):	Started controller-1
  stonith-fence_ipmilan-5254000dcb3f	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/notify-behind-stopping-remote.dot b/cts/scheduler/notify-behind-stopping-remote.dot
new file mode 100644
index 0000000000..1ced66f450
--- /dev/null
+++ b/cts/scheduler/notify-behind-stopping-remote.dot
@@ -0,0 +1,90 @@
+digraph "g" {
+"Cancel redis-bundle-0_monitor_60000 ra1" [ style=bold color="green" fontcolor="black"]
+"Cancel redis-bundle-1_monitor_60000 ra2" [ style=bold color="green" fontcolor="black"]
+"Cancel redis-bundle-2_monitor_60000 ra3" [ style=bold color="green" fontcolor="black"]
+"Cancel redis_monitor_45000 redis-bundle-0" -> "redis_promote_0 redis-bundle-0" [ style = bold]
+"Cancel redis_monitor_45000 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"Cancel redis_monitor_60000 redis-bundle-0" -> "redis_promote_0 redis-bundle-0" [ style = bold]
+"Cancel redis_monitor_60000 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-0_monitor_30000 ra1" [ style=bold color="green" fontcolor="black"]
+"redis-bundle-1_monitor_30000 ra2" [ style=dashed color="red" fontcolor="black"]
+"redis-bundle-1_start_0 ra2" -> "redis-bundle-1_monitor_30000 ra2" [ style = dashed]
+"redis-bundle-1_start_0 ra2" -> "redis_monitor_45000 redis-bundle-1" [ style = dashed]
+"redis-bundle-1_start_0 ra2" -> "redis_monitor_60000 redis-bundle-1" [ style = dashed]
+"redis-bundle-1_start_0 ra2" -> "redis_start_0 redis-bundle-1" [ style = dashed]
+"redis-bundle-1_start_0 ra2" [ style=dashed color="red" fontcolor="black"]
+"redis-bundle-1_stop_0 ra2" -> "Cancel redis-bundle-1_monitor_60000 ra2" [ style = bold]
+"redis-bundle-1_stop_0 ra2" -> "all_stopped" [ style = bold]
+"redis-bundle-1_stop_0 ra2" -> "redis-bundle-1_start_0 ra2" [ style = dashed]
+"redis-bundle-1_stop_0 ra2" -> "redis-bundle-docker-1_stop_0 ra2" [ style = bold]
+"redis-bundle-1_stop_0 ra2" [ style=bold color="green" fontcolor="black"]
+"redis-bundle-2_monitor_30000 ra3" [ style=bold color="green" fontcolor="black"]
+"redis-bundle-docker-1_stop_0 ra2" -> "all_stopped" [ style = bold]
+"redis-bundle-docker-1_stop_0 ra2" -> "redis-bundle_stopped_0" [ style = bold]
+"redis-bundle-docker-1_stop_0 ra2" [ style=bold color="green" fontcolor="black"]
+"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis-bundle_promoted_0" [ style = bold]
+"redis-bundle-master_confirmed-post_notify_promoted_0" -> "redis_monitor_20000 redis-bundle-0" [ style = bold]
+"redis-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle-master_pre_notify_promote_0" [ style = bold]
+"redis-bundle-master_confirmed-post_notify_running_0" -> "redis-bundle_running_0" [ style = bold]
+"redis-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold]
+"redis-bundle-master_confirmed-pre_notify_promote_0" -> "redis-bundle-master_promote_0" [ style = bold]
+"redis-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold]
+"redis-bundle-master_confirmed-pre_notify_start_0" -> "redis-bundle-master_start_0" [ style = bold]
+"redis-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_post_notify_promoted_0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-0" [ style = bold]
+"redis-bundle-master_post_notify_promoted_0" -> "redis_post_notify_promoted_0 redis-bundle-2" [ style = bold]
+"redis-bundle-master_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_post_notify_running_0" -> "redis-bundle-master_confirmed-post_notify_running_0" [ style = bold]
+"redis-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_pre_notify_promote_0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-0" [ style = bold]
+"redis-bundle-master_pre_notify_promote_0" -> "redis_pre_notify_promote_0 redis-bundle-2" [ style = bold]
+"redis-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_pre_notify_start_0" -> "redis-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
+"redis-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_promote_0" -> "redis_promote_0 redis-bundle-0" [ style = bold]
+"redis-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_promoted_0" -> "redis-bundle-master_post_notify_promoted_0" [ style = bold]
+"redis-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_running_0" -> "redis-bundle-master_post_notify_running_0" [ style = bold]
+"redis-bundle-master_running_0" -> "redis-bundle-master_promote_0" [ style = bold]
+"redis-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle-master_start_0" -> "redis-bundle-master_running_0" [ style = bold]
+"redis-bundle-master_start_0" -> "redis_start_0 redis-bundle-1" [ style = dashed]
+"redis-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_promote_0" -> "redis-bundle-master_promote_0" [ style = bold]
+"redis-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_running_0" -> "redis-bundle_promote_0" [ style = bold]
+"redis-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_start_0" -> "redis-bundle-master_start_0" [ style = bold]
+"redis-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_stop_0" -> "redis-bundle-docker-1_stop_0 ra2" [ style = bold]
+"redis-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
+"redis-bundle_stopped_0" -> "redis-bundle_promote_0" [ style = bold]
+"redis-bundle_stopped_0" -> "redis-bundle_start_0" [ style = bold]
+"redis-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"redis_monitor_20000 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"redis_monitor_45000 redis-bundle-1" [ style=dashed color="red" fontcolor="black"]
+"redis_monitor_60000 redis-bundle-1" [ style=dashed color="red" fontcolor="black"]
+"redis_post_notify_promoted_0 redis-bundle-0" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"redis_post_notify_promoted_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"redis_post_notify_promoted_0 redis-bundle-2" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"redis_post_notify_promoted_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
+"redis_pre_notify_promote_0 redis-bundle-0" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"redis_pre_notify_promote_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"redis_pre_notify_promote_0 redis-bundle-2" -> "redis-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"redis_pre_notify_promote_0 redis-bundle-2" [ style=bold color="green" fontcolor="black"]
+"redis_promote_0 redis-bundle-0" -> "redis-bundle-master_promoted_0" [ style = bold]
+"redis_promote_0 redis-bundle-0" -> "redis_monitor_20000 redis-bundle-0" [ style = bold]
+"redis_promote_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
+"redis_start_0 redis-bundle-1" -> "redis-bundle-master_running_0" [ style = dashed]
+"redis_start_0 redis-bundle-1" -> "redis_monitor_45000 redis-bundle-1" [ style = dashed]
+"redis_start_0 redis-bundle-1" -> "redis_monitor_60000 redis-bundle-1" [ style = dashed]
+"redis_start_0 redis-bundle-1" [ style=dashed color="red" fontcolor="black"]
+}
diff --git a/cts/scheduler/notify-behind-stopping-remote.exp b/cts/scheduler/notify-behind-stopping-remote.exp
new file mode 100644
index 0000000000..481dfb7034
--- /dev/null
+++ b/cts/scheduler/notify-behind-stopping-remote.exp
@@ -0,0 +1,437 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
+  <synapse id="0" priority="1000000">
+    <action_set>
+      <rsc_op id="80" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:0_post_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="ra1 ra2 ra3 ra1 ra2 ra3" CRM_meta_notify_all_uname="ra1 ra2 ra3 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 ra3 ra2 ra1" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_physical_host="ra1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="90000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="55" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="79" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:0_pre_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="ra1 ra2 ra3 ra1 ra2 ra3" CRM_meta_notify_all_uname="ra1 ra2 ra3 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 ra3 ra2 ra1" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_physical_host="ra1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="90000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="53" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="33" operation="monitor" operation_key="redis_monitor_20000" internal_operation_key="redis:0_monitor_20000" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_op_target_rc="8" CRM_meta_physical_host="ra1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Master" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="32" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:0_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="56" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <rsc_op id="32" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:0_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="ra1 ra2 ra3 ra1 ra2 ra3" CRM_meta_notify_all_uname="ra1 ra2 ra3 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 ra3 ra2 ra1" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_physical_host="ra1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="7" operation="cancel" operation_key="redis_monitor_60000" internal_operation_key="redis:0_monitor_60000" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="8" operation="cancel" operation_key="redis_monitor_45000" internal_operation_key="redis:0_monitor_45000" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="51" operation="promote" operation_key="redis-bundle-master_promote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="8" operation="cancel" operation_key="redis_monitor_45000" internal_operation_key="redis:0_monitor_45000" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="45000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="7" operation="cancel" operation_key="redis_monitor_60000" internal_operation_key="redis:0_monitor_60000" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1">
+        <primitive id="redis" long-id="redis:0" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="60000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="redis-bundle-0" CRM_meta_on_node_uuid="redis-bundle-0" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Slave" CRM_meta_timeout="60000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="6" priority="1000000">
+    <action_set>
+      <rsc_op id="82" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="ra3">
+        <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="ra1 ra2 ra3 ra1 ra2 ra3" CRM_meta_notify_all_uname="ra1 ra2 ra3 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 ra3 ra2 ra1" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="ra3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="90000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="55" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <rsc_op id="81" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="ra3">
+        <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="ra1 ra2 ra3 ra1 ra2 ra3" CRM_meta_notify_all_uname="ra1 ra2 ra3 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="redis-bundle-1 redis-bundle-0 redis-bundle-2 ra3 ra2 ra1" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="ra3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="90000"  wait_last_known_master="true"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="53" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="8" priority="1000000">
+    <action_set>
+      <pseudo_event id="56" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="55" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="80" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:0_post_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="82" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="ra3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="9" priority="1000000">
+    <action_set>
+      <pseudo_event id="55" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="52" operation="promoted" operation_key="redis-bundle-master_promoted_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="54" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="10">
+    <action_set>
+      <pseudo_event id="54" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="53" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="79" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:0_pre_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="81" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="ra3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="11">
+    <action_set>
+      <pseudo_event id="53" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="44" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="12" priority="1000000">
+    <action_set>
+      <pseudo_event id="52" operation="promoted" operation_key="redis-bundle-master_promoted_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="32" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:0_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="ra1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="13">
+    <action_set>
+      <pseudo_event id="51" operation="promote" operation_key="redis-bundle-master_promote_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="40" operation="running" operation_key="redis-bundle-master_running_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="54" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_promote_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="63" operation="promote" operation_key="redis-bundle_promote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="14" priority="1000000">
+    <action_set>
+      <pseudo_event id="44" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="43" operation="notify" operation_key="redis-bundle-master_post_notify_running_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="15" priority="1000000">
+    <action_set>
+      <pseudo_event id="43" operation="notify" operation_key="redis-bundle-master_post_notify_running_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="40" operation="running" operation_key="redis-bundle-master_running_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="42" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="16">
+    <action_set>
+      <pseudo_event id="42" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="41" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="17">
+    <action_set>
+      <pseudo_event id="41" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="18" priority="1000000">
+    <action_set>
+      <pseudo_event id="40" operation="running" operation_key="redis-bundle-master_running_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="39" operation="start" operation_key="redis-bundle-master_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="19">
+    <action_set>
+      <pseudo_event id="39" operation="start" operation_key="redis-bundle-master_start_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="26" operation="start" operation_key="redis-bundle_start_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="42" operation="notified" operation_key="redis-bundle-master_confirmed-pre_notify_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="20">
+    <action_set>
+      <rsc_op id="16" operation="monitor" operation_key="redis-bundle-0_monitor_30000" on_node="ra1" on_node_uuid="1">
+        <primitive id="redis-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_container="redis-bundle-docker-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="ra1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="ra1"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="21">
+    <action_set>
+      <rsc_op id="4" operation="cancel" operation_key="redis-bundle-0_monitor_60000" on_node="ra1" on_node_uuid="1">
+        <primitive id="redis-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_call_id="14" CRM_meta_container="redis-bundle-docker-0" CRM_meta_interval="60000" CRM_meta_on_node="ra1" CRM_meta_on_node_uuid="1" CRM_meta_operation="monitor" CRM_meta_timeout="20000" addr="ra1"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="22">
+    <action_set>
+      <rsc_op id="17" operation="stop" operation_key="redis-bundle-docker-1_stop_0" on_node="ra2" on_node_uuid="2">
+        <primitive id="redis-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
+        <attributes CRM_meta_on_node="ra2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true"  force_kill="false" image="docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/redis-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker_remoted" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /dev/log:/dev/log -v /dev/zero:/etc/libqb/force-filesystem-sockets:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /etc/redis.conf:/etc/redis.conf:ro -v /var/lib/redis:/var/lib/redis:rw -v /var/log/redis:/var/log/redis:rw -v /var/run/redis:/var/run/redis:rw -v /usr/lib/ocf:/usr/lib/ocf:rw -v /var/log/pacemaker:/var/log/pacemaker:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/redis-bundle-1:/var/log --user=root --log-driver=journald "/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="18" operation="stop" operation_key="redis-bundle-1_stop_0" on_node="ra2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="28" operation="stop" operation_key="redis-bundle_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="23">
+    <action_set>
+      <rsc_op id="18" operation="stop" operation_key="redis-bundle-1_stop_0" on_node="ra2" on_node_uuid="2">
+        <primitive id="redis-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_container="redis-bundle-docker-1" CRM_meta_on_node="ra2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" addr="ra2"  port="3123"/>
+        <downed>
+          <node id="redis-bundle-1"/>
+        </downed>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="24">
+    <action_set>
+      <rsc_op id="2" operation="cancel" operation_key="redis-bundle-1_monitor_60000" on_node="ra2" on_node_uuid="2">
+        <primitive id="redis-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_call_id="14" CRM_meta_container="redis-bundle-docker-1" CRM_meta_interval="60000" CRM_meta_on_node="ra2" CRM_meta_on_node_uuid="2" CRM_meta_operation="monitor" CRM_meta_timeout="20000" addr="ra2"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="18" operation="stop" operation_key="redis-bundle-1_stop_0" on_node="ra2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="25">
+    <action_set>
+      <rsc_op id="25" operation="monitor" operation_key="redis-bundle-2_monitor_30000" on_node="ra3" on_node_uuid="3">
+        <primitive id="redis-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_container="redis-bundle-docker-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="ra3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="30000" addr="ra3"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="26">
+    <action_set>
+      <rsc_op id="6" operation="cancel" operation_key="redis-bundle-2_monitor_60000" on_node="ra3" on_node_uuid="3">
+        <primitive id="redis-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_call_id="14" CRM_meta_container="redis-bundle-docker-2" CRM_meta_interval="60000" CRM_meta_on_node="ra3" CRM_meta_on_node_uuid="3" CRM_meta_operation="monitor" CRM_meta_timeout="20000" addr="ra3"  port="3123"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="27" priority="1000000">
+    <action_set>
+      <pseudo_event id="64" operation="promoted" operation_key="redis-bundle_promoted_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="56" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="28">
+    <action_set>
+      <pseudo_event id="63" operation="promote" operation_key="redis-bundle_promote_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="27" operation="running" operation_key="redis-bundle_running_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="29" operation="stopped" operation_key="redis-bundle_stopped_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="29" priority="1000000">
+    <action_set>
+      <pseudo_event id="29" operation="stopped" operation_key="redis-bundle_stopped_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="17" operation="stop" operation_key="redis-bundle-docker-1_stop_0" on_node="ra2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="30">
+    <action_set>
+      <pseudo_event id="28" operation="stop" operation_key="redis-bundle_stop_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="31" priority="1000000">
+    <action_set>
+      <pseudo_event id="27" operation="running" operation_key="redis-bundle_running_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="44" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_running_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="32">
+    <action_set>
+      <pseudo_event id="26" operation="start" operation_key="redis-bundle_start_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="29" operation="stopped" operation_key="redis-bundle_stopped_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="33">
+    <action_set>
+      <pseudo_event id="11" operation="all_stopped" operation_key="all_stopped">
+        <attributes />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="17" operation="stop" operation_key="redis-bundle-docker-1_stop_0" on_node="ra2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="18" operation="stop" operation_key="redis-bundle-1_stop_0" on_node="ra2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+</transition_graph>
diff --git a/cts/scheduler/notify-behind-stopping-remote.scores b/cts/scheduler/notify-behind-stopping-remote.scores
new file mode 100644
index 0000000000..e58b614aa7
--- /dev/null
+++ b/cts/scheduler/notify-behind-stopping-remote.scores
@@ -0,0 +1,65 @@
+Allocation scores:
+Using the original execution date of: 2018-11-22 20:36:07Z
+clone_color: redis-bundle-master allocation score on ra1: -INFINITY
+clone_color: redis-bundle-master allocation score on ra2: -INFINITY
+clone_color: redis-bundle-master allocation score on ra3: -INFINITY
+clone_color: redis-bundle-master allocation score on redis-bundle-0: 0
+clone_color: redis-bundle-master allocation score on redis-bundle-1: 0
+clone_color: redis-bundle-master allocation score on redis-bundle-2: 0
+clone_color: redis:0 allocation score on redis-bundle-0: INFINITY
+clone_color: redis:1 allocation score on redis-bundle-1: INFINITY
+clone_color: redis:2 allocation score on redis-bundle-2: INFINITY
+container_color: redis-bundle allocation score on ra1: 0
+container_color: redis-bundle allocation score on ra2: -INFINITY
+container_color: redis-bundle allocation score on ra3: 0
+container_color: redis-bundle-0 allocation score on ra1: 0
+container_color: redis-bundle-0 allocation score on ra2: 0
+container_color: redis-bundle-0 allocation score on ra3: 0
+container_color: redis-bundle-1 allocation score on ra1: 0
+container_color: redis-bundle-1 allocation score on ra2: 0
+container_color: redis-bundle-1 allocation score on ra3: 0
+container_color: redis-bundle-2 allocation score on ra1: 0
+container_color: redis-bundle-2 allocation score on ra2: 0
+container_color: redis-bundle-2 allocation score on ra3: 0
+container_color: redis-bundle-docker-0 allocation score on ra1: 0
+container_color: redis-bundle-docker-0 allocation score on ra2: -INFINITY
+container_color: redis-bundle-docker-0 allocation score on ra3: 0
+container_color: redis-bundle-docker-1 allocation score on ra1: 0
+container_color: redis-bundle-docker-1 allocation score on ra2: -INFINITY
+container_color: redis-bundle-docker-1 allocation score on ra3: 0
+container_color: redis-bundle-docker-2 allocation score on ra1: 0
+container_color: redis-bundle-docker-2 allocation score on ra2: -INFINITY
+container_color: redis-bundle-docker-2 allocation score on ra3: 0
+container_color: redis-bundle-master allocation score on ra1: 0
+container_color: redis-bundle-master allocation score on ra2: 0
+container_color: redis-bundle-master allocation score on ra3: 0
+container_color: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+container_color: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+container_color: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+container_color: redis:0 allocation score on redis-bundle-0: 501
+container_color: redis:1 allocation score on redis-bundle-1: 500
+container_color: redis:2 allocation score on redis-bundle-2: 501
+native_color: redis-bundle-0 allocation score on ra1: 10000
+native_color: redis-bundle-0 allocation score on ra2: 0
+native_color: redis-bundle-0 allocation score on ra3: 0
+native_color: redis-bundle-1 allocation score on ra1: 0
+native_color: redis-bundle-1 allocation score on ra2: 0
+native_color: redis-bundle-1 allocation score on ra3: 0
+native_color: redis-bundle-2 allocation score on ra1: 0
+native_color: redis-bundle-2 allocation score on ra2: 0
+native_color: redis-bundle-2 allocation score on ra3: 10000
+native_color: redis-bundle-docker-0 allocation score on ra1: 0
+native_color: redis-bundle-docker-0 allocation score on ra2: -INFINITY
+native_color: redis-bundle-docker-0 allocation score on ra3: 0
+native_color: redis-bundle-docker-1 allocation score on ra1: -INFINITY
+native_color: redis-bundle-docker-1 allocation score on ra2: -INFINITY
+native_color: redis-bundle-docker-1 allocation score on ra3: -INFINITY
+native_color: redis-bundle-docker-2 allocation score on ra1: -INFINITY
+native_color: redis-bundle-docker-2 allocation score on ra2: -INFINITY
+native_color: redis-bundle-docker-2 allocation score on ra3: 0
+native_color: redis:0 allocation score on redis-bundle-0: INFINITY
+native_color: redis:1 allocation score on redis-bundle-1: INFINITY
+native_color: redis:2 allocation score on redis-bundle-2: INFINITY
+redis:0 promotion score on redis-bundle-0: 1
+redis:1 promotion score on redis-bundle-1: -1
+redis:2 promotion score on redis-bundle-2: 1
diff --git a/cts/scheduler/notify-behind-stopping-remote.summary b/cts/scheduler/notify-behind-stopping-remote.summary
new file mode 100644
index 0000000000..82bce9ef52
--- /dev/null
+++ b/cts/scheduler/notify-behind-stopping-remote.summary
@@ -0,0 +1,63 @@
+Using the original execution date of: 2018-11-22 20:36:07Z
+
+Current cluster status:
+Online: [ ra1 ra2 ra3 ]
+Containers: [ redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
+
+ Docker container set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]
+   redis-bundle-0	(ocf::heartbeat:redis):	Slave ra1
+   redis-bundle-1	(ocf::heartbeat:redis):	Stopped ra2
+   redis-bundle-2	(ocf::heartbeat:redis):	Slave ra3
+
+Transition Summary:
+ * Promote    redis:0                   ( Slave -> Master redis-bundle-0 )  
+ * Stop       redis-bundle-docker-1     (                            ra2 )   due to node availability
+ * Stop       redis-bundle-1            (                            ra2 )   due to unrunnable redis-bundle-docker-1 start
+ * Start      redis:1                   (                 redis-bundle-1 )   due to unrunnable redis-bundle-docker-1 start (blocked)
+
+Executing cluster transition:
+ * Resource action: redis           cancel=45000 on redis-bundle-0
+ * Resource action: redis           cancel=60000 on redis-bundle-0
+ * Pseudo action:   redis-bundle-master_pre_notify_start_0
+ * Resource action: redis-bundle-0  monitor=30000 on ra1
+ * Resource action: redis-bundle-0  cancel=60000 on ra1
+ * Resource action: redis-bundle-1  stop on ra2
+ * Resource action: redis-bundle-1  cancel=60000 on ra2
+ * Resource action: redis-bundle-2  monitor=30000 on ra3
+ * Resource action: redis-bundle-2  cancel=60000 on ra3
+ * Pseudo action:   redis-bundle_stop_0
+ * Pseudo action:   redis-bundle-master_confirmed-pre_notify_start_0
+ * Resource action: redis-bundle-docker-1 stop on ra2
+ * Pseudo action:   redis-bundle_stopped_0
+ * Pseudo action:   redis-bundle_start_0
+ * Pseudo action:   all_stopped
+ * Pseudo action:   redis-bundle-master_start_0
+ * Pseudo action:   redis-bundle-master_running_0
+ * Pseudo action:   redis-bundle-master_post_notify_running_0
+ * Pseudo action:   redis-bundle-master_confirmed-post_notify_running_0
+ * Pseudo action:   redis-bundle_running_0
+ * Pseudo action:   redis-bundle-master_pre_notify_promote_0
+ * Pseudo action:   redis-bundle_promote_0
+ * Resource action: redis           notify on redis-bundle-0
+ * Resource action: redis           notify on redis-bundle-2
+ * Pseudo action:   redis-bundle-master_confirmed-pre_notify_promote_0
+ * Pseudo action:   redis-bundle-master_promote_0
+ * Resource action: redis           promote on redis-bundle-0
+ * Pseudo action:   redis-bundle-master_promoted_0
+ * Pseudo action:   redis-bundle-master_post_notify_promoted_0
+ * Resource action: redis           notify on redis-bundle-0
+ * Resource action: redis           notify on redis-bundle-2
+ * Pseudo action:   redis-bundle-master_confirmed-post_notify_promoted_0
+ * Pseudo action:   redis-bundle_promoted_0
+ * Resource action: redis           monitor=20000 on redis-bundle-0
+Using the original execution date of: 2018-11-22 20:36:07Z
+
+Revised cluster status:
+Online: [ ra1 ra2 ra3 ]
+Containers: [ redis-bundle-0:redis-bundle-docker-0 redis-bundle-2:redis-bundle-docker-2 ]
+
+ Docker container set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]
+   redis-bundle-0	(ocf::heartbeat:redis):	Master ra1
+   redis-bundle-1	(ocf::heartbeat:redis):	Stopped
+   redis-bundle-2	(ocf::heartbeat:redis):	Slave ra3
+
diff --git a/cts/scheduler/notify-behind-stopping-remote.xml b/cts/scheduler/notify-behind-stopping-remote.xml
new file mode 100644
index 0000000000..66351b808b
--- /dev/null
+++ b/cts/scheduler/notify-behind-stopping-remote.xml
@@ -0,0 +1,187 @@
+<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="53" num_updates="5" admin_epoch="0" cib-last-written="Thu Nov 22 15:35:52 2018" update-origin="ra2" update-client="crm_resource" update-user="root" have-quorum="1" dc-uuid="3" execution-date="1542918967">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.19-8.el7_6.1-c3c624ea3d"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="ratester"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1542909574"/>
+      </cluster_property_set>
+      <cluster_property_set id="redis_replication">
+        <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="ra2"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="ra1"/>
+      <node id="2" uname="ra2"/>
+      <node id="3" uname="ra3"/>
+    </nodes>
+    <resources>
+      <bundle id="redis-bundle">
+        <docker masters="1" network="host" replicas="3" image="docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo" run-command="/usr/sbin/pacemaker_remoted" options="--user=root --log-driver=journald"/>
+        <network control-port="3123"/>
+        <storage>
+          <storage-mapping target-dir="/dev/log" id="map0" source-dir="/dev/log"/>
+          <storage-mapping target-dir="/etc/libqb/force-filesystem-sockets" options="ro" id="map1" source-dir="/dev/zero"/>
+          <storage-mapping target-dir="/etc/hosts" options="ro" id="map2" source-dir="/etc/hosts"/>
+          <storage-mapping target-dir="/etc/localtime" options="ro" id="map3" source-dir="/etc/localtime"/>
+          <storage-mapping target-dir="/etc/redis.conf" options="ro" id="map4" source-dir="/etc/redis.conf"/>
+          <storage-mapping target-dir="/var/lib/redis" options="rw" id="map5" source-dir="/var/lib/redis"/>
+          <storage-mapping target-dir="/var/log/redis" options="rw" id="map6" source-dir="/var/log/redis"/>
+          <storage-mapping target-dir="/var/run/redis" options="rw" id="map7" source-dir="/var/run/redis"/>
+          <storage-mapping target-dir="/usr/lib/ocf" options="rw" id="map8" source-dir="/usr/lib/ocf"/>
+          <storage-mapping target-dir="/var/log/pacemaker" options="rw" id="pcmk1" source-dir="/var/log/pacemaker"/>
+        </storage>
+        <primitive class="ocf" id="redis" provider="heartbeat" type="redis">
+          <instance_attributes id="redis-instance_attributes">
+            <nvpair id="redis-instance_attributes-wait_last_known_master" name="wait_last_known_master" value="true"/>
+          </instance_attributes>
+          <meta_attributes id="redis-meta_attributes">
+            <nvpair id="redis-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+            <nvpair id="redis-meta_attributes-notify" name="notify" value="true"/>
+          </meta_attributes>
+          <operations>
+            <op id="redis-demote-interval-0s" interval="0s" name="demote" timeout="120s"/>
+            <op id="redis-monitor-interval-45s" interval="45s" name="monitor" timeout="60s"/>
+            <op id="redis-monitor-interval-20s" interval="20s" name="monitor" role="Master" timeout="60s"/>
+            <op id="redis-monitor-interval-60s" interval="60s" name="monitor" role="Slave" timeout="60s"/>
+            <op id="redis-notify-interval-0s" interval="0s" name="notify" timeout="90s"/>
+            <op id="redis-promote-interval-0s" interval="0s" name="promote" timeout="120s"/>
+            <op id="redis-start-interval-0s" interval="0s" name="start" timeout="120s"/>
+            <op id="redis-stop-interval-0s" interval="0s" name="stop" timeout="200s"/>
+          </operations>
+        </primitive>
+      </bundle>
+    </resources>
+    <constraints>
+      <rsc_location id="cli-ban-redis-bundle-on-ra2" rsc="redis-bundle" role="Started" node="ra2" score="-INFINITY"/>
+    </constraints>
+  </configuration>
+  <status>
+    <node_state id="2" uname="ra2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:38:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;7:38:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="53" rc-code="7" op-status="0" interval="0" last-run="1542907811" last-rc-change="1542907811" exec-time="65" queue-time="0" op-digest="317386a3af010667314d88ddf93a0150"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;7:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="61" rc-code="7" op-status="0" interval="0" last-run="1542907815" last-rc-change="1542907815" exec-time="122" queue-time="0" op-digest="cabda3f80e7220806c7022b88efab9d4"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;6:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="81" rc-code="0" op-status="0" interval="0" last-run="1542918685" last-rc-change="1542918685" exec-time="1479" queue-time="0" op-digest="ad90cb2f61c71c84e840c796e44d628a"/>
+            <lrm_rsc_op id="redis-bundle-docker-1_monitor_60000" operation_key="redis-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;7:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="82" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918686" exec-time="173" queue-time="0" op-digest="51d9043cab411f1c983d9539d2997c26"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0">
+            <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;5:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="1" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="109c7a28bbb65c3c294a591f03377dd3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1">
+            <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;8:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="13" rc-code="0" op-status="0" interval="0" last-run="1542918686" last-rc-change="1542918686" exec-time="0" queue-time="0" op-digest="9c100f1c3496f07f7a953f9cc8fbd746" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="redis-bundle-1_monitor_60000" operation_key="redis-bundle-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="17:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;17:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="14" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918687" exec-time="0" queue-time="0" op-digest="21306bcef84a89e55ae729d77ceca1dc"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2">
+            <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;7:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="4" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="6ed7cb9eb2c67afe77784971250638a7" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="2">
+        <instance_attributes id="status-2"/>
+      </transient_attributes>
+    </node_state>
+    <node_state id="1" uname="ra1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:37:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;4:37:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="57" rc-code="7" op-status="0" interval="0" last-run="1542907811" last-rc-change="1542907811" exec-time="63" queue-time="0" op-digest="0160bbff6f3f012b38edc9bddb631132"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="2:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;2:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="84" rc-code="0" op-status="0" interval="0" last-run="1542918685" last-rc-change="1542918685" exec-time="1504" queue-time="0" op-digest="cabda3f80e7220806c7022b88efab9d4"/>
+            <lrm_rsc_op id="redis-bundle-docker-0_monitor_60000" operation_key="redis-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;3:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="85" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918686" exec-time="167" queue-time="0" op-digest="922138cf0d1c7127c35f2e174c4af461"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;4:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="68" rc-code="7" op-status="0" interval="0" last-run="1542907815" last-rc-change="1542907815" exec-time="132" queue-time="0" op-digest="ad90cb2f61c71c84e840c796e44d628a"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0">
+            <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;4:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="13" rc-code="0" op-status="0" interval="0" last-run="1542918686" last-rc-change="1542918686" exec-time="0" queue-time="0" op-digest="109c7a28bbb65c3c294a591f03377dd3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="redis-bundle-0_monitor_60000" operation_key="redis-bundle-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;12:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="14" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918687" exec-time="0" queue-time="0" op-digest="478c16eefb4fa513134f527efb7e063a"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1">
+            <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;3:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="3" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="9c100f1c3496f07f7a953f9cc8fbd746" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2">
+            <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;4:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="4" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="6ed7cb9eb2c67afe77784971250638a7" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="1">
+        <instance_attributes id="status-1">
+          <nvpair id="status-1-master-redis" name="master-redis" value="1"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state id="3" uname="ra3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <lrm id="3">
+        <lrm_resources>
+          <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="10:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;10:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="77" rc-code="0" op-status="0" interval="0" last-run="1542918685" last-rc-change="1542918685" exec-time="1501" queue-time="0" op-digest="317386a3af010667314d88ddf93a0150"/>
+            <lrm_rsc_op id="redis-bundle-docker-2_monitor_60000" operation_key="redis-bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="11:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;11:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="78" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918687" exec-time="165" queue-time="0" op-digest="b4fcc691b4f687c09bb3dd0610b658ca"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;12:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="57" rc-code="7" op-status="0" interval="0" last-run="1542907815" last-rc-change="1542907815" exec-time="129" queue-time="0" op-digest="cabda3f80e7220806c7022b88efab9d4"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="14:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;14:39:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="61" rc-code="7" op-status="0" interval="0" last-run="1542907815" last-rc-change="1542907815" exec-time="111" queue-time="0" op-digest="ad90cb2f61c71c84e840c796e44d628a"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0">
+            <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;8:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="1" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="109c7a28bbb65c3c294a591f03377dd3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1">
+            <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="9:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:7;9:40:7:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="2" rc-code="7" op-status="0" interval="0" last-run="1542907819" last-rc-change="1542907819" exec-time="0" queue-time="0" op-digest="9c100f1c3496f07f7a953f9cc8fbd746" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2">
+            <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;12:142:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="13" rc-code="0" op-status="0" interval="0" last-run="1542918687" last-rc-change="1542918687" exec-time="0" queue-time="0" op-digest="6ed7cb9eb2c67afe77784971250638a7" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="redis-bundle-2_monitor_60000" operation_key="redis-bundle-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="22:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;22:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="14" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918688" exec-time="0" queue-time="0" op-digest="6815f4f1d3f361f086d1e45371d9cb53"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="3">
+        <instance_attributes id="status-3">
+          <nvpair id="status-3-master-redis" name="master-redis" value="1"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state remote_node="true" id="redis-bundle-0" uname="redis-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+      <lrm id="redis-bundle-0">
+        <lrm_resources>
+          <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;27:143:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="8" rc-code="0" op-status="0" interval="0" last-run="1542918688" last-rc-change="1542918688" exec-time="5253" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+            <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;32:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="78" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918713" exec-time="1195" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+            <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="31:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;31:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra1" call-id="79" rc-code="0" op-status="0" interval="45000" last-rc-change="1542918715" exec-time="1191" queue-time="1114" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state remote_node="true" id="redis-bundle-1" uname="redis-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+      <lrm id="redis-bundle-1">
+        <lrm_resources>
+          <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis_last_0" operation_key="redis_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="30:148:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;30:148:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="143" rc-code="0" op-status="0" interval="0" last-run="1542918963" last-rc-change="1542918963" exec-time="2398" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+            <lrm_rsc_op id="redis_monitor_20000" operation_key="redis_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="28:145:8:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:8;28:145:8:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra2" call-id="59" rc-code="8" op-status="0" interval="20000" last-rc-change="1542918715" exec-time="1210" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state remote_node="true" id="redis-bundle-2" uname="redis-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+      <lrm id="redis-bundle-2">
+        <lrm_resources>
+          <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:144:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;32:144:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="8" rc-code="0" op-status="0" interval="0" last-run="1542918700" last-rc-change="1542918700" exec-time="5173" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+            <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;36:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="77" rc-code="0" op-status="0" interval="60000" last-rc-change="1542918714" exec-time="1184" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+            <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" transition-magic="0:0;35:145:0:ae78ea4e-f87a-4433-b26e-94016870d3fc" exit-reason="" on_node="ra3" call-id="78" rc-code="0" op-status="0" interval="45000" last-rc-change="1542918715" exec-time="1157" queue-time="1102" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>
diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c
index 455babde52..f7c5cded13 100644
--- a/daemons/controld/controld_execd.c
+++ b/daemons/controld/controld_execd.c
@@ -1,2676 +1,2700 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <pacemaker-controld.h>
 #include <controld_fsa.h>
 #include <controld_messages.h>
 #include <controld_callbacks.h>
 #include <controld_lrm.h>
 #include <regex.h>
 #include <crm/pengine/rules.h>
 
 #define START_DELAY_THRESHOLD 5 * 60 * 1000
 #define MAX_LRM_REG_FAILS 30
 
 #define s_if_plural(i) (((i) == 1)? "" : "s")
 
 struct delete_event_s {
     int rc;
     const char *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id);
 static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list);
 static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data);
 static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options,
                              const char *user_name);
 
 static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op,
                                        const char *rsc_id, const char *operation);
 static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation,
                           xmlNode * msg, xmlNode * request);
 
 void send_direct_ack(const char *to_host, const char *to_sys,
                      lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id);
 
 static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state,
                                          int log_level);
 static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op);
 
 static void
 lrm_connection_destroy(void)
 {
     if (is_set(fsa_input_register, R_LRM_CONNECTED)) {
         crm_crit("Connection to executor failed");
         register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
         clear_bit(fsa_input_register, R_LRM_CONNECTED);
 
     } else {
         crm_info("Disconnected from executor");
     }
 
 }
 
 static char *
 make_stop_id(const char *rsc, int call_id)
 {
     return crm_strdup_printf("%s:%d", rsc, call_id);
 }
 
 static void
 copy_instance_keys(gpointer key, gpointer value, gpointer user_data)
 {
     if (strstr(key, CRM_META "_") == NULL) {
         g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
     }
 }
 
 static void
 copy_meta_keys(gpointer key, gpointer value, gpointer user_data)
 {
     if (strstr(key, CRM_META "_") != NULL) {
         g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
     }
 }
 
 /*!
  * \internal
  * \brief Remove a recurring operation from a resource's history
  *
  * \param[in,out] history  Resource history to modify
  * \param[in]     op       Operation to remove
  *
  * \return TRUE if the operation was found and removed, FALSE otherwise
  */
 static gboolean
 history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op)
 {
     GList *iter;
 
     for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
         lrmd_event_data_t *existing = iter->data;
 
         if ((op->interval_ms == existing->interval_ms)
             && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE)
             && safe_str_eq(op->op_type, existing->op_type)) {
 
             history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter);
             lrmd_free_event(existing);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Free all recurring operations in resource history
  *
  * \param[in,out] history  Resource history to modify
  */
 static void
 history_free_recurring_ops(rsc_history_t *history)
 {
     GList *iter;
 
     for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
         lrmd_free_event(iter->data);
     }
     g_list_free(history->recurring_op_list);
     history->recurring_op_list = NULL;
 }
 
 /*!
  * \internal
  * \brief Free resource history
  *
  * \param[in,out] data  Resource history (rsc_history_t *) to free
  */
 void
 history_free(gpointer data)
 {
     rsc_history_t *history = (rsc_history_t*)data;
 
     if (history->stop_params) {
         g_hash_table_destroy(history->stop_params);
     }
 
     /* Don't need to free history->rsc.id because it's set to history->id */
     free(history->rsc.type);
     free(history->rsc.standard);
     free(history->rsc.provider);
 
     lrmd_free_event(history->failed);
     lrmd_free_event(history->last);
     free(history->id);
     history_free_recurring_ops(history);
     free(history);
 }
 
 static void
 update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
     int target_rc = 0;
     rsc_history_t *entry = NULL;
 
     if (op->rsc_deleted) {
         crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type);
         delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL);
         return;
     }
 
     if (safe_str_eq(op->op_type, RSC_NOTIFY)) {
         return;
     }
 
     crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type);
 
     entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id);
     if (entry == NULL && rsc) {
         entry = calloc(1, sizeof(rsc_history_t));
         entry->id = strdup(op->rsc_id);
         g_hash_table_insert(lrm_state->resource_history, entry->id, entry);
 
         entry->rsc.id = entry->id;
         entry->rsc.type = strdup(rsc->type);
         entry->rsc.standard = strdup(rsc->standard);
         if (rsc->provider) {
             entry->rsc.provider = strdup(rsc->provider);
         } else {
             entry->rsc.provider = NULL;
         }
 
     } else if (entry == NULL) {
         crm_info("Resource %s no longer exists, not updating cache", op->rsc_id);
         return;
     }
 
     entry->last_callid = op->call_id;
     target_rc = rsc_op_expected_rc(op);
     if (op->op_status == PCMK_LRM_OP_CANCELLED) {
         if (op->interval_ms > 0) {
             crm_trace("Removing cancelled recurring op: " CRM_OP_FMT,
                       op->rsc_id, op->op_type, op->interval_ms);
             history_remove_recurring_op(entry, op);
             return;
         } else {
             crm_trace("Skipping " CRM_OP_FMT " rc=%d, status=%d",
                       op->rsc_id, op->op_type, op->interval_ms, op->rc,
                       op->op_status);
         }
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         /* Store failed monitors here, otherwise the block below will cause them
          * to be forgotten when a stop happens.
          */
         if (entry->failed) {
             lrmd_free_event(entry->failed);
         }
         entry->failed = lrmd_copy_event(op);
 
     } else if (op->interval_ms == 0) {
         if (entry->last) {
             lrmd_free_event(entry->last);
         }
         entry->last = lrmd_copy_event(op);
 
         if (op->params &&
             (safe_str_eq(CRMD_ACTION_START, op->op_type) ||
              safe_str_eq("reload", op->op_type) ||
              safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) {
 
             if (entry->stop_params) {
                 g_hash_table_destroy(entry->stop_params);
             }
             entry->stop_params = crm_str_table_new();
 
             g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params);
         }
     }
 
     if (op->interval_ms > 0) {
         /* Ensure there are no duplicates */
         history_remove_recurring_op(entry, op);
 
         crm_trace("Adding recurring op: " CRM_OP_FMT,
                   op->rsc_id, op->op_type, op->interval_ms);
         entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op));
 
     } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) {
         crm_trace("Dropping %d recurring ops because of: " CRM_OP_FMT,
                   g_list_length(entry->recurring_op_list), op->rsc_id,
                   op->op_type, op->interval_ms);
         history_free_recurring_ops(entry);
     }
 }
 
 /*!
  * \internal
  * \brief Send a direct OK ack for a resource task
  *
  * \param[in] lrm_state  LRM connection
  * \param[in] input      Input message being ack'ed
  * \param[in] rsc_id     ID of affected resource
  * \param[in] rsc        Affected resource (if available)
  * \param[in] task       Operation task being ack'ed
  * \param[in] ack_host   Name of host to send ack to
  * \param[in] ack_sys    IPC system name to ack
  */
 static void
 send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input,
                  const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task,
                  const char *ack_host, const char *ack_sys)
 {
     lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task);
 
     op->rc = PCMK_OCF_OK;
     op->op_status = PCMK_LRM_OP_DONE;
     send_direct_ack(ack_host, ack_sys, rsc, op, rsc_id);
     lrmd_free_event(op);
 }
 
 void
 lrm_op_callback(lrmd_event_data_t * op)
 {
     const char *nodename = NULL;
     lrm_state_t *lrm_state = NULL;
 
     CRM_CHECK(op != NULL, return);
 
     /* determine the node name for this connection. */
     nodename = op->remote_nodename ? op->remote_nodename : fsa_our_uname;
 
     if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) {
         /* If this is the local executor IPC connection, set the right bits in the
          * controller when the connection goes down.
          */
         lrm_connection_destroy();
         return;
     } else if (op->type != lrmd_event_exec_complete) {
         /* we only need to process execution results */
         return;
     }
 
     lrm_state = lrm_state_find(nodename);
     CRM_ASSERT(lrm_state != NULL);
 
-    process_lrm_event(lrm_state, op, NULL);
+    process_lrm_event(lrm_state, op, NULL, NULL);
 }
 
 /*	 A_LRM_CONNECT	*/
 void
 do_lrm_control(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     /* This only pertains to local executor connections. Remote connections are
      * handled as resources within the scheduler. Connecting and disconnecting
      * from remote executor instances is handled differently.
      */
 
     lrm_state_t *lrm_state = NULL;
 
     if(fsa_our_uname == NULL) {
         return; /* Nothing to do */
     }
     lrm_state = lrm_state_find_or_create(fsa_our_uname);
     if (lrm_state == NULL) {
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
         return;
     }
 
     if (action & A_LRM_DISCONNECT) {
         if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) {
             if (action == A_LRM_DISCONNECT) {
                 crmd_fsa_stall(FALSE);
                 return;
             }
         }
 
         clear_bit(fsa_input_register, R_LRM_CONNECTED);
         crm_info("Disconnecting from the executor");
         lrm_state_disconnect(lrm_state);
         lrm_state_reset_tables(lrm_state, FALSE);
         crm_notice("Disconnected from the executor");
     }
 
     if (action & A_LRM_CONNECT) {
         int ret = pcmk_ok;
 
         crm_debug("Connecting to the executor");
         ret = lrm_state_ipc_connect(lrm_state);
 
         if (ret != pcmk_ok) {
             if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) {
                 crm_warn("Failed to connect to the executor %d time%s (%d max)",
                          lrm_state->num_lrm_register_fails,
                          s_if_plural(lrm_state->num_lrm_register_fails),
                          MAX_LRM_REG_FAILS);
 
                 crm_timer_start(wait_timer);
                 crmd_fsa_stall(FALSE);
                 return;
             }
         }
 
         if (ret != pcmk_ok) {
             crm_err("Failed to connect to the executor the max allowed %d time%s",
                     lrm_state->num_lrm_register_fails,
                     s_if_plural(lrm_state->num_lrm_register_fails));
             register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
             return;
         }
 
         set_bit(fsa_input_register, R_LRM_CONNECTED);
         crm_info("Connection to the executor established");
     }
 
     if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) {
         crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__);
     }
 }
 
 static gboolean
 lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level)
 {
     int counter = 0;
     gboolean rc = TRUE;
     const char *when = "lrm disconnect";
 
     GHashTableIter gIter;
     const char *key = NULL;
     rsc_history_t *entry = NULL;
     struct recurring_op_s *pending = NULL;
 
     crm_debug("Checking for active resources before exit");
 
     if (cur_state == S_TERMINATE) {
         log_level = LOG_ERR;
         when = "shutdown";
 
     } else if (is_set(fsa_input_register, R_SHUTDOWN)) {
         when = "shutdown... waiting";
     }
 
     if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) {
         guint removed = g_hash_table_foreach_remove(
             lrm_state->pending_ops, stop_recurring_actions, lrm_state);
         guint nremaining = g_hash_table_size(lrm_state->pending_ops);
 
         if (removed || nremaining) {
             crm_notice("Stopped %u recurring operation%s at %s (%u remaining)",
                        removed, s_if_plural(removed), when, nremaining);
         }
     }
 
     if (lrm_state->pending_ops) {
         g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) {
             /* Ignore recurring actions in the shutdown calculations */
             if (pending->interval_ms == 0) {
                 counter++;
             }
         }
     }
 
     if (counter > 0) {
         do_crm_log(log_level, "%d pending executor operation%s at %s",
                    counter, s_if_plural(counter), when);
 
         if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) {
             g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
             while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) {
                 do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key);
             }
 
         } else {
             rc = FALSE;
         }
         return rc;
     }
 
     if (lrm_state->resource_history == NULL) {
         return rc;
     }
 
     if (is_set(fsa_input_register, R_SHUTDOWN)) {
         /* At this point we're not waiting, we're just shutting down */
         when = "shutdown";
     }
 
     counter = 0;
     g_hash_table_iter_init(&gIter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) {
         if (is_rsc_active(lrm_state, entry->id) == FALSE) {
             continue;
         }
 
         counter++;
         if (log_level == LOG_ERR) {
             crm_info("Found %s active at %s", entry->id, when);
         } else {
             crm_trace("Found %s active at %s", entry->id, when);
         }
         if (lrm_state->pending_ops) {
             GHashTableIter hIter;
 
             g_hash_table_iter_init(&hIter, lrm_state->pending_ops);
             while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) {
                 if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) {
                     crm_notice("%sction %s (%s) incomplete at %s",
                                pending->interval_ms == 0 ? "A" : "Recurring a",
                                key, pending->op_key, when);
                 }
             }
         }
     }
 
     if (counter) {
         crm_err("%d resource%s active at %s",
                 counter, (counter == 1)? " was" : "s were", when);
     }
 
     return rc;
 }
 
 static char *
 build_parameter_list(const lrmd_event_data_t *op,
                      const struct ra_metadata_s *metadata,
                      xmlNode *result, enum ra_param_flags_e param_type,
                      bool invert_for_xml)
 {
     int len = 0;
     int max = 0;
     char *list = NULL;
     GList *iter = NULL;
 
     /* Newer resource agents support the "private" parameter attribute to
      * indicate sensitive parameters. For backward compatibility with older
      * agents, this list is used if the agent doesn't specify any as "private".
      */
     const char *secure_terms[] = {
         "password",
         "passwd",
         "user",
     };
 
     if (is_not_set(metadata->ra_flags, ra_uses_private)
         && (param_type == ra_param_private)) {
 
         max = DIMOF(secure_terms);
     }
 
     for (iter = metadata->ra_params; iter != NULL; iter = iter->next) {
         struct ra_param_s *param = (struct ra_param_s *) iter->data;
         bool accept = FALSE;
 
         if (is_set(param->rap_flags, param_type)) {
             accept = TRUE;
 
         } else if (max) {
             for (int lpc = 0; lpc < max; lpc++) {
                 if (safe_str_eq(secure_terms[lpc], param->rap_name)) {
                     accept = TRUE;
                     break;
                 }
             }
         }
 
         if (accept) {
             int start = len;
 
             crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type));
 
             len += strlen(param->rap_name) + 2; // include spaces around
             list = realloc_safe(list, len + 1); // include null terminator
 
             // spaces before and after make parsing simpler
             sprintf(list + start, " %s ", param->rap_name);
 
         } else {
             crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type));
         }
 
         if (result && (invert_for_xml? !accept : accept)) {
             const char *v = g_hash_table_lookup(op->params, param->rap_name);
 
             if (v != NULL) {
                 crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v);
                 crm_xml_add(result, param->rap_name, v);
             }
         }
     }
 
     return list;
 }
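 /* Illustrative example (hypothetical agent, not taken from any specific one):
  * if the agent declares no "private" parameters but its metadata does include
  * a "user" parameter, the fallback above accepts it and the returned list is
  * " user " -- each accepted name wrapped in spaces to keep the later parsing
  * simple (compare op-secure-params=" user " in the regression CIB earlier in
  * this patch).
  */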
 
 static void
 append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                     xmlNode *update, const char *version)
 {
     char *list = NULL;
     char *digest = NULL;
     xmlNode *restart = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     if (op->interval_ms > 0) {
         /* monitors are not reloadable */
         return;
     }
 
     if (is_set(metadata->ra_flags, ra_supports_reload)) {
         restart = create_xml_node(NULL, XML_TAG_PARAMS);
         /* Add any parameters with unique="1" to the "op-force-restart" list.
          *
          * (Currently, we abuse "unique=0" to indicate reloadability. This is
          * nonstandard and should eventually be replaced once the OCF standard
          * is updated with something better.)
          */
         list = build_parameter_list(op, metadata, restart, ra_param_unique,
                                     FALSE);
 
     } else {
         /* Resource does not support reloads */
         return;
     }
 
     digest = calculate_operation_digest(restart, version);
     /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload,
      * no matter if it actually supports any parameters with unique="1"). */
     crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? list: "");
     crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest);
 
     crm_trace("%s: %s, %s", op->rsc_id, digest, list);
     crm_log_xml_trace(restart, "restart digest source");
 
     free_xml(restart);
     free(digest);
     free(list);
 }
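 /* For example, the redis bundle history in the regression CIB earlier in this
  * patch records the result of this update as op-force-restart=" server " plus
  * an op-restart-digest hashed from the listed parameters' current values.
  */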
 
 static void
 append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                    xmlNode *update, const char *version)
 {
     char *list = NULL;
     char *digest = NULL;
     xmlNode *secure = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     /*
      * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the
      * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on
      * the insecure ones
      */
     secure = create_xml_node(NULL, XML_TAG_PARAMS);
     list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE);
 
     if (list != NULL) {
         digest = calculate_operation_digest(secure, version);
         crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list);
         crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest);
 
         crm_trace("%s: %s, %s", op->rsc_id, digest, list);
         crm_log_xml_trace(secure, "secure digest source");
     } else {
         crm_trace("%s: no secure parameters", op->rsc_id);
     }
 
     free_xml(secure);
     free(digest);
     free(list);
 }
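 /* As a concrete example, the redis resource history in the regression CIB
  * earlier in this patch ends up with op-secure-params=" user " and an
  * op-secure-digest computed over the remaining non-secure parameters
  * (invert_for_xml=TRUE keeps the secure ones out of the digest source).
  */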
 
 static gboolean
 build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op,
                        const char *node_name, const char *src)
 {
     int target_rc = 0;
     xmlNode *xml_op = NULL;
     struct ra_metadata_s *metadata = NULL;
     const char *caller_version = NULL;
     lrm_state_t *lrm_state = NULL;
 
     if (op == NULL) {
         return FALSE;
     }
 
     target_rc = rsc_op_expected_rc(op);
 
     /* There is a small risk in formerly mixed clusters that the update will
      * be sub-optimal.
      *
      * However, with our upgrade policy, the update we send should
      * still be completely supported anyway.
      */
     caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION);
     CRM_LOG_ASSERT(caller_version != NULL);
 
     if(caller_version == NULL) {
         caller_version = CRM_FEATURE_SET;
     }
 
     crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version);
     xml_op = create_operation_update(parent, op, caller_version, target_rc, fsa_our_uname, src, LOG_DEBUG);
     if (xml_op == NULL) {
         return TRUE;
     }
 
     if ((rsc == NULL) || (op->params == NULL)
         || !crm_op_needs_metadata(rsc->standard, op->op_type)) {
 
         crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)",
                   op->op_type, op->rsc_id, op->params, rsc);
         return TRUE;
     }
 
     lrm_state = lrm_state_find(node_name);
     if (lrm_state == NULL) {
         crm_warn("Cannot calculate digests for operation " CRM_OP_FMT
                  " because we have no connection to executor for %s",
                  op->rsc_id, op->op_type, op->interval_ms, node_name);
         return TRUE;
     }
 
     metadata = metadata_cache_get(lrm_state->metadata_cache, rsc);
     if (metadata == NULL) {
         /* For now, we always collect resource agent meta-data via a local,
          * synchronous, direct execution of the agent. This has multiple issues:
          * the executor should execute agents, not the controller; meta-data for
          * Pacemaker Remote nodes should be collected on those nodes, not
          * locally; and the meta-data call shouldn't eat into the timeout of the
          * real action being performed.
          *
          * These issues are planned to be addressed by having the scheduler
          * schedule a meta-data cache check at the beginning of each transition.
          * Once that is working, this block will only be a fallback in case the
          * initial collection fails.
          */
         char *metadata_str = NULL;
 
         int rc = lrm_state_get_metadata(lrm_state, rsc->standard,
                                         rsc->provider, rsc->type,
                                         &metadata_str, 0);
 
         if (rc != pcmk_ok) {
             crm_warn("Failed to get metadata for %s (%s:%s:%s)",
                      rsc->id, rsc->standard, rsc->provider, rsc->type);
             return TRUE;
         }
 
         metadata = metadata_cache_update(lrm_state->metadata_cache, rsc,
                                          metadata_str);
         free(metadata_str);
         if (metadata == NULL) {
             crm_warn("Failed to update metadata for %s (%s:%s:%s)",
                      rsc->id, rsc->standard, rsc->provider, rsc->type);
             return TRUE;
         }
     }
 
 #if ENABLE_VERSIONED_ATTRS
     crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version);
 #endif
 
     crm_trace("Including additional digests for %s::%s:%s", rsc->standard, rsc->provider, rsc->type);
     append_restart_list(op, metadata, xml_op, caller_version);
     append_secure_list(op, metadata, xml_op, caller_version);
 
     return TRUE;
 }
 
 static gboolean
 is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id)
 {
     rsc_history_t *entry = NULL;
 
     entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     if (entry == NULL || entry->last == NULL) {
         return FALSE;
     }
 
     crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type,
               entry->last->interval_ms, entry->last->rc);
     if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) {
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_OK
                && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) {
         /* a stricter check is too complex...
          * leave that to the PE
          */
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) {
         return FALSE;
 
     } else if ((entry->last->interval_ms == 0)
                && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) {
         /* Badly configured resources can't be reliably stopped */
         return FALSE;
     }
 
     return TRUE;
 }
 
 static gboolean
 build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list)
 {
     GHashTableIter iter;
     rsc_history_t *entry = NULL;
 
     g_hash_table_iter_init(&iter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
 
         GList *gIter = NULL;
         xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE);
 
         crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id);
         crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type);
         crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard);
         crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider);
 
         if (entry->last && entry->last->params) {
             const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
             if (container) {
                 crm_trace("Resource %s is a part of container resource %s", entry->id, container);
                 crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container);
             }
         }
         build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name, __FUNCTION__);
         build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name, __FUNCTION__);
         for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) {
             build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name, __FUNCTION__);
         }
     }
 
     return FALSE;
 }
 
 static xmlNode *
 do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags)
 {
     xmlNode *xml_state = NULL;
     xmlNode *xml_data = NULL;
     xmlNode *rsc_list = NULL;
     crm_node_t *peer = NULL;
 
     peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY);
     CRM_CHECK(peer != NULL, return NULL);
 
     xml_state = create_node_state_update(peer, update_flags, NULL,
                                          __FUNCTION__);
     if (xml_state == NULL) {
         return NULL;
     }
 
     xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM);
     crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid);
     rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES);
 
     /* Build a list of active (not always running) resources */
     build_active_RAs(lrm_state, rsc_list);
 
     crm_log_xml_trace(xml_state, "Current executor state");
 
     return xml_state;
 }
 
 xmlNode *
 do_lrm_query(gboolean is_replace, const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (!lrm_state) {
         crm_err("Could not find executor state for node %s", node_name);
         return NULL;
     }
     return do_lrm_query_internal(lrm_state,
                                  node_update_cluster|node_update_peer);
 }
 
 static void
 notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc)
 {
     lrmd_event_data_t *op = NULL;
     const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
     const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
 
     crm_info("Notifying %s on %s that %s was%s deleted",
              from_sys, (from_host? from_host : "localhost"), rsc_id,
              ((rc == pcmk_ok)? "" : " not"));
 
     op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE);
 
     if (rc == pcmk_ok) {
         op->op_status = PCMK_LRM_OP_DONE;
         op->rc = PCMK_OCF_OK;
     } else {
         op->op_status = PCMK_LRM_OP_ERROR;
         op->rc = PCMK_OCF_UNKNOWN_ERROR;
     }
 
     send_direct_ack(from_host, from_sys, NULL, op, rsc_id);
     lrmd_free_event(op);
 
     if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) {
         /* this isn't expected - trigger a new transition */
         time_t now = time(NULL);
         char *now_s = crm_itoa(now);
 
         crm_debug("Triggering a refresh after %s deleted %s from the executor",
                   from_sys, rsc_id);
 
         update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
                              "last-lrm-refresh", now_s, FALSE, NULL, NULL);
 
         free(now_s);
     }
 }
 
 static gboolean
 lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     struct delete_event_s *event = user_data;
     struct pending_deletion_op_s *op = value;
 
     if (crm_str_eq(event->rsc, op->rsc, TRUE)) {
         notify_deleted(event->lrm_state, op->input, event->rsc, event->rc);
         return TRUE;
     }
     return FALSE;
 }
 
 static gboolean
 lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data)
 {
     const char *rsc = user_data;
     struct recurring_op_s *pending = value;
 
     if (crm_str_eq(rsc, pending->rsc_id, TRUE)) {
         crm_info("Removing op %s:%d for deleted resource %s",
                  pending->op_key, pending->call_id, rsc);
         return TRUE;
     }
     return FALSE;
 }
 
 /*
  * Remove the rsc from the CIB
  *
  * Avoids refreshing the entire LRM section of this host
  */
 #define RSC_TEMPLATE "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']"
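 /* Expanded, this yields an xpath such as
  *     //node_state[@uname='ra1']//lrm_resource[@id='redis']
  * where the node and resource names are illustrative values borrowed from the
  * regression CIB added earlier in this patch.
  */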
 
 static int
 delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options,
                   const char *user_name)
 {
     char *rsc_xpath = NULL;
     int rc = pcmk_ok;
 
     CRM_CHECK(rsc_id != NULL, return -ENXIO);
 
     rsc_xpath = crm_strdup_printf(RSC_TEMPLATE, lrm_state->node_name, rsc_id);
 
     rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath,
                          NULL, NULL, call_options | cib_xpath, user_name);
 
     free(rsc_xpath);
     return rc;
 }
 
 static void
 delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id,
                  GHashTableIter * rsc_gIter, int rc, const char *user_name)
 {
     struct delete_event_s event;
 
     CRM_CHECK(rsc_id != NULL, return);
 
     if (rc == pcmk_ok) {
         char *rsc_id_copy = strdup(rsc_id);
 
         if (rsc_gIter)
             g_hash_table_iter_remove(rsc_gIter);
         else
             g_hash_table_remove(lrm_state->resource_history, rsc_id_copy);
         crm_debug("sync: Sending delete op for %s", rsc_id_copy);
         delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name);
 
         g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy);
         free(rsc_id_copy);
     }
 
     if (input) {
         notify_deleted(lrm_state, input, rsc_id, rc);
     }
 
     event.rc = rc;
     event.rsc = rsc_id;
     event.lrm_state = lrm_state;
     g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event);
 }
 
 /*!
  * \internal
  * \brief Erase an LRM history entry from the CIB, given the operation data
  *
  * \param[in] lrm_state  LRM state of the desired node
  * \param[in] op         Operation whose history should be deleted
  */
 static void
 erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op)
 {
     xmlNode *xml_top = NULL;
 
     CRM_CHECK(op != NULL, return);
 
     xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP);
     crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id);
     crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data);
 
     if (op->interval_ms > 0) {
         char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval_ms);
 
         /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
         crm_xml_add(xml_top, XML_ATTR_ID, op_id);
         free(op_id);
     }
 
     crm_debug("Erasing resource operation history for " CRM_OP_FMT " (call=%d)",
               op->rsc_id, op->op_type, op->interval_ms, op->call_id);
 
     fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top,
                                cib_quorum_override);
 
     crm_log_xml_trace(xml_top, "op:cancel");
     free_xml(xml_top);
 }
 
 /* Define xpath to find LRM resource history entry by node and resource */
 #define XPATH_HISTORY                                   \
     "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS              \
     "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']"  \
     "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES       \
     "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"  \
     "/" XML_LRM_TAG_RSC_OP
 
 /* ... and also by operation key */
 #define XPATH_HISTORY_ID XPATH_HISTORY \
     "[@" XML_ATTR_ID "='%s']"
 
 /* ... and also by operation key and operation call ID */
 #define XPATH_HISTORY_CALL XPATH_HISTORY \
     "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']"
 
 /* ... and also by operation key and original operation key */
 #define XPATH_HISTORY_ORIG XPATH_HISTORY \
     "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']"
 
 /*!
  * \internal
  * \brief Erase an LRM history entry from the CIB, given operation identifiers
  *
  * \param[in] lrm_state  LRM state of the node to clear history for
  * \param[in] rsc_id     Name of resource to clear history for
  * \param[in] key        Operation key of operation to clear history for
  * \param[in] orig_op    If specified, delete only if it has this original op
  * \param[in] call_id    If specified, delete entry only if it has this call ID
  */
 static void
 erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id,
                         const char *key, const char *orig_op, int call_id)
 {
     char *op_xpath = NULL;
 
     CRM_CHECK((rsc_id != NULL) && (key != NULL), return);
 
     if (call_id > 0) {
         op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL,
                                      lrm_state->node_name, rsc_id, key,
                                      call_id);
 
     } else if (orig_op) {
         op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG,
                                      lrm_state->node_name, rsc_id, key,
                                      orig_op);
     } else {
         op_xpath = crm_strdup_printf(XPATH_HISTORY_ID,
                                      lrm_state->node_name, rsc_id, key);
     }
 
     crm_debug("Erasing resource operation history for %s on %s (call=%d)",
               key, rsc_id, call_id);
     fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL,
                                cib_quorum_override | cib_xpath);
     free(op_xpath);
 }
 
 static inline gboolean
 last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms)
 {
     if (entry == NULL) {
         return FALSE;
     }
     if (op == NULL) {
         return TRUE;
     }
     return (safe_str_eq(op, entry->failed->op_type)
             && (interval_ms == entry->failed->interval_ms));
 }
 
 /*!
  * \internal
  * \brief Clear a resource's last failure
  *
  * Erase a resource's last failure on a particular node from both the
  * LRM resource history in the CIB, and the resource history remembered
  * for the LRM state.
  *
  * \param[in] rsc_id      Resource name
  * \param[in] node_name   Node name
  * \param[in] operation   If specified, only clear if matching this operation
  * \param[in] interval_ms If operation is specified, it has this interval
  */
 void
 lrm_clear_last_failure(const char *rsc_id, const char *node_name,
                        const char *operation, guint interval_ms)
 {
     char *op_key = NULL;
     char *orig_op_key = NULL;
     lrm_state_t *lrm_state = NULL;
 
     lrm_state = lrm_state_find(node_name);
     if (lrm_state == NULL) {
         return;
     }
 
     /* Erase from CIB */
     op_key = generate_op_key(rsc_id, "last_failure", 0);
     if (operation) {
         orig_op_key = generate_op_key(rsc_id, operation, interval_ms);
     }
     erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0);
     free(op_key);
     free(orig_op_key);
 
     /* Remove from memory */
     if (lrm_state->resource_history) {
         rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history,
                                                    rsc_id);
 
         if (last_failed_matches_op(entry, operation, interval_ms)) {
             lrmd_free_event(entry->failed);
             entry->failed = NULL;
         }
     }
 }
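 /* Sketch of the keys involved (illustrative values only): clearing the last
  * failure of "redis" monitors with a 45-second interval would erase the CIB
  * entry keyed "redis_last_failure_0" whose original operation key was
  * "redis_monitor_45000", matching the generate_op_key() calls above.
  */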
 
 /* Returns: gboolean - cancellation is in progress */
 static gboolean
 cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove)
 {
     int rc = pcmk_ok;
     char *local_key = NULL;
     struct recurring_op_s *pending = NULL;
 
     CRM_CHECK(op != 0, return FALSE);
     CRM_CHECK(rsc_id != NULL, return FALSE);
     if (key == NULL) {
         local_key = make_stop_id(rsc_id, op);
         key = local_key;
     }
     pending = g_hash_table_lookup(lrm_state->pending_ops, key);
 
     if (pending) {
         if (remove && pending->remove == FALSE) {
             pending->remove = TRUE;
             crm_debug("Scheduling %s for removal", key);
         }
 
         if (pending->cancelled) {
             crm_debug("Operation %s already cancelled", key);
             free(local_key);
             return FALSE;
         }
 
         pending->cancelled = TRUE;
 
     } else {
         crm_info("No pending op found for %s", key);
         free(local_key);
         return FALSE;
     }
 
     crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key);
     rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type,
                           pending->interval_ms);
     if (rc == pcmk_ok) {
         crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key);
         free(local_key);
         return TRUE;
     }
 
     crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key);
     /* The caller needs to make sure the entry is removed from the
      * pending_ops list, usually by returning TRUE from the worker function
      * supplied to g_hash_table_foreach_remove().
      *
      * Leaving the entry in pending_ops would block the node from
      * shutting down.
      */
     free(local_key);
     return FALSE;
 }
 
 struct cancel_data {
     gboolean done;
     gboolean remove;
     const char *key;
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 cancel_action_by_key(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct cancel_data *data = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if (crm_str_eq(op->op_key, data->key, TRUE)) {
         data->done = TRUE;
         remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove);
     }
     return remove;
 }
 
 static gboolean
 cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove)
 {
     guint removed = 0;
     struct cancel_data data;
 
     CRM_CHECK(rsc != NULL, return FALSE);
     CRM_CHECK(key != NULL, return FALSE);
 
     data.key = key;
     data.rsc = rsc;
     data.done = FALSE;
     data.remove = remove;
     data.lrm_state = lrm_state;
 
     removed = g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data);
     crm_trace("Removed %u op cache entries, new size: %u",
               removed, g_hash_table_size(lrm_state->pending_ops));
     return data.done;
 }
 
 /*!
  * \internal
  * \brief Retrieve resource information from LRM
  *
  * \param[in]  lrm_state LRM connection to use
  * \param[in]  rsc_xml   XML containing resource configuration
  * \param[in]  do_create If true, register resource with LRM if not already
  * \param[out] rsc_info  Where to store resource information obtained from LRM
  *
  * \retval pcmk_ok   Success (and rsc_info holds newly allocated result)
  * \retval -EINVAL   Required information is missing from arguments
  * \retval -ENOTCONN No active connection to LRM
  * \retval -ENODEV   Resource not found
  * \retval -errno    Error communicating with executor when registering resource
  *
  * \note Caller is responsible for freeing result on success.
  */
 static int
 get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create,
                  lrmd_rsc_info_t **rsc_info)
 {
     const char *id = ID(rsc_xml);
 
     CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL);
     CRM_CHECK(id, return -EINVAL);
 
     if (lrm_state_is_connected(lrm_state) == FALSE) {
         return -ENOTCONN;
     }
 
     crm_trace("Retrieving resource information for %s from the executor", id);
     *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
 
     // If resource isn't known by ID, try clone name, if provided
     if (!*rsc_info) {
         const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG);
 
         if (long_id) {
             *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0);
         }
     }
 
     if ((*rsc_info == NULL) && do_create) {
         const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS);
         const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER);
         const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE);
         int rc;
 
         crm_trace("Registering resource %s with the executor", id);
         rc = lrm_state_register_rsc(lrm_state, id, class, provider, type,
                                     lrmd_opt_drop_recurring);
         if (rc != pcmk_ok) {
             fsa_data_t *msg_data = NULL;
 
             crm_err("Could not register resource %s with the executor on %s: %s "
                     CRM_XS " rc=%d",
                     id, lrm_state->node_name, pcmk_strerror(rc), rc);
 
             /* Register this as an internal error if this involves the local
              * executor. Otherwise, we're likely dealing with an unresponsive
              * remote node, which is not an FSA failure.
              */
             if (lrm_state_is_local(lrm_state) == TRUE) {
                 register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
             }
             return rc;
         }
 
         *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
     }
     return *rsc_info? pcmk_ok : -ENODEV;
 }
 
 static void
 delete_resource(lrm_state_t * lrm_state,
                 const char *id,
                 lrmd_rsc_info_t * rsc,
                 GHashTableIter * gIter,
                 const char *sys,
                 const char *host,
                 const char *user,
                 ha_msg_input_t * request,
                 gboolean unregister)
 {
     int rc = pcmk_ok;
 
     crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host);
 
     if (rsc && unregister) {
         rc = lrm_state_unregister_rsc(lrm_state, id, 0);
     }
 
     if (rc == pcmk_ok) {
         crm_trace("Resource '%s' deleted", id);
     } else if (rc == -EINPROGRESS) {
         crm_info("Deletion of resource '%s' pending", id);
         if (request) {
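+            /* Remember the request so it can be completed once the executor
+             * reports that the deletion has finished */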
             struct pending_deletion_op_s *op = NULL;
             char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE);
 
             op = calloc(1, sizeof(struct pending_deletion_op_s));
             op->rsc = strdup(rsc->id);
             op->input = copy_ha_msg_input(request);
             g_hash_table_insert(lrm_state->deletion_ops, ref, op);
         }
         return;
     } else {
         crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d",
                  id, sys, user ? user : "internal", host, rc);
     }
 
     delete_rsc_entry(lrm_state, request, id, gIter, rc, user);
 }
 
 static int
 get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id)
 {
     int call_id = 999999999;
     rsc_history_t *entry = NULL;
 
     if(lrm_state) {
         entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     }
 
     /* Make sure the call ID is greater than that of the last successful
      * operation; otherwise the failure will not allow the resource to be
      * recovered, because it could appear to have occurred before the
      * successful start */
     if (entry) {
         call_id = entry->last_callid + 1;
     }
 
     if (call_id < 0) {
         call_id = 1;
     }
     return call_id;
 }
 
 static void
 fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status,
                enum ocf_exitcode op_exitcode)
 {
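+    // Fill in the event as if the executor had reported this status and exit code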
     op->call_id = get_fake_call_id(lrm_state, op->rsc_id);
     op->t_run = time(NULL);
     op->t_rcchange = op->t_run;
     op->op_status = op_status;
     op->rc = op_exitcode;
 }
 
 static void
 force_reprobe(lrm_state_t *lrm_state, const char *from_sys,
               const char *from_host, const char *user_name,
               gboolean is_remote_node)
 {
     GHashTableIter gIter;
     rsc_history_t *entry = NULL;
 
     crm_info("Clearing resource history on node %s", lrm_state->node_name);
     g_hash_table_iter_init(&gIter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         /* Only unregister the resource during a reprobe if it is not a remote
          * connection resource; otherwise, unregistering the connection would
          * terminate the remote node's membership */
         gboolean unregister = TRUE;
 
         if (is_remote_lrmd_ra(NULL, NULL, entry->id)) {
             lrm_state_t *remote_lrm_state = lrm_state_find(entry->id);
             if (remote_lrm_state) {
                 /* When forcing a reprobe, make sure to clear the remote node
                  * itself before clearing its connection resource */
                 force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE);
             }
             unregister = FALSE;
         }
 
         delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host,
                         user_name, NULL, unregister);
     }
 
     /* Now delete the copy in the CIB */
     erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local);
 
     /* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE
      * would result in the scheduler sending us back here again
      */
     update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node);
 }
 
 static void
 synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action, int rc) 
 {
     lrmd_event_data_t *op = NULL;
-    lrmd_rsc_info_t *rsc_info = NULL;
     const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK);
     const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET);
     xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE);
 
     if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) {
         /* @TODO Should we do something else, like direct ack? */
         crm_info("Can't fake %s failure (%d) on %s without resource configuration",
                  crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc,
                  target_node);
         return;
 
     } else if(operation == NULL) {
         /* This probably came from crm_resource -C, nothing to do */
         crm_info("Can't fake %s failure (%d) on %s without operation",
                  ID(xml_rsc), rc, target_node);
         return;
     }
 
     op = construct_op(lrm_state, action, ID(xml_rsc), operation);
 
     if (safe_str_eq(operation, RSC_NOTIFY)) { // Notifications can't fail
         fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK);
     } else {
         fake_op_status(lrm_state, op, PCMK_LRM_OP_ERROR, rc);
     }
 
     crm_info("Faking " CRM_OP_FMT " result (%d) on %s",
              op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node);
 
-    /* Process the result as if it came from the LRM, if possible
-     * (i.e. resource info can be obtained from the lrm_state).
-     */
-    if (lrm_state) {
-        rsc_info = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
-    }
-    if (rsc_info) {
-        lrmd_free_rsc_info(rsc_info);
-        process_lrm_event(lrm_state, op, NULL);
-
-    } else if (controld_action_is_recordable(op->op_type)) {
-        /* If we can't process the result normally, at least write it to the CIB
-         * if possible, so the scheduler can act on it.
-         */
-        const char *standard = crm_element_value(xml_rsc, XML_AGENT_ATTR_CLASS);
-        const char *provider = crm_element_value(xml_rsc, XML_AGENT_ATTR_PROVIDER);
-        const char *type = crm_element_value(xml_rsc, XML_ATTR_TYPE);
-
-        if (standard && type) {
-            rsc_info = lrmd_new_rsc_info(op->rsc_id, standard, provider, type);
-            do_update_resource(target_node, rsc_info, op);
-            lrmd_free_rsc_info(rsc_info);
-        } else {
-            // @TODO Should we direct ack?
-            crm_info("Can't fake %s failure (%d) on %s without resource standard and type",
-                     crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc,
-                     target_node);
-        }
-    }
+    // Process the result as if it came from the LRM
+    process_lrm_event(lrm_state, op, NULL, action);
     lrmd_free_event(op);
 }
 
 /*!
  * \internal
  * \brief Get target of an LRM operation
  *
  * \param[in] xml  LRM operation data XML
  *
  * \return LRM operation target node name (local node or Pacemaker Remote node)
  */
 static const char *
 lrm_op_target(xmlNode *xml)
 {
     const char *target = NULL;
 
     if (xml) {
         target = crm_element_value(xml, XML_LRM_ATTR_TARGET);
     }
     if (target == NULL) {
         target = fsa_our_uname;
     }
     return target;
 }
 
 static void
 fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name,
                   const char *from_host, const char *from_sys)
 {
     lrmd_event_data_t *op = NULL;
     lrmd_rsc_info_t *rsc = NULL;
     xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE);
 
     CRM_CHECK(xml_rsc != NULL, return);
 
     /* The executor simply executes operations and reports the results, without
      * any concept of success or failure, so to fail a resource, we must fake
      * what a failure looks like.
      *
      * To do this, we create a fake executor operation event for the resource,
      * and pass that event to the executor client callback so it will be
      * processed as if it came from the executor.
      */
     op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon");
     fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR);
 
     free((char*) op->user_data);
     op->user_data = NULL;
     op->interval_ms = 0;
 
 #if ENABLE_ACL
     if (user_name && is_privileged(user_name) == FALSE) {
         crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc));
         send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc));
         lrmd_free_event(op);
         return;
     }
 #endif
 
     if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) {
         crm_info("Failing resource %s...", rsc->id);
         op->exit_reason = strdup("Simulated failure");
-        process_lrm_event(lrm_state, op, NULL);
+        process_lrm_event(lrm_state, op, NULL, xml);
         op->op_status = PCMK_LRM_OP_DONE;
         op->rc = PCMK_OCF_OK;
         lrmd_free_rsc_info(rsc);
 
     } else {
         crm_info("Cannot find/create resource in order to fail it...");
         crm_log_xml_warn(xml, "bad input");
     }
 
     send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc));
     lrmd_free_event(op);
 }
 
 static void
 handle_refresh_op(lrm_state_t *lrm_state, const char *user_name,
                   const char *from_host, const char *from_sys)
 {
     int rc = pcmk_ok;
     xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all);
 
     fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name);
     crm_info("Forced a local resource history refresh: call=%d", rc);
 
     if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) {
         xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host,
                                         from_sys, CRM_SYSTEM_LRMD,
                                         fsa_our_uuid);
 
         crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host);
 
         if (relay_message(reply, TRUE) == FALSE) {
             crm_log_xml_err(reply, "Unable to route reply");
         }
         free_xml(reply);
     }
 
     free_xml(fragment);
 }
 
 static void
 handle_query_op(xmlNode *msg, lrm_state_t *lrm_state)
 {
     xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all);
     xmlNode *reply = create_reply(msg, data);
 
     if (relay_message(reply, TRUE) == FALSE) {
         crm_err("Unable to route reply");
         crm_log_xml_err(reply, "reply");
     }
     free_xml(reply);
     free_xml(data);
 }
 
 static void
 handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys,
                   const char *from_host, const char *user_name,
                   gboolean is_remote_node)
 {
     crm_notice("Forcing the status of all resources to be redetected");
     force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node);
 
     if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys)
         && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) {
 
         xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host,
                                         from_sys, CRM_SYSTEM_LRMD,
                                         fsa_our_uuid);
 
         crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host);
 
         if (relay_message(reply, TRUE) == FALSE) {
             crm_log_xml_err(reply, "Unable to route reply");
         }
         free_xml(reply);
     }
 }
 
 static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state,
               lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys)
 {
     char *op_key = NULL;
     char *meta_key = NULL;
     int call = 0;
     const char *call_id = NULL;
     const char *op_task = NULL;
     const char *interval_ms_s = NULL;
     gboolean in_progress = FALSE;
     xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE);
 
     CRM_CHECK(params != NULL, return FALSE);
 
     meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
     interval_ms_s = crm_element_value(params, meta_key);
     free(meta_key);
     CRM_CHECK(interval_ms_s != NULL, return FALSE);
 
     meta_key = crm_meta_name(XML_LRM_ATTR_TASK);
     op_task = crm_element_value(params, meta_key);
     free(meta_key);
     CRM_CHECK(op_task != NULL, return FALSE);
 
     meta_key = crm_meta_name(XML_LRM_ATTR_CALLID);
     call_id = crm_element_value(params, meta_key);
     free(meta_key);
 
     op_key = generate_op_key(rsc->id, op_task, crm_parse_ms(interval_ms_s));
 
     crm_debug("Scheduler requested op %s (call=%s) be cancelled",
               op_key, (call_id? call_id : "NA"));
     call = crm_parse_int(call_id, "0");
     if (call == 0) {
         // Normal case when the scheduler cancels a recurring op
         in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE);
 
     } else {
         // Normal case when the scheduler cancels an orphan op
         in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE);
     }
 
     // Acknowledge cancellation operation if for a remote connection resource
     if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
         char *op_id = make_stop_id(rsc->id, call);
 
         if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) {
             crm_info("Nothing known about operation %d for %s", call, op_key);
         }
         erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call);
         send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task,
                          from_host, from_sys);
 
         /* needed at least for cancellation of a remote operation */
         g_hash_table_remove(lrm_state->pending_ops, op_id);
         free(op_id);
 
     } else {
         /* No ack is needed since abcdaa8, but peers with older versions
          * in a rolling upgrade need one. We didn't bump the feature set
          * at that commit, so we can only compare against the previous
          * CRM version (3.0.8). If any peers have feature set 3.0.9 but
          * not abcdaa8, they will time out waiting for the ack (no
          * released versions of Pacemaker are affected).
          */
         const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION);
 
         if (compare_version(peer_version, "3.0.8") <= 0) {
             crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)",
                      op_key, from_host, peer_version);
             send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task,
                              from_host, from_sys);
         }
     }
 
     free(op_key);
     return TRUE;
 }
 
 static void
 do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state,
               lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host,
               bool crm_rsc_delete, const char *user_name)
 {
     gboolean unregister = TRUE;
 
 #if ENABLE_ACL
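+    // Verify the requesting user's permissions with a dry-run CIB deletion first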
     int cib_rc = delete_rsc_status(lrm_state, rsc->id,
                                    cib_dryrun|cib_sync_call, user_name);
 
     if (cib_rc != pcmk_ok) {
         lrmd_event_data_t *op = NULL;
 
         crm_err("Could not delete resource status of %s for %s (user %s) on %s: %s"
                 CRM_XS " rc=%d",
                 rsc->id, from_sys, (user_name? user_name : "unknown"),
                 from_host, pcmk_strerror(cib_rc), cib_rc);
 
         op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE);
         op->op_status = PCMK_LRM_OP_ERROR;
 
         if (cib_rc == -EACCES) {
             op->rc = PCMK_OCF_INSUFFICIENT_PRIV;
         } else {
             op->rc = PCMK_OCF_UNKNOWN_ERROR;
         }
         send_direct_ack(from_host, from_sys, NULL, op, rsc->id);
         lrmd_free_event(op);
         return;
     }
 #endif
 
     if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
         unregister = FALSE;
     }
 
     delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host,
                     user_name, input, unregister);
 }
 
 /*	 A_LRM_INVOKE	*/
 void
 do_lrm_invoke(long long action,
               enum crmd_fsa_cause cause,
               enum crmd_fsa_state cur_state,
               enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     lrm_state_t *lrm_state = NULL;
     const char *crm_op = NULL;
     const char *from_sys = NULL;
     const char *from_host = NULL;
     const char *operation = NULL;
     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
     const char *user_name = NULL;
     const char *target_node = NULL;
     gboolean is_remote_node = FALSE;
     bool crm_rsc_delete = FALSE;
 
     target_node = lrm_op_target(input->xml);
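+    // Any target other than the local node is treated as a Pacemaker Remote node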
     is_remote_node = safe_str_neq(target_node, fsa_our_uname);
 
     lrm_state = lrm_state_find(target_node);
     if ((lrm_state == NULL) && is_remote_node) {
         crm_err("Failing action because local node has never had connection to remote node %s",
                 target_node);
         synthesize_lrmd_failure(NULL, input->xml, PCMK_OCF_CONNECTION_DIED);
         return;
     }
     CRM_ASSERT(lrm_state != NULL);
 
 #if ENABLE_ACL
     user_name = crm_acl_get_set_user(input->msg, F_CRM_USER, NULL);
     crm_trace("Executor command from user '%s'", user_name);
 #endif
 
     crm_op = crm_element_value(input->msg, F_CRM_TASK);
     from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
     if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) {
         from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
     }
     crm_trace("Executor %s command from %s", crm_op, from_sys);
 
     if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) {
         crm_rsc_delete = TRUE; // Only crm_resource uses this op
         operation = CRMD_ACTION_DELETE;
 
     } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) {
         fail_lrm_resource(input->xml, lrm_state, user_name, from_host,
                           from_sys);
         return;
 
     } else if (input->xml != NULL) {
         operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK);
     }
 
     if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) {
         handle_refresh_op(lrm_state, user_name, from_host, from_sys);
 
     } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) {
         handle_query_op(input->msg, lrm_state);
 
     } else if (safe_str_eq(operation, CRM_OP_PROBED)) {
         update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE,
                      user_name, is_remote_node);
 
     } else if (safe_str_eq(operation, CRM_OP_REPROBE)
                || safe_str_eq(crm_op, CRM_OP_REPROBE)) {
         handle_reprobe_op(lrm_state, from_sys, from_host, user_name,
                           is_remote_node);
 
     } else if (operation != NULL) {
         lrmd_rsc_info_t *rsc = NULL;
         xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE);
         gboolean create_rsc = safe_str_neq(operation, CRMD_ACTION_DELETE);
         int rc;
 
         // We can't return anything meaningful without a resource ID
         CRM_CHECK(xml_rsc && ID(xml_rsc), return);
 
         rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc);
         if (rc == -ENOTCONN) {
             synthesize_lrmd_failure(lrm_state, input->xml,
                                     PCMK_OCF_CONNECTION_DIED);
             return;
 
         } else if ((rc < 0) && !create_rsc) {
             /* Delete of malformed or nonexistent resource
              * (deleting something that does not exist is a success)
              */
             crm_notice("Not registering resource '%s' for a %s event "
                        CRM_XS " get-rc=%d (%s) transition-key=%s",
                        ID(xml_rsc), operation,
                        rc, pcmk_strerror(rc), ID(input->xml));
             delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok,
                              user_name);
             send_task_ok_ack(lrm_state, input, ID(xml_rsc), NULL, operation,
                              from_host, from_sys);
             return;
 
         } else if (rc == -EINVAL) {
             // Resource operation on malformed resource
             crm_err("Invalid resource definition for %s", ID(xml_rsc));
             crm_log_xml_warn(input->msg, "invalid resource");
             synthesize_lrmd_failure(lrm_state, input->xml,
                                     PCMK_OCF_NOT_CONFIGURED); // fatal error
             return;
 
         } else if (rc < 0) {
             // Error communicating with the executor
             crm_err("Could not register resource '%s' with executor: %s "
                     CRM_XS " rc=%d",
                     ID(xml_rsc), pcmk_strerror(rc), rc);
             crm_log_xml_warn(input->msg, "failed registration");
             synthesize_lrmd_failure(lrm_state, input->xml,
                                     PCMK_OCF_INVALID_PARAM); // hard error
             return;
         }
 
         if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) {
             if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) {
                 crm_log_xml_warn(input->xml, "Bad command");
             }
 
         } else if (safe_str_eq(operation, CRMD_ACTION_DELETE)) {
             do_lrm_delete(input, lrm_state, rsc, from_sys, from_host,
                           crm_rsc_delete, user_name);
 
         } else {
             do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg);
         }
 
         lrmd_free_rsc_info(rsc);
 
     } else {
         crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op));
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
 static lrmd_event_data_t *
 construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation)
 {
     lrmd_event_data_t *op = NULL;
     const char *op_delay = NULL;
     const char *op_timeout = NULL;
     const char *interval_ms_s = NULL;
     GHashTable *params = NULL;
 
     const char *transition = NULL;
 
     CRM_ASSERT(rsc_id && operation);
 
     op = calloc(1, sizeof(lrmd_event_data_t));
     CRM_ASSERT(op != NULL);
 
     op->type = lrmd_event_exec_complete;
     op->op_type = strdup(operation);
     op->op_status = PCMK_LRM_OP_PENDING;
     op->rc = -1;
     op->rsc_id = strdup(rsc_id);
     op->interval_ms = 0;
     op->timeout = 0;
     op->start_delay = 0;
 
     if (rsc_op == NULL) {
         CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation));
         op->user_data = NULL;
         /* This is the stop_all_resources() case: by definition there is no
          * DC (or it would be shutting us down), so we should record our own
          * CRM feature set version here.
          */
         op->params = crm_str_table_new();
 
         g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
         crm_trace("Constructed %s op for %s", operation, rsc_id);
         return op;
     }
 
     params = xml2list(rsc_op);
     g_hash_table_remove(params, CRM_META "_op_target_rc");
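+    // Strip the expected-result meta-attribute; it is not a real operation parameter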
 
     op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY);
     op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT);
     interval_ms_s = crm_meta_value(params, XML_LRM_ATTR_INTERVAL_MS);
 
     op->interval_ms = crm_parse_ms(interval_ms_s);
     op->timeout = crm_parse_int(op_timeout, "0");
     op->start_delay = crm_parse_int(op_delay, "0");
 
 #if ENABLE_VERSIONED_ATTRS
     // Resolve any versioned parameters
     if (lrm_state && safe_str_neq(op->op_type, RSC_METADATA)
         && safe_str_neq(op->op_type, CRMD_ACTION_DELETE)
         && !is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
 
         // Resource info *should* already be cached, so this shouldn't trigger an executor call
         lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0);
         struct ra_metadata_s *metadata;
 
         metadata = metadata_cache_get(lrm_state->metadata_cache, rsc);
         if (metadata) {
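+            /* Unpack the versioned instance attributes and meta-attributes that
+             * apply to this agent version, and merge them into the operation
+             * parameters */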
             xmlNode *versioned_attrs = NULL;
             GHashTable *hash = NULL;
             char *key = NULL;
             char *value = NULL;
             GHashTableIter iter;
 
             versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS);
             hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
             g_hash_table_iter_init(&iter, hash);
             while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
                 g_hash_table_iter_steal(&iter);
                 g_hash_table_replace(params, key, value);
             }
             g_hash_table_destroy(hash);
 
             versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META);
             hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
             g_hash_table_iter_init(&iter, hash);
             while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
                 g_hash_table_replace(params, crm_meta_name(key), strdup(value));
 
                 if (safe_str_eq(key, XML_ATTR_TIMEOUT)) {
                     op->timeout = crm_parse_int(value, "0");
                 } else if (safe_str_eq(key, XML_OP_ATTR_START_DELAY)) {
                     op->start_delay = crm_parse_int(value, "0");
                 }
             }
             g_hash_table_destroy(hash);
 
             versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS);
             hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
             g_hash_table_iter_init(&iter, hash);
             while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
                 g_hash_table_iter_steal(&iter);
                 g_hash_table_replace(params, key, value);
             }
             g_hash_table_destroy(hash);
         }
 
         lrmd_free_rsc_info(rsc);
     }
 #endif
 
     if (safe_str_neq(operation, RSC_STOP)) {
         op->params = params;
 
     } else {
         rsc_history_t *entry = NULL;
 
         if (lrm_state) {
             entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
         }
 
         /* If we do not have stop parameters cached, use
          * whatever we are given */
         if (!entry || !entry->stop_params) {
             op->params = params;
         } else {
             /* Copy the cached parameter list so that we stop the resource
              * with the old attributes, not the new ones */
             op->params = crm_str_table_new();
 
             g_hash_table_foreach(params, copy_meta_keys, op->params);
             g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params);
             g_hash_table_destroy(params);
             params = NULL;
         }
     }
 
     /* sanity */
     if (op->timeout <= 0) {
         op->timeout = op->interval_ms;
     }
     if (op->start_delay < 0) {
         op->start_delay = 0;
     }
 
     transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY);
     CRM_CHECK(transition != NULL, return op);
 
     op->user_data = strdup(transition);
 
     if (op->interval_ms != 0) {
         if (safe_str_eq(operation, CRMD_ACTION_START)
             || safe_str_eq(operation, CRMD_ACTION_STOP)) {
             crm_err("Start and Stop actions cannot have an interval: %u",
                     op->interval_ms);
             op->interval_ms = 0;
         }
     }
 
     crm_trace("Constructed %s op for %s: interval=%u",
               operation, rsc_id, op->interval_ms);
 
     return op;
 }
 
 void
 send_direct_ack(const char *to_host, const char *to_sys,
                 lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id)
 {
     xmlNode *reply = NULL;
     xmlNode *update, *iter;
     crm_node_t *peer = NULL;
 
     CRM_CHECK(op != NULL, return);
     if (op->rsc_id == NULL) {
         CRM_ASSERT(rsc_id != NULL);
         op->rsc_id = strdup(rsc_id);
     }
     if (to_sys == NULL) {
         to_sys = CRM_SYSTEM_TENGINE;
     }
 
     peer = crm_get_peer(0, fsa_our_uname);
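+    // Build a minimal node_state update containing just this operation result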
     update = create_node_state_update(peer, node_update_none, NULL,
                                       __FUNCTION__);
 
     iter = create_xml_node(update, XML_CIB_TAG_LRM);
     crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
 
     crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
 
     build_operation_update(iter, rsc, op, fsa_our_uname, __FUNCTION__);
     reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL);
 
     crm_log_xml_trace(update, "ACK Update");
 
     crm_debug("ACK'ing resource op " CRM_OP_FMT " from %s: %s",
               op->rsc_id, op->op_type, op->interval_ms, op->user_data,
               crm_element_value(reply, XML_ATTR_REFERENCE));
 
     if (relay_message(reply, TRUE) == FALSE) {
         crm_log_xml_err(reply, "Unable to route reply");
     }
 
     free_xml(update);
     free_xml(reply);
 }
 
 gboolean
 verify_stopped(enum crmd_fsa_state cur_state, int log_level)
 {
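+    // Check whether all resources have been stopped on every known executor connection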
     gboolean res = TRUE;
     GList *lrm_state_list = lrm_state_get_list();
     GList *state_entry;
 
     for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) {
         lrm_state_t *lrm_state = state_entry->data;
 
         if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) {
             /* keep iterating through all even when false is returned */
             res = FALSE;
         }
     }
 
     set_bit(fsa_input_register, R_SENT_RSC_STOP);
     g_list_free(lrm_state_list); lrm_state_list = NULL;
     return res;
 }
 
 struct stop_recurring_action_s {
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct stop_recurring_action_s *event = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if ((op->interval_ms != 0)
         && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) {
 
         crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key);
         remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 static gboolean
 stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     lrm_state_t *lrm_state = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if (op->interval_ms != 0) {
         crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id,
                  (const char *) key);
         remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 static void
 record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op)
 {
     const char *record_pending = NULL;
 
     CRM_CHECK(node_name != NULL, return);
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(op != NULL, return);
 
     // Never record certain operation types as pending
     if ((op->op_type == NULL) || (op->params == NULL)
         || !controld_action_is_recordable(op->op_type)) {
         return;
     }
 
     // Whether to record pending operations in the CIB (defaults to true)
     record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
     if (record_pending && !crm_is_true(record_pending)) {
         return;
     }
 
     op->call_id = -1;
     op->op_status = PCMK_LRM_OP_PENDING;
     op->rc = PCMK_OCF_UNKNOWN;
 
     op->t_run = time(NULL);
     op->t_rcchange = op->t_run;
 
     /* write a "pending" entry to the CIB, inhibit notification */
     crm_debug("Recording pending op " CRM_OP_FMT " on %s in the CIB",
               op->rsc_id, op->op_type, op->interval_ms, node_name);
 
     do_update_resource(node_name, rsc, op);
 }
 
 static void
 do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg,
               xmlNode * request)
 {
     int call_id = 0;
     char *op_id = NULL;
     lrmd_event_data_t *op = NULL;
     lrmd_key_value_t *params = NULL;
     fsa_data_t *msg_data = NULL;
     const char *transition = NULL;
     gboolean stop_recurring = FALSE;
     bool send_nack = FALSE;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(operation != NULL, return);
 
     if (msg != NULL) {
         transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY);
         if (transition == NULL) {
             crm_log_xml_err(msg, "Missing transition number");
         }
     }
 
     op = construct_op(lrm_state, msg, rsc->id, operation);
     CRM_CHECK(op != NULL, return);
 
     if (is_remote_lrmd_ra(NULL, NULL, rsc->id)
         && (op->interval_ms == 0)
         && strcmp(operation, CRMD_ACTION_MIGRATE) == 0) {
 
         /* Pacemaker Remote connections are a special case. We never want to
          * stop monitoring a connection resource until the entire migration
          * has completed. If the connection is unexpectedly severed, even
          * during a migration, that is an event we must detect. */
         stop_recurring = FALSE;
 
     } else if ((op->interval_ms == 0)
         && strcmp(operation, CRMD_ACTION_STATUS) != 0
         && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) {
 
         /* stop any previous monitor operations before changing the resource state */
         stop_recurring = TRUE;
     }
 
     if (stop_recurring == TRUE) {
         guint removed = 0;
         struct stop_recurring_action_s data;
 
         data.rsc = rsc;
         data.lrm_state = lrm_state;
         removed = g_hash_table_foreach_remove(
             lrm_state->pending_ops, stop_recurring_action_by_rsc, &data);
 
         if (removed) {
             crm_debug("Stopped %u recurring operation%s in preparation for " CRM_OP_FMT,
                       removed, s_if_plural(removed),
                       rsc->id, operation, op->interval_ms);
         }
     }
 
     /* now do the op */
     crm_info("Performing key=%s op=" CRM_OP_FMT,
              transition, rsc->id, operation, op->interval_ms);
 
     if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) {
         register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
         send_nack = TRUE;
 
     } else if (fsa_state != S_NOT_DC
                && fsa_state != S_POLICY_ENGINE /* Recalculating */
                && fsa_state != S_TRANSITION_ENGINE
                && safe_str_neq(operation, CRMD_ACTION_STOP)) {
         send_nack = TRUE;
     }
 
     if(send_nack) {
         crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)",
                    operation, rsc->id, fsa_state2string(fsa_state),
                    is_set(fsa_input_register, R_SHUTDOWN)?"true":"false");
 
         op->rc = CRM_DIRECT_NACK_RC;
         op->op_status = PCMK_LRM_OP_ERROR;
         send_direct_ack(NULL, NULL, rsc, op, rsc->id);
         lrmd_free_event(op);
         free(op_id);
         return;
     }
 
     record_pending_op(lrm_state->node_name, rsc, op);
 
     op_id = generate_op_key(rsc->id, op->op_type, op->interval_ms);
 
     if (op->interval_ms > 0) {
         /* cancel it so we can then restart it without conflict */
         cancel_op_key(lrm_state, rsc, op_id, FALSE);
     }
 
     if (op->params) {
         char *key = NULL;
         char *value = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, op->params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             params = lrmd_key_value_add(params, key, value);
         }
     }
 
     call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data,
                              op->interval_ms, op->timeout, op->start_delay,
                              params);
 
     if (call_id <= 0 && lrm_state_is_local(lrm_state)) {
         crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id);
         register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
 
     } else if (call_id <= 0) {
         crm_err("Operation %s on resource %s failed to execute on remote node %s: %d",
                 operation, rsc->id, lrm_state->node_name, call_id);
         fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR);
-        process_lrm_event(lrm_state, op, NULL);
+        process_lrm_event(lrm_state, op, NULL, NULL);
 
     } else {
         /* record all operations so we can wait
          * for them to complete during shutdown
          */
         char *call_id_s = make_stop_id(rsc->id, call_id);
         struct recurring_op_s *pending = NULL;
 
         pending = calloc(1, sizeof(struct recurring_op_s));
         crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s);
 
         pending->call_id = call_id;
         pending->interval_ms = op->interval_ms;
         pending->op_type = strdup(operation);
         pending->op_key = strdup(op_id);
         pending->rsc_id = strdup(rsc->id);
         pending->start_time = time(NULL);
         pending->user_data = op->user_data? strdup(op->user_data) : NULL;
         g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending);
 
         if ((op->interval_ms > 0)
             && (op->start_delay > START_DELAY_THRESHOLD)) {
 
             char *uuid = NULL;
             int dummy = 0, target_rc = 0;
 
             crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id);
 
             decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc);
             free(uuid);
 
             op->rc = target_rc;
             op->op_status = PCMK_LRM_OP_DONE;
             send_direct_ack(NULL, NULL, rsc, op, rsc->id);
         }
 
         pending->params = op->params;
         op->params = NULL;
     }
 
     free(op_id);
     lrmd_free_event(op);
     return;
 }
 
 int last_resource_update = 0;
 
 static void
 cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     switch (rc) {
         case pcmk_ok:
         case -pcmk_err_diff_failed:
         case -pcmk_err_diff_resync:
             crm_trace("Resource update %d complete: rc=%d", call_id, rc);
             break;
         default:
             crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc));
     }
 
     if (call_id == last_resource_update) {
         last_resource_update = 0;
         trigger_fsa(fsa_source);
     }
 }
 
 static int
 do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
 /* The update has the form:
   <status>
   <node_state id=uuid uname=uname>
   <lrm id=uuid>
   <lrm_resources>
   <lrm_resource id=...>
   </...>
 */
     int rc = pcmk_ok;
     xmlNode *update, *iter = NULL;
     int call_opt = crmd_cib_smart_opt();
     const char *uuid = NULL;
 
     CRM_CHECK(op != NULL, return 0);
 
     iter = create_xml_node(iter, XML_CIB_TAG_STATUS);
     update = iter;
     iter = create_xml_node(iter, XML_CIB_TAG_STATE);
 
     if (safe_str_eq(node_name, fsa_our_uname)) {
         uuid = fsa_our_uuid;
 
     } else {
         /* A remote node's UUID and uname are equal */
         uuid = node_name;
         crm_xml_add(iter, XML_NODE_IS_REMOTE, "true");
     }
 
     CRM_LOG_ASSERT(uuid != NULL);
     if(uuid == NULL) {
         rc = -EINVAL;
         goto done;
     }
 
     crm_xml_add(iter, XML_ATTR_UUID,  uuid);
     crm_xml_add(iter, XML_ATTR_UNAME, node_name);
     crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__);
 
     iter = create_xml_node(iter, XML_CIB_TAG_LRM);
     crm_xml_add(iter, XML_ATTR_ID, uuid);
 
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
     crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
 
     build_operation_update(iter, rsc, op, node_name, __FUNCTION__);
 
     if (rsc) {
         const char *container = NULL;
 
         crm_xml_add(iter, XML_ATTR_TYPE, rsc->type);
         crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard);
         crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider);
 
         if (op->params) {
             container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
         }
         if (container) {
             crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container);
             crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container);
         }
 
     } else {
         crm_warn("Resource %s no longer exists in the executor", op->rsc_id);
         send_direct_ack(NULL, NULL, rsc, op, op->rsc_id);
         goto cleanup;
     }
 
     crm_log_xml_trace(update, __FUNCTION__);
 
     /* Make it an asynchronous call and be done with it.
      *
      * Best case:
      *   the resource state will be discovered during
      *   the next signup or election.
      *
      * Bad case:
      *   we are shutting down and there is no DC at the time,
      *   but then why were we shutting down anyway?
      *   (probably because of an internal error)
      *
      * Worst case:
      *   we get fenced for having resources "running" that really weren't.
      *
      * The alternative, however, means blocking here for too long, which
      * isn't acceptable.
      */
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL);
 
     if (rc > 0) {
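+        // Remember the call ID so cib_rsc_callback() can re-trigger the FSA on completion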
         last_resource_update = rc;
     }
   done:
     /* the return code is a call number, not an error code */
     crm_trace("Sent resource state update message: %d for %s=%u on %s",
               rc, op->op_type, op->interval_ms, op->rsc_id);
     fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback);
 
   cleanup:
     free_xml(update);
     return rc;
 }
 
 void
 do_lrm_event(long long action,
              enum crmd_fsa_cause cause,
              enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data)
 {
     CRM_CHECK(FALSE, return);
 }
 
 static char *
 unescape_newlines(const char *string)
 {
     char *pch = NULL;
     char *ret = NULL;
     static const char *escaped_newline = "\\n";
 
     if (!string) {
         return NULL;
     }
 
     ret = strdup(string);
     pch = strstr(ret, escaped_newline);
     while (pch != NULL) {
         /* 2 chars for 2 chars, null-termination irrelevant */
         memcpy(pch, "\n ", 2 * sizeof(char));
         pch = strstr(pch, escaped_newline);
     }
 
     return ret;
 }
 
-gboolean
-process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op, struct recurring_op_s *pending)
+void
+process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
+                  struct recurring_op_s *pending, xmlNode *action_xml)
 {
     char *op_id = NULL;
     char *op_key = NULL;
 
     int update_id = 0;
     gboolean remove = FALSE;
     gboolean removed = FALSE;
     lrmd_rsc_info_t *rsc = NULL;
+    const char *node_name = NULL;
 
-    CRM_CHECK(op != NULL, return FALSE);
-    CRM_CHECK(op->rsc_id != NULL, return FALSE);
+    CRM_CHECK(op != NULL, return);
+    CRM_CHECK(op->rsc_id != NULL, return);
 
     op_id = make_stop_id(op->rsc_id, op->call_id);
     op_key = generate_op_key(op->rsc_id, op->op_type, op->interval_ms);
-    rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
+
+    // Get resource info if available (from executor state or action XML)
+    if (lrm_state) {
+        rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
+    }
+    if ((rsc == NULL) && action_xml) {
+        xmlNode *xml = find_xml_node(action_xml, XML_CIB_TAG_RESOURCE, TRUE);
+
+        const char *standard = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
+        const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
+        const char *type = crm_element_value(xml, XML_ATTR_TYPE);
+
+        if (standard && type) {
+            crm_info("%s agent information not cached, using %s%s%s:%s from action XML",
+                     op->rsc_id, standard,
+                     (provider? ":" : ""), (provider? provider : ""), type);
+            rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type);
+        } else {
+            crm_err("Can't process %s result because %s agent information not cached or in XML",
+                    op_key, op->rsc_id);
+        }
+    }
+    CRM_LOG_ASSERT(rsc != NULL); // If it's still NULL, there's a bug somewhere
+
+    // Get node name if available (from executor state or action XML)
+    if (lrm_state) {
+        node_name = lrm_state->node_name;
+    } else if (action_xml) {
+        node_name = crm_element_value(action_xml, XML_LRM_ATTR_TARGET);
+    }
+
     if(pending == NULL) {
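+        /* The pending op was not passed in, so look it up; in that case we are
+         * also responsible for removing it from pending_ops below */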
         remove = TRUE;
-        pending = g_hash_table_lookup(lrm_state->pending_ops, op_id);
+        if (lrm_state) {
+            pending = g_hash_table_lookup(lrm_state->pending_ops, op_id);
+        }
     }
 
     if (op->op_status == PCMK_LRM_OP_ERROR) {
         switch(op->rc) {
             case PCMK_OCF_NOT_RUNNING:
             case PCMK_OCF_RUNNING_MASTER:
             case PCMK_OCF_DEGRADED:
             case PCMK_OCF_DEGRADED_MASTER:
                 // Leave it to the TE/scheduler to decide if this is an error
                 op->op_status = PCMK_LRM_OP_DONE;
                 break;
             default:
                 /* Nothing to do */
                 break;
         }
     }
 
     if (op->op_status != PCMK_LRM_OP_CANCELLED) {
         if (controld_action_is_recordable(op->op_type)) {
-            update_id = do_update_resource(lrm_state->node_name, rsc, op);
+            if (node_name && rsc) {
+                update_id = do_update_resource(node_name, rsc, op);
+            } else {
+                // @TODO Should we direct ack?
+                crm_err("Unable to record %s result in CIB: %s",
+                        op_key,
+                        (node_name? "No resource information" : "No node name"));
+            }
         } else {
             send_direct_ack(NULL, NULL, NULL, op, op->rsc_id);
         }
     } else if (op->interval_ms == 0) {
         /* This will occur when "crm resource cleanup" is called while actions are in-flight */
         crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id);
         send_direct_ack(NULL, NULL, NULL, op, op->rsc_id);
 
     } else if (pending == NULL) {
         /* We don't need to do anything for cancelled ops
          * that are not in our pending op list. There are no
          * transition actions waiting on these operations. */
 
     } else if (op->user_data == NULL) {
         /* At this point we have a pending entry, but no transition
          * key is present in the user_data field, so report it */
         crm_err("Op %s (call=%d): No user data", op_key, op->call_id);
 
     } else if (pending->remove) {
         /* The tengine cancelled this op; we have been waiting for the cancellation to finish. */
-        erase_lrm_history_by_op(lrm_state, op);
+        if (lrm_state) {
+            erase_lrm_history_by_op(lrm_state, op);
+        }
 
     } else if (op->rsc_deleted) {
         /* The tengine initiated this op, but it was cancelled outside of the
          * tengine's control during a resource cleanup/re-probe request. The
          * tengine must be alerted that this operation completed, otherwise it
          * will continue waiting for this update until it times out. We don't
          * want this update going to the CIB though, so use a direct ack. */
         crm_trace("Op %s (call=%d): cancelled due to rsc deletion", op_key, op->call_id);
         send_direct_ack(NULL, NULL, NULL, op, op->rsc_id);
 
     } else {
         /* Before a stop is called, there is no need for a direct ack */
         crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id);
     }
 
     if(remove == FALSE) {
         /* The caller will do this afterwards, but keep the logging consistent */
         removed = TRUE;
 
-    } else if ((op->interval_ms == 0)
-               && g_hash_table_remove(lrm_state->pending_ops, op_id)) {
+    } else if (lrm_state && ((op->interval_ms == 0)
+                             || (op->op_status == PCMK_LRM_OP_CANCELLED))) {
 
-        removed = TRUE;
-        crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed",
-                  op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops));
+        gboolean found = g_hash_table_remove(lrm_state->pending_ops, op_id);
 
-    } else if ((op->interval_ms != 0)
-               && (op->op_status == PCMK_LRM_OP_CANCELLED)) {
+        if (op->interval_ms != 0) {
+            removed = TRUE;
+        } else if (found) {
+            removed = TRUE;
+            crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed",
+                      op_key, op->call_id, op_id,
+                      g_hash_table_size(lrm_state->pending_ops));
+        }
+    }
 
-        removed = TRUE;
-        g_hash_table_remove(lrm_state->pending_ops, op_id);
+    if (node_name == NULL) {
+        node_name = "unknown node"; // for logging
     }
 
     switch (op->op_status) {
         case PCMK_LRM_OP_CANCELLED:
             crm_info("Result of %s operation for %s on %s: %s "
                      CRM_XS " call=%d key=%s confirmed=%s",
                      crm_action_str(op->op_type, op->interval_ms),
-                     op->rsc_id, lrm_state->node_name,
+                     op->rsc_id, node_name,
                      services_lrm_status_str(op->op_status),
                      op->call_id, op_key, (removed? "true" : "false"));
             break;
 
         case PCMK_LRM_OP_DONE:
             do_crm_log((op->interval_ms? LOG_INFO : LOG_NOTICE),
                        "Result of %s operation for %s on %s: %d (%s) "
                        CRM_XS " call=%d key=%s confirmed=%s cib-update=%d",
                        crm_action_str(op->op_type, op->interval_ms),
-                       op->rsc_id, lrm_state->node_name,
+                       op->rsc_id, node_name,
                        op->rc, services_ocf_exitcode_str(op->rc),
                        op->call_id, op_key, (removed? "true" : "false"),
                        update_id);
             break;
 
         case PCMK_LRM_OP_TIMEOUT:
             crm_err("Result of %s operation for %s on %s: %s "
                     CRM_XS " call=%d key=%s timeout=%dms",
                     crm_action_str(op->op_type, op->interval_ms),
-                    op->rsc_id, lrm_state->node_name,
+                    op->rsc_id, node_name,
                     services_lrm_status_str(op->op_status),
                     op->call_id, op_key, op->timeout);
             break;
 
         default:
             crm_err("Result of %s operation for %s on %s: %s "
                     CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d",
                     crm_action_str(op->op_type, op->interval_ms),
-                    op->rsc_id, lrm_state->node_name,
+                    op->rsc_id, node_name,
                     services_lrm_status_str(op->op_status), op->call_id, op_key,
                     (removed? "true" : "false"), op->op_status, update_id);
     }
 
     if (op->output) {
         char *prefix =
-            crm_strdup_printf("%s-" CRM_OP_FMT ":%d", lrm_state->node_name,
+            crm_strdup_printf("%s-" CRM_OP_FMT ":%d", node_name,
                               op->rsc_id, op->op_type, op->interval_ms,
                               op->call_id);
 
         if (op->rc) {
             crm_log_output(LOG_NOTICE, prefix, op->output);
         } else {
             crm_log_output(LOG_DEBUG, prefix, op->output);
         }
         free(prefix);
     }
 
-    if (safe_str_neq(op->op_type, RSC_METADATA)) {
-        crmd_alert_resource_op(lrm_state->node_name, op);
-    } else if (op->rc == PCMK_OCF_OK) {
-        char *metadata = unescape_newlines(op->output);
+    if (lrm_state) {
+        if (safe_str_neq(op->op_type, RSC_METADATA)) {
+            crmd_alert_resource_op(lrm_state->node_name, op);
+        } else if (rsc && (op->rc == PCMK_OCF_OK)) {
+            char *metadata = unescape_newlines(op->output);
 
-        metadata_cache_update(lrm_state->metadata_cache, rsc, metadata);
-        free(metadata);
+            metadata_cache_update(lrm_state->metadata_cache, rsc, metadata);
+            free(metadata);
+        }
     }
 
     if (op->rsc_deleted) {
         crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key);
-        delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL);
+        if (lrm_state) {
+            delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL);
+        }
     }
 
     /* If a shutdown was escalated while operations were pending,
      * then the FSA will be stalled right now... allow it to continue
      */
     mainloop_set_trigger(fsa_source);
-    update_history_cache(lrm_state, rsc, op);
+    if (lrm_state && rsc) {
+        update_history_cache(lrm_state, rsc, op);
+    }
 
     lrmd_free_rsc_info(rsc);
     free(op_key);
     free(op_id);
-
-    return TRUE;
 }
diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c
index 8fc733e755..8a1a7f31fe 100644
--- a/daemons/controld/controld_execd_state.c
+++ b/daemons/controld/controld_execd_state.c
@@ -1,832 +1,832 @@
 /*
  * Copyright 2012-2018 David Vossel <davidvossel@gmail.com>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/iso8601.h>
 
 #include <pacemaker-controld.h>
 #include <controld_fsa.h>
 #include <controld_messages.h>
 #include <controld_callbacks.h>
 #include <controld_lrm.h>
 #include <controld_alerts.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/rules_internal.h>
 #include <crm/transition.h>
 #include <crm/lrmd_alerts_internal.h>
 
 GHashTable *lrm_state_table = NULL;
 extern GHashTable *proxy_table;
 int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
 void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
 
 static void
 free_rsc_info(gpointer value)
 {
     lrmd_rsc_info_t *rsc_info = value;
 
     lrmd_free_rsc_info(rsc_info);
 }
 
 static void
 free_deletion_op(gpointer value)
 {
     struct pending_deletion_op_s *op = value;
 
     free(op->rsc);
     delete_ha_msg_input(op->input);
     free(op);
 }
 
 static void
 free_recurring_op(gpointer value)
 {
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     free(op->user_data);
     free(op->rsc_id);
     free(op->op_type);
     free(op->op_key);
     if (op->params) {
         g_hash_table_destroy(op->params);
     }
     free(op);
 }
 
 static gboolean
 fail_pending_op(gpointer key, gpointer value, gpointer user_data)
 {
     lrmd_event_data_t event = { 0, };
     lrm_state_t *lrm_state = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     crm_trace("Pre-emptively failing " CRM_OP_FMT " on %s (call=%s, %s)",
               op->rsc_id, op->op_type, op->interval_ms,
               lrm_state->node_name, (char*)key, op->user_data);
 
     event.type = lrmd_event_exec_complete;
     event.rsc_id = op->rsc_id;
     event.op_type = op->op_type;
     event.user_data = op->user_data;
     event.timeout = 0;
     event.interval_ms = op->interval_ms;
     event.rc = PCMK_OCF_CONNECTION_DIED;
     event.op_status = PCMK_LRM_OP_ERROR;
     event.t_run = op->start_time;
     event.t_rcchange = op->start_time;
 
     event.call_id = op->call_id;
     event.remote_nodename = lrm_state->node_name;
     event.params = op->params;
 
-    process_lrm_event(lrm_state, &event, op);
+    process_lrm_event(lrm_state, &event, op, NULL);
     return TRUE;
 }
 
 gboolean
 lrm_state_is_local(lrm_state_t *lrm_state)
 {
     if (lrm_state == NULL || fsa_our_uname == NULL) {
         return FALSE;
     }
 
     if (strcmp(lrm_state->node_name, fsa_our_uname) != 0) {
         return FALSE;
     }
 
     return TRUE;
 
 }
 
 lrm_state_t *
 lrm_state_create(const char *node_name)
 {
     lrm_state_t *state = NULL;
 
     if (!node_name) {
         crm_err("No node name given for lrm state object");
         return NULL;
     }
 
     state = calloc(1, sizeof(lrm_state_t));
     if (!state) {
         return NULL;
     }
 
     state->node_name = strdup(node_name);
 
     state->rsc_info_cache = g_hash_table_new_full(crm_str_hash,
                                                 g_str_equal, NULL, free_rsc_info);
 
     state->deletion_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                                 free_deletion_op);
 
     state->pending_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                                free_recurring_op);
 
     state->resource_history = g_hash_table_new_full(crm_str_hash,
                                                     g_str_equal, NULL, history_free);
 
     state->metadata_cache = metadata_cache_new();
 
     g_hash_table_insert(lrm_state_table, (char *)state->node_name, state);
     return state;
 
 }
 
 void
 lrm_state_destroy(const char *node_name)
 {
     g_hash_table_remove(lrm_state_table, node_name);
 }
 
 static gboolean
 remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data)
 {
     remote_proxy_t *proxy = value;
     const char *node_name = user_data;
 
     if (safe_str_eq(node_name, proxy->node_name)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 static void
 internal_lrm_state_destroy(gpointer data)
 {
     lrm_state_t *lrm_state = data;
 
     if (!lrm_state) {
         return;
     }
 
     crm_trace("Destroying proxy table %s with %d members", lrm_state->node_name, g_hash_table_size(proxy_table));
     g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name);
     remote_ra_cleanup(lrm_state);
     lrmd_api_delete(lrm_state->conn);
 
     if (lrm_state->rsc_info_cache) {
         crm_trace("Destroying rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache));
         g_hash_table_destroy(lrm_state->rsc_info_cache);
     }
     if (lrm_state->resource_history) {
         crm_trace("Destroying history op cache with %d members", g_hash_table_size(lrm_state->resource_history));
         g_hash_table_destroy(lrm_state->resource_history);
     }
     if (lrm_state->deletion_ops) {
         crm_trace("Destroying deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops));
         g_hash_table_destroy(lrm_state->deletion_ops);
     }
     if (lrm_state->pending_ops) {
         crm_trace("Destroying pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops));
         g_hash_table_destroy(lrm_state->pending_ops);
     }
     metadata_cache_free(lrm_state->metadata_cache);
 
     free((char *)lrm_state->node_name);
     free(lrm_state);
 }
 
 void
 lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata)
 {
     if (lrm_state->resource_history) {
         crm_trace("Re-setting history op cache with %d members",
                   g_hash_table_size(lrm_state->resource_history));
         g_hash_table_remove_all(lrm_state->resource_history);
     }
     if (lrm_state->deletion_ops) {
         crm_trace("Re-setting deletion op cache with %d members",
                   g_hash_table_size(lrm_state->deletion_ops));
         g_hash_table_remove_all(lrm_state->deletion_ops);
     }
     if (lrm_state->pending_ops) {
         crm_trace("Re-setting pending op cache with %d members",
                   g_hash_table_size(lrm_state->pending_ops));
         g_hash_table_remove_all(lrm_state->pending_ops);
     }
     if (lrm_state->rsc_info_cache) {
         crm_trace("Re-setting rsc info cache with %d members",
                   g_hash_table_size(lrm_state->rsc_info_cache));
         g_hash_table_remove_all(lrm_state->rsc_info_cache);
     }
     if (reset_metadata) {
         metadata_cache_reset(lrm_state->metadata_cache);
     }
 }
 
 gboolean
 lrm_state_init_local(void)
 {
     if (lrm_state_table) {
         return TRUE;
     }
 
     lrm_state_table =
         g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, internal_lrm_state_destroy);
     if (!lrm_state_table) {
         return FALSE;
     }
 
     proxy_table =
         g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, remote_proxy_free);
     if (!proxy_table) {
         g_hash_table_destroy(lrm_state_table);
         lrm_state_table = NULL;
         return FALSE;
     }
 
     return TRUE;
 }
 
 void
 lrm_state_destroy_all(void)
 {
     if (lrm_state_table) {
         crm_trace("Destroying state table with %d members", g_hash_table_size(lrm_state_table));
         g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL;
     }
     if(proxy_table) {
         crm_trace("Destroying proxy table with %d members", g_hash_table_size(proxy_table));
         g_hash_table_destroy(proxy_table); proxy_table = NULL;
     }
 }
 
 lrm_state_t *
 lrm_state_find(const char *node_name)
 {
     if (!node_name) {
         return NULL;
     }
     return g_hash_table_lookup(lrm_state_table, node_name);
 }
 
 lrm_state_t *
 lrm_state_find_or_create(const char *node_name)
 {
     lrm_state_t *lrm_state;
 
     lrm_state = g_hash_table_lookup(lrm_state_table, node_name);
     if (!lrm_state) {
         lrm_state = lrm_state_create(node_name);
     }
 
     return lrm_state;
 }
 
 GList *
 lrm_state_get_list(void)
 {
     return g_hash_table_get_values(lrm_state_table);
 }
 
 static remote_proxy_t *
 find_connected_proxy_by_node(const char * node_name)
 {
     GHashTableIter gIter;
     remote_proxy_t *proxy = NULL;
 
     CRM_CHECK(proxy_table != NULL, return NULL);
 
     g_hash_table_iter_init(&gIter, proxy_table);
 
     while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) {
         if (proxy->source
             && safe_str_eq(node_name, proxy->node_name)) {
             return proxy;
         }
     }
 
     return NULL;
 }
 
 static void
 remote_proxy_disconnect_by_node(const char * node_name)
 {
     remote_proxy_t *proxy = NULL;
 
     CRM_CHECK(proxy_table != NULL, return);
 
     while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) {
         /* mainloop_del_ipc_client() eventually calls remote_proxy_disconnected(),
          * which removes the entry from proxy_table.
          * Do not do this in a g_hash_table_iter_next() loop. */
         if (proxy->source) {
             mainloop_del_ipc_client(proxy->source);
         }
     }
 
     return;
 }
 
 void
 lrm_state_disconnect_only(lrm_state_t * lrm_state)
 {
     int removed = 0;
 
     if (!lrm_state->conn) {
         return;
     }
     crm_trace("Disconnecting %s", lrm_state->node_name);
 
     remote_proxy_disconnect_by_node(lrm_state->node_name);
 
     ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn);
 
     if (is_not_set(fsa_input_register, R_SHUTDOWN)) {
         removed = g_hash_table_foreach_remove(lrm_state->pending_ops, fail_pending_op, lrm_state);
         crm_trace("Synthesized %d operation failures for %s", removed, lrm_state->node_name);
     }
 }
 
 void
 lrm_state_disconnect(lrm_state_t * lrm_state)
 {
     if (!lrm_state->conn) {
         return;
     }
 
     lrm_state_disconnect_only(lrm_state);
 
     lrmd_api_delete(lrm_state->conn);
     lrm_state->conn = NULL;
 }
 
 int
 lrm_state_is_connected(lrm_state_t * lrm_state)
 {
     if (!lrm_state->conn) {
         return FALSE;
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn);
 }
 
 int
 lrm_state_poke_connection(lrm_state_t * lrm_state)
 {
 
     if (!lrm_state->conn) {
         return -1;
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn);
 }
 
 int
 lrm_state_ipc_connect(lrm_state_t * lrm_state)
 {
     int ret;
 
     if (!lrm_state->conn) {
         lrm_state->conn = lrmd_api_new();
         ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, lrm_op_callback);
     }
 
     ret = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn, CRM_SYSTEM_CRMD, NULL);
 
     if (ret != pcmk_ok) {
         lrm_state->num_lrm_register_fails++;
     } else {
         lrm_state->num_lrm_register_fails = 0;
     }
 
     return ret;
 }
 
 static remote_proxy_t *
 crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel)
 {
     static struct ipc_client_callbacks proxy_callbacks = {
         .dispatch = remote_proxy_dispatch,
         .destroy = remote_proxy_disconnected
     };
     remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name,
                                              session_id, channel);
     return proxy;
 }
 
 gboolean
 crmd_is_proxy_session(const char *session)
 {
     return g_hash_table_lookup(proxy_table, session) ? TRUE : FALSE;
 }
 
 void
 crmd_proxy_send(const char *session, xmlNode *msg)
 {
     remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
     lrm_state_t *lrm_state = NULL;
 
     if (!proxy) {
         return;
     }
     crm_log_xml_trace(msg, "to-proxy");
     lrm_state = lrm_state_find(proxy->node_name);
     if (lrm_state) {
         crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name);
         remote_proxy_relay_event(proxy, msg);
     }
 }
 
 static void
 crmd_proxy_dispatch(const char *session, xmlNode *msg)
 {
 
     crm_log_xml_trace(msg, "controller-proxy[inbound]");
 
     crm_xml_add(msg, F_CRM_SYS_FROM, session);
     if (crmd_authorize_message(msg, NULL, session)) {
         route_message(C_IPC_MESSAGE, msg);
     }
 
     trigger_fsa(fsa_source);
 }
 
 static void
 remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     if (rc != pcmk_ok) {
         crm_err("Query resulted in an error: %s", pcmk_strerror(rc));
 
         if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
             crm_err("The cluster is mis-configured - shutting down and staying down");
         }
 
     } else {
         lrmd_t * lrmd = (lrmd_t *)user_data;
         crm_time_t *now = crm_time_new(NULL);
         GHashTable *config_hash = crm_str_table_new();
 
         crm_debug("Call %d : Parsing CIB options", call_id);
 
         unpack_instance_attributes(
             output, output, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, now);
 
         /* Now send it to the remote peer */
         remote_proxy_check(lrmd, config_hash);
 
         g_hash_table_destroy(config_hash);
         crm_time_free(now);
     }
 }
 
 static void
 crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
 {
     lrm_state_t *lrm_state = userdata;
     const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION);
     remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
 
     const char *op = crm_element_value(msg, F_LRMD_IPC_OP);
     if (safe_str_eq(op, LRMD_IPC_OP_NEW)) {
         const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER);
 
         proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel);
         if (!remote_ra_controlling_guest(lrm_state)) {
             if (proxy != NULL) {
                 /* Look up stonith-watchdog-timeout and send to the remote peer for validation */
                 int rc = fsa_cib_conn->cmds->query(fsa_cib_conn, XML_CIB_TAG_CRMCONFIG, NULL, cib_scope_local);
                 fsa_cib_conn->cmds->register_callback_full(fsa_cib_conn, rc, 10, FALSE, lrmd,
                                                         "remote_config_check", remote_config_check, NULL);
             }
         } else {
             crm_debug("Skipping remote_config_check for guest-nodes");
         }
 
     } else if (safe_str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ)) {
         char *now_s = NULL;
         time_t now = time(NULL);
 
         crm_notice("%s requested shutdown of its remote connection",
                    lrm_state->node_name);
 
         if (!remote_ra_is_in_maintenance(lrm_state)) {
             now_s = crm_itoa(now);
             update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, TRUE);
             free(now_s);
 
             remote_proxy_ack_shutdown(lrmd);
 
             crm_warn("Reconnection attempts to %s may result in failures that must be cleared",
                     lrm_state->node_name);
         } else {
             remote_proxy_nack_shutdown(lrmd);
 
             crm_notice("Remote resource for %s is not managed so no ordered shutdown happening",
                     lrm_state->node_name);
         }
         return;
 
     } else if (safe_str_eq(op, LRMD_IPC_OP_REQUEST) && proxy && proxy->is_local) {
         /* This is for the controller, which we are, so don't try
          * to send to ourselves over IPC -- do it directly.
          */
         int flags = 0;
         xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG);
 
         CRM_CHECK(request != NULL, return);
 #if ENABLE_ACL
         CRM_CHECK(lrm_state->node_name, return);
         crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote");
         crm_acl_get_set_user(request, F_LRMD_IPC_USER, lrm_state->node_name);
 #endif
 
         /* Pacemaker Remote nodes don't know their own names (as known to the
          * cluster). When getting a node info request with no name or ID, add
          * the name, so we don't return info for ourselves instead of the
          * Pacemaker Remote node.
          */
         if (safe_str_eq(crm_element_value(request, F_CRM_TASK),
                         CRM_OP_NODE_INFO)) {
             int node_id;
 
             crm_element_value_int(request, XML_ATTR_ID, &node_id);
             if ((node_id <= 0)
                 && (crm_element_value(request, XML_ATTR_UNAME) == NULL)) {
                 crm_xml_add(request, XML_ATTR_UNAME, lrm_state->node_name);
             }
         }
 
         crmd_proxy_dispatch(session, request);
 
         crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags);
         if (flags & crm_ipc_client_response) {
             int msg_id = 0;
             xmlNode *op_reply = create_xml_node(NULL, "ack");
 
             crm_xml_add(op_reply, "function", __FUNCTION__);
             crm_xml_add_int(op_reply, "line", __LINE__);
 
             crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id);
             remote_proxy_relay_response(proxy, op_reply, msg_id);
 
             free_xml(op_reply);
         }
 
     } else {
         remote_proxy_cb(lrmd, lrm_state->node_name, msg);
     }
 }
 
 
 int
 lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port,
                                int timeout_ms)
 {
     int ret;
 
     if (!lrm_state->conn) {
         lrm_state->conn = lrmd_remote_api_new(lrm_state->node_name, server, port);
         if (!lrm_state->conn) {
             return -1;
         }
         ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, remote_lrm_op_callback);
         lrmd_internal_set_proxy_callback(lrm_state->conn, lrm_state, crmd_remote_proxy_cb);
     }
 
     crm_trace("initiating remote connection to %s at %d with timeout %d", server, port, timeout_ms);
     ret =
         ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn, lrm_state->node_name,
                                                           timeout_ms);
 
     if (ret != pcmk_ok) {
         lrm_state->num_lrm_register_fails++;
     } else {
         lrm_state->num_lrm_register_fails = 0;
     }
 
     return ret;
 }
 
 int
 lrm_state_get_metadata(lrm_state_t * lrm_state,
                        const char *class,
                        const char *provider,
                        const char *agent, char **output, enum lrmd_call_options options)
 {
     lrmd_key_value_t *params = NULL;
 
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     /* Add the node name to the environment, as is done with normal resource
      * action calls. Meta-data calls shouldn't need it, but some agents are
      * written with an ocf_local_nodename call at the beginning regardless of
      * action. Without the environment variable, the agent would try to contact
      * the controller to get the node name -- but the controller would be
      * blocking on the synchronous meta-data call.
      *
      * At this point, we have to assume that agents are unlikely to make other
      * calls that require the controller, such as crm_node --quorum or
      * --cluster-id.
      *
      * @TODO Make meta-data calls asynchronous. (This will be part of a larger
      * project to make meta-data calls via the executor rather than directly.)
      */
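+    /* A sketch of the mechanism assumed here: with the usual msg_xml.h
+     * definitions this key expands to "CRM_meta_on_node", and the executor
+     * exports resource parameters into the agent's environment, so the agent
+     * can obtain the node name from its environment rather than querying the
+     * controller.
+     */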
     params = lrmd_key_value_add(params, CRM_META "_" XML_LRM_ATTR_TARGET,
                                 lrm_state->node_name);
 
     return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn,
             class, provider, agent, output, options, params);
 }
 
 int
 lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
                  guint interval_ms)
 {
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     /* Figure out a way to make this async?
      * NOTICE: Currently it's synchronous and directly acknowledged in do_lrm_invoke(). */
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         return remote_ra_cancel(lrm_state, rsc_id, action, interval_ms);
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id,
                                                       action, interval_ms);
 }
 
 lrmd_rsc_info_t *
 lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options)
 {
     lrmd_rsc_info_t *rsc = NULL;
 
     if (!lrm_state->conn) {
         return NULL;
     }
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         return remote_ra_get_rsc_info(lrm_state, rsc_id);
     }
 
     rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id);
     if (rsc == NULL) {
         /* only contact the lrmd if we don't already have a cached rsc info */
         rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options);
         if (rsc == NULL) {
             return NULL;
         }
         /* cache the result */
         g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc);
     }
 
     return lrmd_copy_rsc_info(rsc);
 
 }
 
 int
 lrm_state_exec(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
                const char *userdata, guint interval_ms,
                int timeout,     /* ms */
                int start_delay, /* ms */
                lrmd_key_value_t * params)
 {
 
     if (!lrm_state->conn) {
         lrmd_key_value_freeall(params);
         return -ENOTCONN;
     }
 
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         return remote_ra_exec(lrm_state, rsc_id, action, userdata, interval_ms,
                               timeout, start_delay, params);
     }
 
     return ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn,
                                                     rsc_id,
                                                     action,
                                                     userdata,
                                                     interval_ms,
                                                     timeout,
                                                     start_delay,
                                                     lrmd_opt_notify_changes_only, params);
 }
 
 int
 lrm_state_register_rsc(lrm_state_t * lrm_state,
                        const char *rsc_id,
                        const char *class,
                        const char *provider, const char *agent, enum lrmd_call_options options)
 {
     lrmd_t *conn = (lrmd_t *) lrm_state->conn;
 
     if (conn == NULL) {
         return -ENOTCONN;
     }
 
     if (is_remote_lrmd_ra(agent, provider, NULL)) {
         return lrm_state_find_or_create(rsc_id)? pcmk_ok : -EINVAL;
     }
 
     /* @TODO Implement an asynchronous version of this (currently a blocking
      * call to the lrmd).
      */
     return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider,
                                     agent, options);
 }
 
 int
 lrm_state_unregister_rsc(lrm_state_t * lrm_state,
                          const char *rsc_id, enum lrmd_call_options options)
 {
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         lrm_state_destroy(rsc_id);
         return pcmk_ok;
     }
 
     g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id);
 
     /* @TODO Optimize this ... this function is a blocking round trip from
      * client to daemon. The controld_execd_state.c code path that uses this
      * function should always treat it as an async operation. The executor API
      * should make an async version available.
      */
     return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options);
 }
 
 /*
  * Functions for sending alerts via local executor connection
  */
 
 static GListPtr crmd_alert_list = NULL;
 
 void
 crmd_unpack_alerts(xmlNode *alerts)
 {
     pe_free_alert_list(crmd_alert_list);
     crmd_alert_list = pe_unpack_alerts(alerts);
 }
 
 void
 crmd_alert_node_event(crm_node_t *node)
 {
     lrm_state_t *lrm_state;
 
     if (crmd_alert_list == NULL) {
         return;
     }
 
     lrm_state = lrm_state_find(fsa_our_uname);
     if (lrm_state == NULL) {
         return;
     }
 
     lrmd_send_node_alert((lrmd_t *) lrm_state->conn, crmd_alert_list,
                          node->uname, node->id, node->state);
 }
 
 void
 crmd_alert_fencing_op(stonith_event_t * e)
 {
     char *desc;
     lrm_state_t *lrm_state;
 
     if (crmd_alert_list == NULL) {
         return;
     }
 
     lrm_state = lrm_state_find(fsa_our_uname);
     if (lrm_state == NULL) {
         return;
     }
 
     desc = crm_strdup_printf("Operation %s of %s by %s for %s@%s: %s (ref=%s)",
                              e->action, e->target,
                              (e->executioner? e->executioner : "<no-one>"),
                              e->client_origin, e->origin,
                              pcmk_strerror(e->result), e->id);
 
     lrmd_send_fencing_alert((lrmd_t *) lrm_state->conn, crmd_alert_list,
                             e->target, e->operation, desc, e->result);
     free(desc);
 }
 
 void
 crmd_alert_resource_op(const char *node, lrmd_event_data_t * op)
 {
     lrm_state_t *lrm_state;
 
     if (crmd_alert_list == NULL) {
         return;
     }
 
     lrm_state = lrm_state_find(fsa_our_uname);
     if (lrm_state == NULL) {
         return;
     }
 
     lrmd_send_resource_alert((lrmd_t *) lrm_state->conn, crmd_alert_list, node,
                              op);
 }
diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h
index 13238a0160..a4965057df 100644
--- a/daemons/controld/controld_lrm.h
+++ b/daemons/controld/controld_lrm.h
@@ -1,164 +1,165 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <controld_messages.h>
 #include <controld_metadata.h>
 
 extern gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level);
 void lrm_clear_last_failure(const char *rsc_id, const char *node_name,
                             const char *operation, guint interval_ms);
 void lrm_op_callback(lrmd_event_data_t * op);
 lrmd_t *crmd_local_lrmd_conn(void);
 
 typedef struct resource_history_s {
     char *id;
     uint32_t last_callid;
     lrmd_rsc_info_t rsc;
     lrmd_event_data_t *last;
     lrmd_event_data_t *failed;
     GList *recurring_op_list;
 
     /* Resources must be stopped using the same
      * parameters they were started with.  This hashtable
      * holds the parameters that should be used for the next stop
      * cmd on this resource. */
     GHashTable *stop_params;
 } rsc_history_t;
 
 void history_free(gpointer data);
 
 /* TODO - Replace this with lrmd_event_data_t */
 struct recurring_op_s {
     guint interval_ms;
     int call_id;
     gboolean remove;
     gboolean cancelled;
     unsigned int start_time;
     char *rsc_id;
     char *op_type;
     char *op_key;
     char *user_data;
     GHashTable *params;
 };
 
 typedef struct lrm_state_s {
     const char *node_name;
     void *conn;                 // Reserved for controld_execd_state.c usage
     void *remote_ra_data;       // Reserved for controld_remote_ra.c usage
 
     GHashTable *resource_history;   // value = rsc_history_t
     GHashTable *pending_ops;        // value = struct recurring_op_s
     GHashTable *deletion_ops;       // value = struct pending_deletion_op_s
     GHashTable *rsc_info_cache;     // value = lrmd_rsc_info_t
     GHashTable *metadata_cache; // key = class[:provider]:agent, value = ra_metadata_s
 
     int num_lrm_register_fails;     // consecutive failed connection attempts
 } lrm_state_t;
 
 struct pending_deletion_op_s {
     char *rsc;
     ha_msg_input_t *input;
 };
 
 /*!
  * \brief Check whether this is the local IPC connection to the executor
  */
 gboolean
 lrm_state_is_local(lrm_state_t *lrm_state);
 
 /*!
  * \brief Clear all state information from a single state entry.
  * \note It is sometimes useful to save the metadata cache when it won't go stale.
  * \note This does not close the executor connection
  */
 void lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata);
 GList *lrm_state_get_list(void);
 
 /*!
  * \brief Initialize internal state tables
  */
 gboolean lrm_state_init_local(void);
 
 /*!
  * \brief Destroy all state entries and internal state tables
  */
 void lrm_state_destroy_all(void);
 
 /*!
  * \brief Create executor connection entry
  */
 lrm_state_t *lrm_state_create(const char *node_name);
 
 /*!
  * \brief Destroy executor connection by node name
  */
 void lrm_state_destroy(const char *node_name);
 
 /*!
  * \brief Find lrm_state data by node name
  */
 lrm_state_t *lrm_state_find(const char *node_name);
 
 /*!
  * \brief Either find or create a new entry
  */
 lrm_state_t *lrm_state_find_or_create(const char *node_name);
 
 /*!
  * The functions below are wrappers for the executor API that the controller
  * uses. These wrapper functions allow us to treat the controller's remote
  * executor connection resources the same as regular resources. Internally,
  * regular resources go to the executor, and remote connection resources are
  * handled locally in the controller.
  */
 void lrm_state_disconnect_only(lrm_state_t * lrm_state);
 void lrm_state_disconnect(lrm_state_t * lrm_state);
 int lrm_state_ipc_connect(lrm_state_t * lrm_state);
 int lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port,
                                    int timeout);
 int lrm_state_is_connected(lrm_state_t * lrm_state);
 int lrm_state_poke_connection(lrm_state_t * lrm_state);
 
 int lrm_state_get_metadata(lrm_state_t * lrm_state,
                            const char *class,
                            const char *provider,
                            const char *agent, char **output, enum lrmd_call_options options);
 int lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                      const char *action, guint interval_ms);
 int lrm_state_exec(lrm_state_t *lrm_state, const char *rsc_id,
                    const char *action, const char *userdata, guint interval_ms,
                    int timeout, /* ms */
                    int start_delay,     /* ms */
                    lrmd_key_value_t * params);
 lrmd_rsc_info_t *lrm_state_get_rsc_info(lrm_state_t * lrm_state,
                                         const char *rsc_id, enum lrmd_call_options options);
 int lrm_state_register_rsc(lrm_state_t * lrm_state,
                            const char *rsc_id,
                            const char *class,
                            const char *provider, const char *agent, enum lrmd_call_options options);
 int lrm_state_unregister_rsc(lrm_state_t * lrm_state,
                              const char *rsc_id, enum lrmd_call_options options);
 
 // Functions used to manage remote executor connection resources
 void remote_lrm_op_callback(lrmd_event_data_t * op);
 gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id);
 lrmd_rsc_info_t *remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id);
 int remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                      const char *action, guint interval_ms);
 int remote_ra_exec(lrm_state_t *lrm_state, const char *rsc_id,
                    const char *action, const char *userdata, guint interval_ms,
                    int timeout, /* ms */
                    int start_delay,     /* ms */
                    lrmd_key_value_t * params);
 void remote_ra_cleanup(lrm_state_t * lrm_state);
 void remote_ra_fail(const char *node_name);
 void remote_ra_process_pseudo(xmlNode *xml);
 gboolean remote_ra_is_in_maintenance(lrm_state_t * lrm_state);
 void remote_ra_process_maintenance_nodes(xmlNode *xml);
 gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state);
 
-gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op, struct recurring_op_s *pending);
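+/*!
+ * \brief Process an executor event
+ *
+ * \param[in] lrm_state   Executor state for the event's node (may be NULL, in
+ *                        which case alert, metadata, deletion, and history
+ *                        updates are skipped)
+ * \param[in] op          Event to process
+ * \param[in] pending     Pending recurring operation the event matches, if any
+ * \param[in] action_xml  Action XML associated with the event, if any
+ */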
+void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
+                       struct recurring_op_s *pending, xmlNode *action_xml);
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 91ca19db7b..944094b97d 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1289 +1,1293 @@
 /*
  * Copyright 2013-2018 David Vossel <davidvossel@gmail.com>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 
 #include <pacemaker-controld.h>
 #include <controld_fsa.h>
 #include <controld_messages.h>
 #include <controld_callbacks.h>
 #include <controld_lrm.h>
 #include <crm/lrmd.h>
 #include <crm/services.h>
 
 #define REMOTE_LRMD_RA "remote"
 
 /* The max start timeout before cmd retry */
 #define MAX_START_TIMEOUT_MS 10000
 
 typedef struct remote_ra_cmd_s {
     /*! the local node the cmd is issued from */
     char *owner;
     /*! the remote node the cmd is executed on */
     char *rsc_id;
     /*! the action to execute */
     char *action;
     /*! some string the client wants us to give it back */
     char *userdata;
     char *exit_reason;          // descriptive text on error
     /*! start delay in ms */
     int start_delay;
     /*! timer id used for start delay. */
     int delay_id;
     /*! timeout in ms for cmd */
     int timeout;
     int remaining_timeout;
     /*! recurring interval in ms */
     guint interval_ms;
     /*! interval timer id */
     int interval_id;
     int reported_success;
     int monitor_timeout_id;
     int takeover_timeout_id;
     /*! action parameters */
     lrmd_key_value_t *params;
     /*! executed rc */
     int rc;
     int op_status;
     int call_id;
     time_t start_time;
     gboolean cancel;
 } remote_ra_cmd_t;
 
 enum remote_migration_status {
     expect_takeover = 1,
     takeover_complete,
 };
 
 typedef struct remote_ra_data_s {
     crm_trigger_t *work;
     remote_ra_cmd_t *cur_cmd;
     GList *cmds;
     GList *recurring_cmds;
 
     enum remote_migration_status migrate_status;
 
     gboolean active;
 
     /* Maintenance mode is difficult to determine from the controller's context,
      * so we have it signalled back with the transition from the scheduler.
      */
     gboolean is_maintenance;
 
     /* Similarly for whether we are controlling a guest node or a bare-metal
      * remote node. Fortunately, the transition already carries a
      * meta-attribute, and since the situation doesn't change over time, we
      * can note the information down at resource start for later use, when
      * the attributes aren't at hand.
      */
     gboolean controlling_guest;
 } remote_ra_data_t;
 
 static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
 static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
 static GList *fail_all_monitor_cmds(GList * list);
 
 static void
 free_cmd(gpointer user_data)
 {
     remote_ra_cmd_t *cmd = user_data;
 
     if (!cmd) {
         return;
     }
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
     }
     if (cmd->monitor_timeout_id) {
         g_source_remove(cmd->monitor_timeout_id);
     }
     if (cmd->takeover_timeout_id) {
         g_source_remove(cmd->takeover_timeout_id);
     }
     free(cmd->owner);
     free(cmd->rsc_id);
     free(cmd->action);
     free(cmd->userdata);
     free(cmd->exit_reason);
     lrmd_key_value_freeall(cmd->params);
     free(cmd);
 }
 
 static int
 generate_callid(void)
 {
     static int remote_ra_callid = 0;
 
     remote_ra_callid++;
     if (remote_ra_callid <= 0) {
         remote_ra_callid = 1;
     }
 
     return remote_ra_callid;
 }
 
 static gboolean
 recurring_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->interval_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
 
         ra_data->cmds = g_list_append(ra_data->cmds, cmd);
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->delay_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node joining
  *
  * \param[in] node_name  Name of newly integrated pacemaker_remote node
  */
 static void
 remote_node_up(const char *node_name)
 {
     int call_opt, call_id = 0;
     xmlNode *update, *state;
     crm_node_t *node;
 
     CRM_CHECK(node_name != NULL, return);
     crm_info("Announcing pacemaker_remote node %s", node_name);
 
     /* Clear node's operation history. The node's transient attributes should
      * and normally will be cleared when the node leaves, but since remote node
      * state has a number of corner cases, clear them here as well, to be sure.
      */
     call_opt = crmd_cib_smart_opt();
     erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt);
     erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt);
 
     /* Clear node's probed attribute */
     update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
 
     /* Ensure node is in the remote peer cache with member status */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0);
 
     /* pacemaker_remote nodes don't participate in the membership layer,
      * so cluster nodes don't automatically get notified when they come and go.
      * We send a cluster message to the DC, and update the CIB node state entry,
      * so the DC will get it sooner (via message) or later (via CIB refresh),
      * and any other interested parties can query the CIB.
      */
     send_remote_state_message(node_name, TRUE);
 
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_cluster, update,
                                      __FUNCTION__);
 
     /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
      * needs to be fenced, this flag will allow various actions to determine
      * whether the fencing has happened yet.
      */
     crm_xml_add(state, XML_NODE_IS_FENCED, "0");
 
     /* TODO: If the remote connection drops, and this (async) CIB update either
      * failed or has not yet completed, later actions could mistakenly think the
      * node has already been fenced (if the XML_NODE_IS_FENCED attribute was
      * previously set, because it won't have been cleared). This could prevent
      * actual fencing or allow recurring monitor failures to be cleared too
      * soon. Ideally, we wouldn't rely on the CIB for the fenced status.
      */
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_WARNING, "%s CIB node state setup", node_name);
     }
     free_xml(update);
 }
 
 enum down_opts {
     DOWN_KEEP_LRM,
     DOWN_ERASE_LRM
 };
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node leaving
  *
  * \param[in] node_name  Name of lost node
  * \param[in] opts       Whether to keep or erase LRM history
  */
 static void
 remote_node_down(const char *node_name, const enum down_opts opts)
 {
     xmlNode *update;
     int call_id = 0;
     int call_opt = crmd_cib_smart_opt();
     crm_node_t *node;
 
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     /* Purge node's transient attributes */
     erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt);
 
     /* Normally, the LRM operation history should be kept until the node comes
      * back up. However, after a successful fence, we want to clear it, so we
      * don't think resources are still running on the node.
      */
     if (opts == DOWN_ERASE_LRM) {
         erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt);
     }
 
     /* Ensure node is in the remote peer cache with lost state */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     crm_update_peer_state(__FUNCTION__, node, CRM_NODE_LOST, 0);
 
     /* Notify DC */
     send_remote_state_message(node_name, FALSE);
 
     /* Update CIB node state */
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     create_node_state_update(node, node_update_cluster, update, __FUNCTION__);
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_ERR, "%s CIB node state update", node_name);
     }
     free_xml(update);
 }
 
 /*!
  * \internal
  * \brief Handle effects of a remote RA command on node state
  *
  * \param[in] cmd  Completed remote RA command
  */
 static void
 check_remote_node_state(remote_ra_cmd_t *cmd)
 {
     /* Only successful actions can change node state */
     if (cmd->rc != PCMK_OCF_OK) {
         return;
     }
 
     if (safe_str_eq(cmd->action, "start")) {
         remote_node_up(cmd->rsc_id);
 
     } else if (safe_str_eq(cmd->action, "migrate_from")) {
         /* After a successful migration, we don't need to do remote_node_up()
          * because the DC already knows the node is up, and we don't want to
          * clear LRM history etc. We do need to add the remote node to this
          * host's remote peer cache, because (unless it happens to be DC)
          * it hasn't been tracking the remote node, and other code relies on
          * the cache to distinguish remote nodes from unseen cluster nodes.
          */
         crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
 
         CRM_CHECK(node != NULL, return);
         crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0);
 
     } else if (safe_str_eq(cmd->action, "stop")) {
         lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
         remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
 
         if (ra_data) {
             if (ra_data->migrate_status != takeover_complete) {
                 /* Stop means down if we didn't successfully migrate elsewhere */
                 remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
             } else if (AM_I_DC == FALSE) {
                 /* Only the connection host and DC track node state,
                  * so if the connection migrated elsewhere and we aren't DC,
                  * un-cache the node, so we don't have stale info
                  */
                 crm_remote_peer_cache_remove(cmd->rsc_id);
             }
         }
     }
 
     /* We don't do anything for successful monitors, which is correct for
      * routine recurring monitors, and for monitors on nodes where the
      * connection isn't supposed to be (the cluster will stop the connection in
      * that case). However, if the initial probe finds the connection already
      * active on the node where we want it, we probably should do
      * remote_node_up(). Unfortunately, we can't distinguish that case here.
      * Given that connections have to be initiated by the cluster, the chance of
      * that should be close to zero.
      */
 }
 
 static void
 report_remote_ra_result(remote_ra_cmd_t * cmd)
 {
     lrmd_event_data_t op = { 0, };
 
     check_remote_node_state(cmd);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = cmd->rsc_id;
     op.op_type = cmd->action;
     op.user_data = cmd->userdata;
     op.exit_reason = cmd->exit_reason;
     op.timeout = cmd->timeout;
     op.interval_ms = cmd->interval_ms;
     op.rc = cmd->rc;
     op.op_status = cmd->op_status;
     op.t_run = cmd->start_time;
     op.t_rcchange = cmd->start_time;
     if (cmd->reported_success && cmd->rc != PCMK_OCF_OK) {
         op.t_rcchange = time(NULL);
         /* This edge case will likely never occur, but if it does, the result
          * is that a failure will not be processed correctly. It is only
          * remotely possible because we can detect that a connection resource's
          * TCP connection has failed at any moment after start has completed;
          * the actual recurring operation is just a connectivity ping.
          *
          * Basically, we are not guaranteed that the first successful monitor
          * op and a subsequent failed monitor op will not share the same
          * timestamp, so we have to make it look like the operations occurred
          * at separate times. */
         if (op.t_rcchange == op.t_run) {
             op.t_rcchange++;
         }
     }
 
     if (cmd->params) {
         lrmd_key_value_t *tmp;
 
         op.params = crm_str_table_new();
         for (tmp = cmd->params; tmp; tmp = tmp->next) {
             g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
         }
 
     }
     op.call_id = cmd->call_id;
     op.remote_nodename = cmd->owner;
 
     lrm_op_callback(&op);
 
     if (op.params) {
         g_hash_table_destroy(op.params);
     }
 }
 
 static void
 update_remaining_timeout(remote_ra_cmd_t * cmd)
 {
     cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
 }
 
 static gboolean
 retry_start_cmd_cb(gpointer data)
 {
     lrm_state_t *lrm_state = data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd = NULL;
     int rc = -1;
 
     if (!ra_data || !ra_data->cur_cmd) {
         return FALSE;
     }
     cmd = ra_data->cur_cmd;
     if (safe_str_neq(cmd->action, "start") && safe_str_neq(cmd->action, "migrate_from")) {
         return FALSE;
     }
     update_remaining_timeout(cmd);
 
     if (cmd->remaining_timeout > 0) {
         rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
     }
 
     if (rc != 0) {
         cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
         cmd->op_status = PCMK_LRM_OP_ERROR;
         report_remote_ra_result(cmd);
 
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         ra_data->cur_cmd = NULL;
         free_cmd(cmd);
     } else {
         /* wait for connection event */
     }
 
     return FALSE;
 }
 
 
 static gboolean
 connection_takeover_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     crm_info("takeover event timed out for node %s", cmd->rsc_id);
     cmd->takeover_timeout_id = 0;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     handle_remote_ra_stop(lrm_state, cmd);
     free_cmd(cmd);
 
     return FALSE;
 }
 
 static gboolean
 monitor_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     crm_info("Timed out waiting for remote poke response from %s%s",
              cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
     cmd->monitor_timeout_id = 0;
     cmd->op_status = PCMK_LRM_OP_TIMEOUT;
     cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
 
     if (lrm_state && lrm_state->remote_ra_data) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         if (ra_data->cur_cmd == cmd) {
             ra_data->cur_cmd = NULL;
         }
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
     }
 
     report_remote_ra_result(cmd);
     free_cmd(cmd);
 
     if(lrm_state) {
         lrm_state_disconnect(lrm_state);
     }
     return FALSE;
 }
 
 static void
 synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
 {
     lrmd_event_data_t op = { 0, };
 
     if (lrm_state == NULL) {
         /* if lrm_state not given assume local */
         lrm_state = lrm_state_find(fsa_our_uname);
     }
     CRM_ASSERT(lrm_state != NULL);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = rsc_id;
     op.op_type = op_type;
     op.rc = PCMK_OCF_OK;
     op.op_status = PCMK_LRM_OP_DONE;
     op.t_run = time(NULL);
     op.t_rcchange = op.t_run;
     op.call_id = generate_callid();
-    process_lrm_event(lrm_state, &op, NULL);
+    process_lrm_event(lrm_state, &op, NULL, NULL);
 }
 
 void
 remote_lrm_op_callback(lrmd_event_data_t * op)
 {
     gboolean cmd_handled = FALSE;
     lrm_state_t *lrm_state = NULL;
     remote_ra_data_t *ra_data = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
-    crm_debug("remote connection event - event_type:%s node:%s action:%s rc:%s op_status:%s",
-              lrmd_event_type2str(op->type),
-              op->remote_nodename,
-              op->op_type ? op->op_type : "none",
-              services_ocf_exitcode_str(op->rc), services_lrm_status_str(op->op_status));
+    crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
+              "(%d) status=%s (%d)",
+              (op->op_type? op->op_type : ""), (op->op_type? " " : ""),
+              lrmd_event_type2str(op->type), op->remote_nodename,
+              services_ocf_exitcode_str(op->rc), op->rc,
+              services_lrm_status_str(op->op_status), op->op_status);
 
     lrm_state = lrm_state_find(op->remote_nodename);
     if (!lrm_state || !lrm_state->remote_ra_data) {
         crm_debug("No state information found for remote connection event");
         return;
     }
     ra_data = lrm_state->remote_ra_data;
 
     if (op->type == lrmd_event_new_client) {
         // Another client has connected to the remote daemon
 
         if (ra_data->migrate_status == expect_takeover) {
             // Great, we knew this was coming
             ra_data->migrate_status = takeover_complete;
 
         } else {
             crm_err("Unexpected pacemaker_remote client takeover for %s. Disconnecting", op->remote_nodename);
             /* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */
             /* Do not free lrm_state->conn yet. */
             /* It'll be freed in the following stop action. */
             lrm_state_disconnect_only(lrm_state);
         }
         return;
     }
 
     /* filter all EXEC events up */
     if (op->type == lrmd_event_exec_complete) {
         if (ra_data->migrate_status == takeover_complete) {
             crm_debug("ignoring event, this connection is taken over by another node");
         } else {
             lrm_op_callback(op);
         }
         return;
     }
 
-    if ((op->type == lrmd_event_disconnect) &&
-        (ra_data->cur_cmd == NULL) &&
-        (ra_data->active == TRUE)) {
+    if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
 
-        if (!remote_ra_is_in_maintenance(lrm_state)) {
+        if (ra_data->active == FALSE) {
+            crm_debug("Disconnection from Pacemaker Remote node %s complete",
+                      lrm_state->node_name);
+
+        } else if (!remote_ra_is_in_maintenance(lrm_state)) {
             crm_err("Lost connection to Pacemaker Remote node %s",
                     lrm_state->node_name);
             ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
             ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
+
         } else {
             crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
                        lrm_state->node_name);
             /* Do roughly what a 'stop' on the remote-resource would do */
             handle_remote_ra_stop(lrm_state, NULL);
             remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
             /* now fake the reply of a successful 'stop' */
             synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
         }
         return;
     }
 
     if (!ra_data->cur_cmd) {
         crm_debug("no event to match");
         return;
     }
 
     cmd = ra_data->cur_cmd;
 
     /* Start and migrate_from actions complete after the connection
      * comes back to us. */
     if (op->type == lrmd_event_connect && (safe_str_eq(cmd->action, "start") ||
                                            safe_str_eq(cmd->action, "migrate_from"))) {
 
         if (op->connection_rc < 0) {
             update_remaining_timeout(cmd);
 
             if (op->connection_rc == -ENOKEY) {
                 // Hard error, don't retry
                 cmd->op_status = PCMK_LRM_OP_ERROR;
                 cmd->rc = PCMK_OCF_INVALID_PARAM;
                 cmd->exit_reason = strdup("Authentication key not readable");
 
             } else if (cmd->remaining_timeout > 3000) {
                 crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
                 g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
                 return;
 
             } else {
                 crm_trace("can't reschedule start, remaining timeout too small %d",
                           cmd->remaining_timeout);
                 cmd->op_status = PCMK_LRM_OP_TIMEOUT;
                 cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
             }
 
         } else {
             lrm_state_reset_tables(lrm_state, TRUE);
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             ra_data->active = TRUE;
         }
 
         crm_debug("Remote connection event matched %s action", cmd->action);
         report_remote_ra_result(cmd);
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_poke && safe_str_eq(cmd->action, "monitor")) {
 
         if (cmd->monitor_timeout_id) {
             g_source_remove(cmd->monitor_timeout_id);
             cmd->monitor_timeout_id = 0;
         }
 
         /* Only report success the first time; after that, only worry about
          * failures. For this function, if we get the poke back, it is always a
          * success. Pokes only fail if the send fails or the response times out. */
         if (!cmd->reported_success) {
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
             cmd->reported_success = 1;
         }
 
         crm_debug("Remote poke event matched %s action", cmd->action);
 
         /* success, keep rescheduling if interval is present. */
         if (cmd->interval_ms && (cmd->cancel == FALSE)) {
             ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
             cmd->interval_id = g_timeout_add(cmd->interval_ms,
                                              recurring_helper, cmd);
             cmd = NULL;         /* prevent free */
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_disconnect && safe_str_eq(cmd->action, "monitor")) {
         if (ra_data->active == TRUE && (cmd->cancel == FALSE)) {
             cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
             cmd->op_status = PCMK_LRM_OP_ERROR;
             report_remote_ra_result(cmd);
             crm_err("Remote connection to %s unexpectedly dropped during monitor",
                     lrm_state->node_name);
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_new_client && safe_str_eq(cmd->action, "stop")) {
 
         handle_remote_ra_stop(lrm_state, cmd);
         cmd_handled = TRUE;
 
     } else {
         crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
     }
 
     if (cmd_handled) {
         ra_data->cur_cmd = NULL;
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         free_cmd(cmd);
     }
 }
 
 static void
 handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
 {
     remote_ra_data_t *ra_data = NULL;
 
     CRM_ASSERT(lrm_state);
     ra_data = lrm_state->remote_ra_data;
 
     if (ra_data->migrate_status != takeover_complete) {
         /* Delete pending ops whenever the remote connection is intentionally stopped */
         g_hash_table_remove_all(lrm_state->pending_ops);
     } else {
         /* We no longer hold the history if this connection has been migrated;
          * however, we keep the metadata cache for future use */
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     ra_data->active = FALSE;
     lrm_state_disconnect(lrm_state);
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     ra_data->cmds = NULL;
     ra_data->recurring_cmds = NULL;
     ra_data->cur_cmd = NULL;
 
     if (cmd) {
         cmd->rc = PCMK_OCF_OK;
         cmd->op_status = PCMK_LRM_OP_DONE;
 
         report_remote_ra_result(cmd);
     }
 }
 
 static int
 handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
 {
     const char *server = NULL;
     lrmd_key_value_t *tmp = NULL;
     int port = 0;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
 
     for (tmp = cmd->params; tmp; tmp = tmp->next) {
         if (safe_str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR) ||
             safe_str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_SERVER)) {
             server = tmp->value;
         } else if (safe_str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT)) {
             port = atoi(tmp->value);
         } else if (safe_str_eq(tmp->key, CRM_META"_"XML_RSC_ATTR_CONTAINER)) {
             ra_data->controlling_guest = TRUE;
         }
     }
 
     return lrm_state_remote_connect_async(lrm_state, server, port, timeout_used);
 }
 
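 /* Mainloop trigger handler: execute queued remote RA commands one at a time.
  * Commands that must wait for an asynchronous result (start/connect, monitor
  * poke, stop awaiting takeover) are parked in cur_cmd and completed later
  * when the corresponding event or timeout arrives. */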
 static gboolean
 handle_remote_ra_exec(gpointer user_data)
 {
     int rc = 0;
     lrm_state_t *lrm_state = user_data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd;
     GList *first = NULL;
 
     if (ra_data->cur_cmd) {
         /* still waiting on previous cmd */
         return TRUE;
     }
 
     while (ra_data->cmds) {
         first = ra_data->cmds;
         cmd = first->data;
         if (cmd->delay_id) {
             /* still waiting for start delay timer to trip */
             return TRUE;
         }
 
         ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
         g_list_free_1(first);
 
         if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
             ra_data->migrate_status = 0;
             rc = handle_remote_ra_start(lrm_state, cmd, cmd->timeout);
             if (rc == 0) {
                 /* take care of this later when we get async connection result */
                 crm_debug("Remote connection started, waiting for connect event");
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             } else {
                 crm_debug("connect failed, not expecting to match any connection event later");
                 cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
                 cmd->op_status = PCMK_LRM_OP_ERROR;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "monitor")) {
 
             if (lrm_state_is_connected(lrm_state) == TRUE) {
                 rc = lrm_state_poke_connection(lrm_state);
                 if (rc < 0) {
                     cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
                     cmd->op_status = PCMK_LRM_OP_ERROR;
                 }
             } else {
                 rc = -1;
                 cmd->op_status = PCMK_LRM_OP_DONE;
                 cmd->rc = PCMK_OCF_NOT_RUNNING;
             }
 
             if (rc == 0) {
                 crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
                           cmd->rsc_id);
                 ra_data->cur_cmd = cmd;
                 cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "stop")) {
 
             if (ra_data->migrate_status == expect_takeover) {
                /* Briefly wait on stop for the takeover event to occur. If the
                 * takeover event does not occur during the wait period, that's fine;
                 * it just means that the remote node's lrm_status section is going to
                 * be cleared, which will require all the resources running on the
                 * remote node to be explicitly re-detected via probe actions. If the
                 * takeover does occur successfully, we can leave the status section
                 * intact. */
                 cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
 
             handle_remote_ra_stop(lrm_state, cmd);
 
         } else if (!strcmp(cmd->action, "migrate_to")) {
             ra_data->migrate_status = expect_takeover;
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
         } else if (!strcmp(cmd->action, "reload")) {
            /* Reloads are a no-op right now; add logic here when they become important */
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
         }
 
         free_cmd(cmd);
     }
 
     return TRUE;
 }
 
 static void
 remote_ra_data_init(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = NULL;
 
     if (lrm_state->remote_ra_data) {
         return;
     }
 
     ra_data = calloc(1, sizeof(remote_ra_data_t));
     ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
     lrm_state->remote_ra_data = ra_data;
 }
 
 void
 remote_ra_cleanup(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     if (!ra_data) {
         return;
     }
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
 
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     mainloop_destroy_trigger(ra_data->work);
     free(ra_data);
     lrm_state->remote_ra_data = NULL;
 }
 
 gboolean
 is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
 {
     if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
         return TRUE;
     }
     if (id && lrm_state_find(id) && safe_str_neq(id, fsa_our_uname)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 lrmd_rsc_info_t *
 remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
 {
     lrmd_rsc_info_t *info = NULL;
 
     if ((lrm_state_find(rsc_id))) {
         info = calloc(1, sizeof(lrmd_rsc_info_t));
 
         info->id = strdup(rsc_id);
         info->type = strdup(REMOTE_LRMD_RA);
         info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
         info->provider = strdup("pacemaker");
     }
 
     return info;
 }
 
 static gboolean
 is_remote_ra_supported_action(const char *action)
 {
     if (!action) {
         return FALSE;
     } else if (strcmp(action, "start") &&
                strcmp(action, "stop") &&
                strcmp(action, "reload") &&
                strcmp(action, "migrate_to") &&
                strcmp(action, "migrate_from") && strcmp(action, "monitor")) {
         return FALSE;
     }
 
     return TRUE;
 }
 
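 /* Report a failure for every recurring monitor in the given command list,
  * then remove those entries from the list and free them; returns the
  * updated list. */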
 static GList *
 fail_all_monitor_cmds(GList * list)
 {
     GList *rm_list = NULL;
     remote_ra_cmd_t *cmd = NULL;
     GListPtr gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms > 0) && safe_str_eq(cmd->action, "monitor")) {
             rm_list = g_list_append(rm_list, cmd);
         }
     }
 
     for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
 
         cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
         cmd->op_status = PCMK_LRM_OP_ERROR;
         crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
                   cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
         report_remote_ra_result(cmd);
 
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
 
     /* frees only the list data, not the cmds */
     g_list_free(rm_list);
     return list;
 }
 
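 /* Remove and free the first command in the list that matches the given
  * action name and interval; returns the updated list. */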
 static GList *
 remove_cmd(GList * list, const char *action, guint interval_ms)
 {
     remote_ra_cmd_t *cmd = NULL;
     GListPtr gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, action)) {
             break;
         }
         cmd = NULL;
     }
     if (cmd) {
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
     return list;
 }
 
 int
 remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                  const char *action, guint interval_ms)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc || !connection_rsc->remote_ra_data) {
         return -EINVAL;
     }
 
     ra_data = connection_rsc->remote_ra_data;
     ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
     ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
                                          interval_ms);
     if (ra_data->cur_cmd &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         (safe_str_eq(ra_data->cur_cmd->action, action))) {
 
         ra_data->cur_cmd->cancel = TRUE;
     }
 
     return 0;
 }
 
 static remote_ra_cmd_t *
 handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
                    const char *userdata)
 {
     GList *gIter = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
    /* There are three places a potential duplicate monitor operation
     * could exist:
     * 1. the recurring_cmds list, where the op is waiting for its next interval
     * 2. the cmds list, where the op is queued to get executed immediately
     * 3. cur_cmd, which means the monitor op is in flight right now
     */
     if (interval_ms == 0) {
         return NULL;
     }
 
     if (ra_data->cur_cmd &&
         ra_data->cur_cmd->cancel == FALSE &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         safe_str_eq(ra_data->cur_cmd->action, "monitor")) {
 
         cmd = ra_data->cur_cmd;
         goto handle_dup;
     }
 
     for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, "monitor")) {
             goto handle_dup;
         }
     }
 
     for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, "monitor")) {
             goto handle_dup;
         }
     }
 
     return NULL;
 
 handle_dup:
 
     crm_trace("merging duplicate monitor cmd " CRM_OP_FMT,
               cmd->rsc_id, "monitor", interval_ms);
 
     /* update the userdata */
     if (userdata) {
        free(cmd->userdata);
        cmd->userdata = strdup(userdata);
     }
 
     /* if we've already reported success, generate a new call id */
     if (cmd->reported_success) {
         cmd->start_time = time(NULL);
         cmd->call_id = generate_callid();
         cmd->reported_success = 0;
     }
 
    /* If we have an interval_id set, we are in the process of waiting for
     * this cmd's next interval. Instead of waiting, cancel the timer and
     * execute the action immediately. */
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
         cmd->interval_id = 0;
         recurring_helper(cmd);
     }
 
    return cmd;
 }
 
 int
 remote_ra_exec(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
                const char *userdata, guint interval_ms,
                int timeout,     /* ms */
                int start_delay, /* ms */
                lrmd_key_value_t * params)
 {
     int rc = 0;
     lrm_state_t *connection_rsc = NULL;
     remote_ra_cmd_t *cmd = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     if (is_remote_ra_supported_action(action) == FALSE) {
         rc = -EINVAL;
         goto exec_done;
     }
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc) {
         rc = -EINVAL;
         goto exec_done;
     }
 
     remote_ra_data_init(connection_rsc);
     ra_data = connection_rsc->remote_ra_data;
 
     cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
     if (cmd) {
        return cmd->call_id;
     }
 
     cmd = calloc(1, sizeof(remote_ra_cmd_t));
     cmd->owner = strdup(lrm_state->node_name);
     cmd->rsc_id = strdup(rsc_id);
     cmd->action = strdup(action);
     cmd->userdata = strdup(userdata);
     cmd->interval_ms = interval_ms;
     cmd->timeout = timeout;
     cmd->start_delay = start_delay;
     cmd->params = params;
     cmd->start_time = time(NULL);
 
     cmd->call_id = generate_callid();
 
     if (cmd->start_delay) {
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 
     ra_data->cmds = g_list_append(ra_data->cmds, cmd);
     mainloop_set_trigger(ra_data->work);
 
     return cmd->call_id;
   exec_done:
 
     lrmd_key_value_freeall(params);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Immediately fail all monitors of a remote node, if proxied here
  *
  * \param[in] node_name  Name of pacemaker_remote node
  */
 void
 remote_ra_fail(const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state && lrm_state_is_connected(lrm_state)) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         crm_info("Failing monitors on pacemaker_remote node %s", node_name);
         ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
         ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
     }
 }
 
 /* A guest node fencing implied by host fencing looks like:
  *
  *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
  *                on_node="lxc1" on_node_uuid="lxc1">
  *     <attributes CRM_meta_master_lxc_ms="10" CRM_meta_on_node="lxc1"
  *                 CRM_meta_on_node_uuid="lxc1" CRM_meta_stonith_action="off"
  *                 crm_feature_set="3.0.12"/>
  *     <downed>
  *       <node id="lxc1"/>
  *     </downed>
  *  </pseudo_event>
  */
 #define XPATH_PSEUDO_FENCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
     "/" XML_CIB_TAG_NODE
 
 /*!
  * \internal
  * \brief Check a pseudo-action for Pacemaker Remote node side effects
  *
  * \param[in] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_pseudo(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *result = getXpathResult(search, 0);
 
         /* Normally, we handle the necessary side effects of a guest node stop
          * action when reporting the remote agent's result. However, if the stop
          * is implied due to fencing, it will be a fencing pseudo-event, and
          * there won't be a result to report. Handle that case here.
          *
          * This will result in a duplicate call to remote_node_down() if the
          * guest stop was real instead of implied, but that shouldn't hurt.
          *
          * There is still one corner case that isn't handled: if a guest node
          * isn't running any resources when its host is fenced, it will appear
          * to be cleanly stopped, so there will be no pseudo-fence, and our
          * peer cache state will be incorrect unless and until the guest is
          * recovered.
          */
         if (result) {
             const char *remote = ID(result);
 
             if (remote) {
                 remote_node_down(remote, DOWN_ERASE_LRM);
             }
         }
     }
     freeXpathObject(search);
 }
 
 static void
 remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     xmlNode *update, *state;
     int call_opt, call_id = 0;
     crm_node_t *node;
 
     call_opt = crmd_cib_smart_opt();
     node = crm_remote_peer_get(lrm_state->node_name);
     CRM_CHECK(node != NULL, return);
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_none, update,
                                      __FUNCTION__);
     crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_WARNING, "%s CIB node state update failed", lrm_state->node_name);
     } else {
         /* TODO: still not 100% sure that async update will succeed ... */
         ra_data->is_maintenance = maintenance;
     }
     free_xml(update);
 }
 
 #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
     XML_GRAPH_TAG_MAINTENANCE
 
 /*!
  * \internal
  * \brief Check a pseudo-action holding updates for maintenance state
  *
  * \param[in] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_maintenance_nodes(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *node;
         int cnt = 0, cnt_remote = 0;
 
         for (node =
                 first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
             node; node = __xml_next(node)) {
             lrm_state_t *lrm_state = lrm_state_find(ID(node));
 
             cnt++;
             if (lrm_state && lrm_state->remote_ra_data &&
                 ((remote_ra_data_t *) lrm_state->remote_ra_data)->active) {
                 cnt_remote++;
                 remote_ra_maintenance(lrm_state,
                                         crm_atoi(crm_element_value(node,
                                             XML_NODE_IS_MAINTENANCE), "0"));
 
             }
         }
         crm_trace("Action holds %d nodes (%d remotes found) "
                     "adjusting maintenance-mode", cnt, cnt_remote);
     }
     freeXpathObject(search);
 }
 
 gboolean
 remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     return ra_data->is_maintenance;
 }
 
 gboolean
 remote_ra_controlling_guest(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     return ra_data->controlling_guest;
 }
diff --git a/daemons/schedulerd/sched_allocate.c b/daemons/schedulerd/sched_allocate.c
index c22b29c821..91eb70293c 100644
--- a/daemons/schedulerd/sched_allocate.c
+++ b/daemons/schedulerd/sched_allocate.c
@@ -1,2566 +1,2574 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-schedulerd.h>
 #include <sched_allocate.h>
 #include <sched_utils.h>
 
 CRM_TRACE_INIT_DATA(pe_allocate);
 
 void set_alloc_actions(pe_working_set_t * data_set);
 extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
 extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
 static void apply_remote_node_ordering(pe_working_set_t *data_set);
 static enum remote_connection_state get_remote_node_state(pe_node_t *node);
 
 enum remote_connection_state {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static const char *
 state2text(enum remote_connection_state state)
 {
     switch (state) {
         case remote_state_unknown:
             return "unknown";
         case remote_state_alive:
             return "alive";
         case remote_state_resting:
             return "resting";
         case remote_state_failed:
             return "failed";
         case remote_state_stopped:
             return "stopped";
     }
 
     return "impossible";
 }
 
 resource_alloc_functions_t resource_class_alloc_functions[] = {
     {
      native_merge_weights,
      native_color,
      native_create_actions,
      native_create_probe,
      native_internal_constraints,
      native_rsc_colocation_lh,
      native_rsc_colocation_rh,
      native_rsc_location,
      native_action_flags,
      native_update_actions,
      native_expand,
      native_append_meta,
      },
     {
      group_merge_weights,
      group_color,
      group_create_actions,
      native_create_probe,
      group_internal_constraints,
      group_rsc_colocation_lh,
      group_rsc_colocation_rh,
      group_rsc_location,
      group_action_flags,
      group_update_actions,
      group_expand,
      group_append_meta,
      },
     {
      clone_merge_weights,
      clone_color,
      clone_create_actions,
      clone_create_probe,
      clone_internal_constraints,
      clone_rsc_colocation_lh,
      clone_rsc_colocation_rh,
      clone_rsc_location,
      clone_action_flags,
      container_update_actions,
      clone_expand,
      clone_append_meta,
      },
     {
      container_merge_weights,
      container_color,
      container_create_actions,
      container_create_probe,
      container_internal_constraints,
      container_rsc_colocation_lh,
      container_rsc_colocation_rh,
      container_rsc_location,
      container_action_flags,
      container_update_actions,
      container_expand,
      container_append_meta,
      }
 };
 
 gboolean
 update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
 {
     static unsigned long calls = 0;
     gboolean changed = FALSE;
     gboolean clear = is_set(flags, pe_action_clear);
     enum pe_action_flags last = action->flags;
 
     if (clear) {
         action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
     } else {
         action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
     }
 
     if (last != action->flags) {
         calls++;
         changed = TRUE;
         /* Useful for tracking down _who_ changed a specific flag */
         /* CRM_ASSERT(calls != 534); */
         clear_bit(flags, pe_action_clear);
         crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
                   action->uuid, action->node ? action->node->details->uname : "[none]",
                   clear ? "un-" : "", flags, last, action->flags, calls, source);
     }
 
     return changed;
 }
 
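 /* Compare a resource's current definition (type, class, provider) against
  * the values recorded in its lrm history entry. If the resource is active
  * here and any of them changed, force a restart. Returns TRUE if the stale
  * history should be removed (callers respond with DeleteRsc()). */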
 static gboolean
 check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
                      gboolean active_here, pe_working_set_t * data_set)
 {
     int attr_lpc = 0;
     gboolean force_restart = FALSE;
     gboolean delete_resource = FALSE;
     gboolean changed = FALSE;
 
     const char *value = NULL;
     const char *old_value = NULL;
 
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
     for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
         value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
         old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
         if (value == old_value  /* i.e. NULL */
             || crm_str_eq(value, old_value, TRUE)) {
             continue;
         }
 
         changed = TRUE;
         trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
         if (active_here) {
             force_restart = TRUE;
             crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
                        rsc->id, node->details->uname, attr_list[attr_lpc],
                        crm_str(old_value), crm_str(value));
         }
     }
     if (force_restart) {
         /* make sure the restart happens */
         stop_action(rsc, node, FALSE);
         set_bit(rsc->flags, pe_rsc_start_pending);
         delete_resource = TRUE;
 
     } else if (changed) {
         delete_resource = TRUE;
     }
     return delete_resource;
 }
 
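 /* Schedule cancellation of the recurring operation described by xml_op on
  * the given node, ordered relative to the resource's stop action. */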
 static void
 CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
             const char *reason, pe_working_set_t * data_set)
 {
     guint interval_ms = 0;
     action_t *cancel = NULL;
 
     const char *task = NULL;
     const char *call_id = NULL;
     const char *interval_ms_s = NULL;
 
     CRM_CHECK(xml_op != NULL, return);
     CRM_CHECK(active_node != NULL, return);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
 
     interval_ms = crm_parse_ms(interval_ms_s);
 
     crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s",
              rsc->id, task, interval_ms,
              active_node->details->uname, (reason? reason : "unknown"));
 
     cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
     custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
 }
 
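 /* Check whether a recorded operation's digest still matches the resource's
  * current definition; if parameters changed, schedule a reload or restart
  * (and unfencing where relevant). Returns TRUE if a change was detected. */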
 static gboolean
 check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
                         pe_working_set_t * data_set)
 {
     char *key = NULL;
     guint interval_ms = 0;
     const char *interval_ms_s = NULL;
     const op_digest_cache_t *digest_data = NULL;
     gboolean did_change = FALSE;
 
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *digest_secure = NULL;
 
     CRM_CHECK(active_node != NULL, return FALSE);
 
     interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
     interval_ms = crm_parse_ms(interval_ms_s);
 
     if (interval_ms > 0) {
         xmlNode *op_match = NULL;
 
         /* we need to reconstruct the key because of the way we used to construct resource IDs */
         key = generate_op_key(rsc->id, task, interval_ms);
 
         pe_rsc_trace(rsc, "Checking parameters for %s", key);
         op_match = find_rsc_op_entry(rsc, key);
 
         if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
             CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
             free(key);
             return TRUE;
 
         } else if (op_match == NULL) {
             pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
             free(key);
             return TRUE;
         }
         free(key);
         key = NULL;
     }
 
     crm_trace("Testing " CRM_OP_FMT " on %s",
               rsc->id, task, interval_ms, active_node->details->uname);
     if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) {
         /* Reload based on the start action not a probe */
         task = RSC_START;
 
     } else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) {
         /* Reload based on the start action not a migrate */
         task = RSC_START;
     } else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) {
         /* Reload based on the start action not a promote */
         task = RSC_START;
     }
 
     digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
 
     if(is_set(data_set->flags, pe_flag_sanitized)) {
         digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
     }
 
     if(digest_data->rc != RSC_DIGEST_MATCH
        && digest_secure
        && digest_data->digest_secure_calc
        && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
         if (is_set(data_set->flags, pe_flag_stdout)) {
             printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n",
                    rsc->id, task, interval_ms, active_node->details->uname,
                    crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         }
 
     } else if (digest_data->rc == RSC_DIGEST_RESTART) {
         /* Changes that force a restart */
         pe_action_t *required = NULL;
 
         did_change = TRUE;
         key = generate_op_key(rsc->id, task, interval_ms);
         crm_log_xml_info(digest_data->params_restart, "params:restart");
         required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
         pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
                                   "resource definition change", pe_action_optional, TRUE);
 
         trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
 
     } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
         /* Changes that can potentially be handled by a reload */
         const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
         did_change = TRUE;
         trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
         crm_log_xml_info(digest_data->params_all, "params:reload");
         key = generate_op_key(rsc->id, task, interval_ms);
 
         if (interval_ms > 0) {
             action_t *op = NULL;
 
 #if 0
             /* Always reload/restart the entire resource */
             ReloadRsc(rsc, active_node, data_set);
 #else
             /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
             op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
             set_bit(op->flags, pe_action_reschedule);
 #endif
 
         } else if (digest_restart) {
             pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
 
             /* Reload this resource */
             ReloadRsc(rsc, active_node, data_set);
             free(key);
 
         } else {
             pe_action_t *required = NULL;
             pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
 
             /* Re-send the start/demote/promote op
              * Recurring ops will be detected independently
              */
             required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
             pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
                                       "resource definition change", pe_action_optional, TRUE);
         }
     }
 
     return did_change;
 }
 
 /*!
  * \internal
  * \brief Do deferred action checks after allocation
  *
  * \param[in] rsc       Resource whose action is being checked
  * \param[in] node      Node where the action's history was recorded
  * \param[in] rsc_op    XML of the recorded resource operation
  * \param[in] check     Which deferred check to perform
  * \param[in] data_set  Working set for cluster
  */
 static void
 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
              enum pe_check_parameters check, pe_working_set_t *data_set)
 {
     const char *reason = NULL;
     op_digest_cache_t *digest_data = NULL;
 
     switch (check) {
         case pe_check_active:
             if (check_action_definition(rsc, node, rsc_op, data_set)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
 
                 reason = "action definition changed";
             }
             break;
 
         case pe_check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s has no digest to compare",
                               rsc->id, ID(rsc_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
 
     if (reason) {
         pe__clear_failcount(rsc, node, reason, data_set);
     }
 }
 
 static void
 check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int offset = -1;
     guint interval_ms = 0;
     int stop_index = 0;
     int start_index = 0;
 
     const char *task = NULL;
     const char *interval_ms_s = NULL;
 
     xmlNode *rsc_op = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
 
     CRM_CHECK(node != NULL, return);
 
     if (is_set(rsc->flags, pe_rsc_orphan)) {
         resource_t *parent = uber_parent(rsc);
         if(parent == NULL
            || pe_rsc_is_clone(parent) == FALSE
            || is_set(parent->flags, pe_rsc_unique)) {
             pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
             DeleteRsc(rsc, node, FALSE, data_set);
         } else {
             pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
         }
         return;
 
     } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
             DeleteRsc(rsc, node, FALSE, data_set);
         }
         pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
 
     if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
         DeleteRsc(rsc, node, FALSE, data_set);
     }
 
     for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         offset++;
 
         if (start_index < stop_index) {
             /* stopped */
             continue;
         } else if (offset < start_index) {
             /* action occurred prior to a start */
             continue;
         }
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
 
         interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS);
         interval_ms = crm_parse_ms(interval_ms_s);
 
         if ((interval_ms > 0) &&
             (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
             // Maintenance mode cancels recurring operations
             CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
 
         } else if ((interval_ms > 0)
                    || safe_str_eq(task, RSC_STATUS)
                    || safe_str_eq(task, RSC_START)
                    || safe_str_eq(task, RSC_PROMOTE)
                    || safe_str_eq(task, RSC_MIGRATED)) {
 
            /* If a resource operation failed, and the operation's definition
             * has changed, clear any fail count so it can be retried fresh.
             */
 
             if (container_fix_remote_addr(rsc)) {
                 /* We haven't allocated resources to nodes yet, so if the
                  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
                  * based on the literal "#uname" value rather than the properly
                  * substituted value. That would mistakenly make the action
                  * definition appear to have been changed. Defer the check until
                  * later in this case.
                  */
                 pe__add_param_check(rsc_op, rsc, node, pe_check_active,
                                     data_set);
 
             } else if (check_action_definition(rsc, node, rsc_op, data_set)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
                 pe__clear_failcount(rsc, node, "action definition changed",
                                     data_set);
             }
         }
     }
     g_list_free(sorted_op_list);
 }
 
 static GListPtr
 find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
               gboolean partial, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     gboolean match = FALSE;
 
     if (id == NULL) {
         return NULL;
 
     } else if (rsc == NULL && data_set) {
 
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
 
         return result;
 
     } else if (rsc == NULL) {
         return NULL;
     }
 
     if (partial) {
         if (strstr(rsc->id, id)) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
             match = TRUE;
         }
 
     } else {
         if (strcmp(rsc->id, id) == 0) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
             match = TRUE;
         }
     }
 
     if (match) {
         result = g_list_prepend(result, rsc);
     }
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
     }
 
     return result;
 }
 
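 /* Walk the CIB status section and, for each usable node, check the recorded
  * operations of every matching resource for orphaned or changed definitions. */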
 static void
 check_actions(pe_working_set_t * data_set)
 {
     const char *id = NULL;
     node_t *node = NULL;
     xmlNode *lrm_rscs = NULL;
     xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
 
     xmlNode *node_state = NULL;
 
     for (node_state = __xml_first_child(status); node_state != NULL;
          node_state = __xml_next_element(node_state)) {
         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
             id = crm_element_value(node_state, XML_ATTR_ID);
             lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
             lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
 
             node = pe_find_node_id(data_set->nodes, id);
 
             if (node == NULL) {
                 continue;
 
             /* Still need to check actions for a maintenance node to cancel existing monitor operations */
             } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
                 crm_trace("Skipping param check for %s: can't run resources",
                           node->details->uname);
                 continue;
             }
 
             crm_trace("Processing node %s", node->details->uname);
             if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 xmlNode *rsc_entry = NULL;
 
                 for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
                      rsc_entry = __xml_next_element(rsc_entry)) {
                     if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
 
                         if (xml_has_children(rsc_entry)) {
                             GListPtr gIter = NULL;
                             GListPtr result = NULL;
                             const char *rsc_id = ID(rsc_entry);
 
                             CRM_CHECK(rsc_id != NULL, return);
 
                             result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
                             for (gIter = result; gIter != NULL; gIter = gIter->next) {
                                 resource_t *rsc = (resource_t *) gIter->data;
 
                                 if (rsc->variant != pe_native) {
                                     continue;
                                 }
                                 check_actions_for(rsc_entry, rsc, node, data_set);
                             }
                             g_list_free(result);
                         }
                     }
                 }
             }
         }
     }
 }
 
 static gboolean
 apply_placement_constraints(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying constraints...");
 
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *cons = gIter->data;
 
         cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
     }
 
     return TRUE;
 
 }
 
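 /* Return TRUE if a clear-failcount action has already been scheduled for
  * this resource on this node. */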
 static gboolean
 failcount_clear_action_exists(node_t * node, resource_t * rsc)
 {
     gboolean rc = FALSE;
     char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
     GListPtr list = find_actions_exact(rsc->actions, key, node);
 
     if (list) {
         rc = TRUE;
     }
     g_list_free(list);
     free(key);
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Force resource away if failures hit migration threshold
  *
  * \param[in,out] rsc       Resource to check for failures
  * \param[in,out] node      Node to check for failures
  * \param[in,out] data_set  Cluster working set to update
  */
 static void
 check_migration_threshold(resource_t *rsc, node_t *node,
                           pe_working_set_t *data_set)
 {
     int fail_count, countdown;
     resource_t *failed;
 
     /* Migration threshold of 0 means never force away */
     if (rsc->migration_threshold == 0) {
         return;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return;
     }
 
     /* If there are no failures, there's no need to force away */
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL,
                                   data_set);
     if (fail_count <= 0) {
         return;
     }
 
     /* How many more times recovery will be tried on this node */
     countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
 
     /* If failed resource has a parent, we'll force the parent away */
     failed = rsc;
     if (is_not_set(rsc->flags, pe_rsc_unique)) {
         failed = uber_parent(rsc);
     }
 
     if (countdown == 0) {
         resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
         crm_warn("Forcing %s away from %s after %d failures (max=%d)",
                  failed->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
     } else {
         crm_info("%s can fail %d more times on %s before being forced off",
                  failed->id, countdown, node->details->uname);
     }
 }
 
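 /* Apply stickiness as a location preference for the node a resource is
  * currently active on (where applicable), then check the migration threshold
  * unless a clear-failcount action is already scheduled for this node. */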
 static void
 common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child_rsc = (resource_t *) gIter->data;
 
             common_apply_stickiness(child_rsc, node, data_set);
         }
         return;
     }
 
     if (is_set(rsc->flags, pe_rsc_managed)
         && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
         node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
         node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
         if (current == NULL) {
 
         } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
             resource_t *sticky_rsc = rsc;
 
             resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
             pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
                          " (node=%s, weight=%d)", sticky_rsc->id,
                          node->details->uname, rsc->stickiness);
         } else {
             GHashTableIter iter;
             node_t *nIter = NULL;
 
             pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
                          " and node %s is not explicitly allowed", rsc->id, node->details->uname);
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
                 crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
             }
         }
     }
 
     /* Check the migration threshold only if a failcount clear action
      * has not already been placed for this resource on the node.
      * There is no sense in potentially forcing the resource from this
      * node if the failcount is being reset anyway.
      *
      * @TODO A clear_failcount operation can be scheduled in stage4() via
      * check_actions_for(), or in stage5() via check_params(). This runs in
      * stage2(), so it cannot detect those, meaning we might check the migration
      * threshold when we shouldn't -- worst case, we stop or move the resource,
      * then move it back next transition.
      */
     if (failcount_clear_action_exists(node, rsc) == FALSE) {
         check_migration_threshold(rsc, node, data_set);
     }
 }
 
 void
 complex_set_cmds(resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     rsc->cmds = &resource_class_alloc_functions[rsc->variant];
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(child_rsc);
     }
 }
 
 void
 set_alloc_actions(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(rsc);
     }
 }
 
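 /* GHashTable iterator: fold any "#health*" node attribute into the running
  * system health score passed via user_data. */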
 static void
 calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
 {
     const char *key = (const char *)gKey;
     const char *value = (const char *)gValue;
     int *system_health = (int *)user_data;
 
     if (!gKey || !gValue || !user_data) {
         return;
     }
 
     if (crm_starts_with(key, "#health")) {
         int score;
 
         /* Convert the value into an integer */
         score = char2score(value);
 
         /* Add it to the running total */
         *system_health = merge_weights(score, *system_health);
     }
 }
 
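 /* Map the configured node-health-strategy onto red/yellow/green scores, then
  * add a location preference for every resource on each node whose combined
  * health score is non-zero. */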
 static gboolean
 apply_system_health(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
     int base_health = 0;
 
     if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
         /* Prevent any accidental health -> score translation */
         node_score_red = 0;
         node_score_yellow = 0;
         node_score_green = 0;
         return TRUE;
 
     } else if (safe_str_eq(health_strategy, "migrate-on-red")) {
 
         /* Resources on nodes which have health values of red are
          * weighted away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = 0;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "only-green")) {
 
         /* Resources on nodes which have health values of red or yellow
          * are forced away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = -INFINITY;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "progressive")) {
        /* Same as the above, but use the r/y/g scores provided by the user.
         * Defaults are provided by the pe_prefs table.
         * Also, a custom health "base score" can be used.
         */
         base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
 
     } else if (safe_str_eq(health_strategy, "custom")) {
 
        /* Requires the admin to configure rsc_location constraints for
         * processing the stored health scores
         */
         /* TODO: Check for the existence of appropriate node health constraints */
         return TRUE;
 
     } else {
         crm_err("Unknown node health strategy: %s", health_strategy);
         return FALSE;
     }
 
     crm_info("Applying automated node health strategy: %s", health_strategy);
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         int system_health = base_health;
         node_t *node = (node_t *) gIter->data;
 
         /* Search through the node hash table for system health entries. */
         g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
 
        crm_info(" Node %s has a combined system health of %d",
                  node->details->uname, system_health);
 
         /* If the health is non-zero, then create a new rsc2node so that the
          * weight will be added later on.
          */
         if (system_health != 0) {
 
             GListPtr gIter2 = data_set->resources;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 resource_t *rsc = (resource_t *) gIter2->data;
 
                 rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
             }
         }
     }
 
     return TRUE;
 }
 
 gboolean
 stage0(pe_working_set_t * data_set)
 {
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     if (data_set->input == NULL) {
         return FALSE;
     }
 
     if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
         crm_trace("Calculating status");
         cluster_status(data_set);
     }
 
     set_alloc_actions(data_set);
     apply_system_health(data_set);
     unpack_constraints(cib_constraints, data_set);
 
     return TRUE;
 }
 
 /*
  * Check nodes for resources started outside of the LRM
  */
 gboolean
 probe_resources(pe_working_set_t * data_set)
 {
     action_t *probe_node_complete = NULL;
 
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
         const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
 
         if (node->details->online == FALSE) {
 
             if (is_baremetal_remote_node(node) && node->details->remote_rsc
                 && (get_remote_node_state(node) == remote_state_failed)) {
 
                 pe_fence_node(data_set, node, "the connection is unrecoverable");
             }
             continue;
 
         } else if (node->details->unclean) {
             continue;
 
         } else if (node->details->rsc_discovery_enabled == FALSE) {
             /* resource discovery is disabled for this node */
             continue;
         }
 
         if (probed != NULL && crm_is_true(probed) == FALSE) {
             action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                                CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
 
             add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
             continue;
         }
 
         for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
         }
     }
     return TRUE;
 }
 
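 /* If exclusive discovery is enabled for this resource (or its top-level
  * parent), ban the resource from any node not marked for exclusive
  * discovery. */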
 static void
 rsc_discover_filter(resource_t *rsc, node_t *node)
 {
     GListPtr gIter = rsc->children;
     resource_t *top = uber_parent(rsc);
     node_t *match;
 
     if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         rsc_discover_filter(child_rsc, node);
     }
 
     match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match && match->rsc_discover_mode != pe_discover_exclusive) {
         match->weight = -INFINITY;
     }
 }
 
 /*
  * Count how many valid nodes we have (so we know the maximum number of
  *  colors we can resolve).
  *
  * Apply node constraints (i.e. filter the "allowed_nodes" part of resources)
  */
 gboolean
 stage2(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying placement constraints");
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node == NULL) {
             /* error */
 
         } else if (node->weight >= 0.0  /* global weight */
                    && node->details->online && node->details->type != node_ping) {
             data_set->max_valid_nodes++;
         }
     }
 
     apply_placement_constraints(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         GListPtr gIter2 = NULL;
         node_t *node = (node_t *) gIter->data;
 
         gIter2 = data_set->resources;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             common_apply_stickiness(rsc, node, data_set);
             rsc_discover_filter(rsc, node);
         }
     }
 
     return TRUE;
 }
 
 /*
  * Create internal resource constraints before allocation
  */
 gboolean
 stage3(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->internal_constraints(rsc, data_set);
     }
 
     return TRUE;
 }
 
 /*
  * Check for orphaned or redefined actions
  */
 gboolean
 stage4(pe_working_set_t * data_set)
 {
     check_actions(data_set);
     return TRUE;
 }
 
+static void *
+convert_const_pointer(const void *ptr)
+{
+    /* Worst function ever */
+    return (void *)ptr;
+}
+
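 /* GCompareDataFunc for ordering resource allocation: higher priority first,
  * then the better score on the current location, then the better merged
  * score across the supplied node list. */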
 static gint
 sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
 {
     int rc = 0;
     int r1_weight = -INFINITY;
     int r2_weight = -INFINITY;
 
     const char *reason = "existence";
 
     const GListPtr nodes = (GListPtr) data;
-    resource_t *resource1 = (resource_t *) convert_const_pointer(a);
-    resource_t *resource2 = (resource_t *) convert_const_pointer(b);
+    const resource_t *resource1 = a;
+    const resource_t *resource2 = b;
 
     node_t *r1_node = NULL;
     node_t *r2_node = NULL;
     GListPtr gIter = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
 
     if (a == NULL && b == NULL) {
         goto done;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     reason = "priority";
     r1_weight = resource1->priority;
     r2_weight = resource2->priority;
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
-    r1_nodes =
-        rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1,
-                          pe_weights_forward | pe_weights_init);
+    r1_nodes = rsc_merge_weights(convert_const_pointer(resource1),
+                                 resource1->id, NULL, NULL, 1,
+                                 pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);
-    r2_nodes =
-        rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1,
-                          pe_weights_forward | pe_weights_init);
+
+    r2_nodes = rsc_merge_weights(convert_const_pointer(resource2),
+                                 resource2->id, NULL, NULL, 1,
+                                 pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);
 
     /* Current location score */
     reason = "current location";
     r1_weight = -INFINITY;
     r2_weight = -INFINITY;
 
     if (resource1->running_on) {
         r1_node = pe__current_node(resource1);
         r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
         if (r1_node != NULL) {
             r1_weight = r1_node->weight;
         }
     }
     if (resource2->running_on) {
         r2_node = pe__current_node(resource2);
         r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
         if (r2_node != NULL) {
             r2_weight = r2_node->weight;
         }
     }
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "score";
     for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         r1_node = NULL;
         r2_node = NULL;
 
         r1_weight = -INFINITY;
         if (r1_nodes) {
             r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
         }
         if (r1_node) {
             r1_weight = r1_node->weight;
         }
 
         r2_weight = -INFINITY;
         if (r2_nodes) {
             r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
         }
         if (r2_node) {
             r2_weight = r2_node->weight;
         }
 
         if (r1_weight > r2_weight) {
             rc = -1;
             goto done;
         }
 
         if (r1_weight < r2_weight) {
             rc = 1;
             goto done;
         }
     }
 
   done:
     crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
               resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
               rc < 0 ? '>' : rc > 0 ? '<' : '=',
               resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
 
     if (r1_nodes) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes) {
         g_hash_table_destroy(r2_nodes);
     }
 
     return rc;
 }
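 
 /* For reference: this comparator follows the GCompareDataFunc convention, so
  * a negative return sorts resource1 ahead of resource2; higher priority,
  * current-location score, or per-node score therefore means earlier
  * processing. It is used with the sorted node list as user data, roughly as
  * stage5() does below:
  *
  *     GListPtr nodes = g_list_copy(data_set->nodes);
  *
  *     nodes = sort_nodes_by_weight(nodes, NULL, data_set);
  *     data_set->resources = g_list_sort_with_data(data_set->resources,
  *                                                 sort_rsc_process_order,
  *                                                 nodes);
  *     g_list_free(nodes);
  */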
 
 static void
 allocate_resources(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Force remote connection resources to be allocated first. This
          * also forces their colocation dependencies to be allocated. */
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *rsc = (resource_t *) gIter->data;
             if (rsc->is_remote_node == FALSE) {
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
             /* For remote node connection resources, always prefer the partial
              * migration target during resource allocation, if the rsc is in the
              * middle of a migration.
              */
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
 
     /* now do the rest of the resources */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         if (rsc->is_remote_node == TRUE) {
             continue;
         }
         pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
         rsc->cmds->allocate(rsc, NULL, data_set);
     }
 }
 
 /* We always use pe_order_preserve with these convenience functions to exempt
  * internally generated constraints from the prohibition of user constraints
  * involving remote connection resources.
  *
  * The start ordering additionally uses pe_order_runnable_left so that the
  * specified action is not runnable if the start is not runnable.
  */
 
 static inline void
 order_start_then_action(resource_t *lh_rsc, action_t *rh_action,
                         enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_rsc && rh_action && data_set) {
         custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
                             rh_action->rsc, NULL, rh_action,
                             pe_order_preserve | pe_order_runnable_left | extra,
                             data_set);
     }
 }
 
 static inline void
 order_action_then_stop(action_t *lh_action, resource_t *rh_rsc,
                        enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_action && rh_rsc && data_set) {
         custom_action_order(lh_action->rsc, NULL, lh_action,
                             rh_rsc, stop_key(rh_rsc), NULL,
                             pe_order_preserve | extra, data_set);
     }
 }
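 
 /* For example, apply_container_ordering() below uses these wrappers to tie a
  * guest node's resource actions to its connection resource:
  *
  *     // the connection start must precede (and gate) the action
  *     order_start_then_action(remote_rsc, action, pe_order_none, data_set);
  *
  *     // the action must complete before the connection is stopped
  *     order_action_then_stop(action, remote_rsc, pe_order_none, data_set);
  *
  * Both wrappers add pe_order_preserve implicitly, exempting these internal
  * constraints from the user-constraint prohibition described above.
  */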
 
 static void
 cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
         return;
     }
 
     /* Don't recurse into ->children, those are just unallocated clone instances */
     if(is_not_set(rsc->flags, pe_rsc_orphan)) {
         return;
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node->details->online
             && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                 data_set)) {
 
             pe_action_t *clear_op = NULL;
 
             clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                            data_set);
 
             /* We can't use order_action_then_stop() here because its
              * pe_order_preserve breaks things
              */
             custom_action_order(clear_op->rsc, NULL, clear_op,
                                 rsc, stop_key(rsc), NULL,
                                 pe_order_optional, data_set);
         }
     }
 }
 
 gboolean
 stage5(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (safe_str_neq(data_set->placement_strategy, "default")) {
         GListPtr nodes = g_list_copy(data_set->nodes);
 
         nodes = sort_nodes_by_weight(nodes, NULL, data_set);
         data_set->resources =
             g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
 
         g_list_free(nodes);
     }
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
     }
 
     crm_trace("Allocating services");
     /* Take (next) highest resource, assign it and create its actions */
 
     allocate_resources(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
     }
 
     // Process deferred action checks
     pe__foreach_param_check(data_set, check_params);
     pe__free_param_checks(data_set);
 
     if (is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Calculating needed probes");
         /* This code probably needs optimization
          * ptest -x with 100 nodes, 100 clones and clone-max=100:
 
          With probes:
 
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
          ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          36s
          ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
 
          Without probes:
 
          ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
         */
 
         probe_resources(data_set);
     }
 
     crm_trace("Handle orphans");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         cleanup_orphans(rsc, data_set);
     }
 
     crm_trace("Creating actions");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 
     crm_trace("Creating done");
     return TRUE;
 }
 
 static gboolean
 is_managed(const resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     if (is_set(rsc->flags, pe_rsc_managed)) {
         return TRUE;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         if (is_managed(child_rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static gboolean
 any_managed_resources(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         if (is_managed(rsc)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Create pseudo-op for guest node fence, and order relative to it
  *
  * \param[in] node      Guest node to fence
  * \param[in] done      STONITH_DONE operation
  * \param[in] data_set  Working set of CIB state
  */
 static void
 fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set)
 {
     resource_t *container = node->details->remote_rsc->container;
     pe_action_t *stop = NULL;
     pe_action_t *stonith_op = NULL;
 
     /* The fence action is just a label; we don't do anything differently for
      * off vs. reboot. We specify it explicitly, rather than letting it default
      * to the cluster's default action, because we are not _initiating_ fencing
      * -- we are creating a pseudo-event to describe fencing that is already
      * occurring by other means (container recovery).
      */
     const char *fence_action = "off";
 
     /* Check whether guest's container resource has any explicit stop or
      * start (the stop may be implied by fencing of the guest's host).
      */
     if (container) {
         stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
 
         if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
             fence_action = "reboot";
         }
     }
 
     /* Create a fence pseudo-event, so we have an event to order actions
      * against, and the controller can always detect it.
      */
     stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set);
     update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                         __FUNCTION__, __LINE__);
 
     /* We want to imply stops/demotes after the guest is stopped, not wait until
      * it is restarted, so we always order pseudo-fencing after stop, not start
      * (even though start might be closer to what is done for a real reboot).
      */
     if(stop && is_set(stop->flags, pe_action_pseudo)) {
         pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set);
         crm_info("Implying guest node %s is down (action %d) after %s fencing",
                  node->details->uname, stonith_op->id, stop->node->details->uname);
         order_actions(parent_stonith_op, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
 
     } else if (stop) {
         order_actions(stop, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
         crm_info("Implying guest node %s is down (action %d) "
                  "after container %s is stopped (action %d)",
                  node->details->uname, stonith_op->id,
                  container->id, stop->id);
     } else {
         /* If we're fencing the guest node but there's no stop for the guest
          * resource, we must think the guest is already stopped. However, we may
          * think so because its resource history was just cleaned. To avoid
          * unnecessarily considering the guest node down if it's really up,
          * order the pseudo-fencing after any stop of the connection resource,
          * which will be ordered after any container (re-)probe.
          */
         stop = find_first_action(node->details->remote_rsc->actions, NULL,
                                  RSC_STOP, NULL);
 
         if (stop) {
             order_actions(stop, stonith_op, pe_order_optional);
             crm_info("Implying guest node %s is down (action %d) "
                      "after connection is stopped (action %d)",
                      node->details->uname, stonith_op->id, stop->id);
         } else {
             /* Not sure why we're fencing, but everything must already be
              * cleanly stopped.
              */
             crm_info("Implying guest node %s is down (action %d) ",
                      node->details->uname, stonith_op->id);
         }
     }
 
     /* Order/imply other actions relative to pseudo-fence as with real fence */
     stonith_constraints(node, stonith_op, data_set);
     if(done) {
         order_actions(stonith_op, done, pe_order_implies_then);
     }
 }
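 
 /* The resulting ordering is roughly (sketch):
  *
  *     container stop (or fencing of the guest's host)
  *         -> guest pseudo-fence (stonith_op)
  *         -> STONITH_DONE pseudo-op, when one was passed in
  *
  * plus whatever stonith_constraints() implies for the resources that were
  * running on the guest node.
  */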
 
 /*
  * Create dependencies for stonith and shutdown operations
  */
 gboolean
 stage6(pe_working_set_t * data_set)
 {
     action_t *dc_down = NULL;
     action_t *dc_fence = NULL;
     action_t *stonith_op = NULL;
     action_t *last_stonith = NULL;
     gboolean integrity_lost = FALSE;
     action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
     action_t *done = get_pseudo_op(STONITH_DONE, data_set);
     gboolean need_stonith = TRUE;
     GListPtr gIter;
     GListPtr stonith_ops = NULL;
 
     /* Remote ordering constraints need to be applied before we calculate
      * fencing, because it is one more place where we mark the node as
      * dirty.
      *
      * A nice side effect of doing it first is that we can remove a bunch of
      * special logic from apply_*_ordering(), because it's already part of
      * pe_fence_node().
      */
     crm_trace("Creating remote ordering constraints");
     apply_remote_node_ordering(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     if (any_managed_resources(data_set) == FALSE) {
         crm_notice("Delaying fencing operations until there are resources to manage");
         need_stonith = FALSE;
     }
 
     /* Check each node for stonith/shutdown */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (is_container_remote_node(node)) {
             if (node->details->remote_requires_reset && need_stonith) {
                 fence_guest(node, done, data_set);
             }
             continue;
         }
 
         stonith_op = NULL;
 
         if (node->details->unclean
             && need_stonith && pe_can_fence(data_set, node)) {
 
             stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
             pe_warn("Scheduling Node %s for STONITH", node->details->uname);
 
             stonith_constraints(node, stonith_op, data_set);
 
             if (node->details->is_dc) {
                 dc_down = stonith_op;
                 dc_fence = stonith_op;
 
             } else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) {
                 if (last_stonith) {
                     order_actions(last_stonith, stonith_op, pe_order_optional);
                 }
                 last_stonith = stonith_op;
 
             } else {
                 order_actions(stonith_op, done, pe_order_implies_then);
                 stonith_ops = g_list_append(stonith_ops, stonith_op);
             }
 
         } else if (node->details->online && node->details->shutdown &&
                 /* TODO define what a shutdown op means for a remote node.
                  * For now we do not send shutdown operations for remote nodes, but
                  * if we can come up with a good use for this in the future, we will. */
                     is_remote_node(node) == FALSE) {
 
             action_t *down_op = NULL;
 
             crm_notice("Scheduling Node %s for shutdown", node->details->uname);
 
             down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname),
                                     CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set);
 
             shutdown_constraints(node, down_op, data_set);
             add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
             if (node->details->is_dc) {
                 dc_down = down_op;
             }
         }
 
         if (node->details->unclean && stonith_op == NULL) {
             integrity_lost = TRUE;
             pe_warn("Node %s is unclean!", node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
             pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
 
         } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
             crm_notice("Cannot fence unclean nodes until quorum is"
                        " attained (or no-quorum-policy is set to ignore)");
         }
     }
 
     if (dc_down != NULL) {
         GListPtr gIter = NULL;
 
         crm_trace("Ordering shutdowns before %s on %s (DC)",
                   dc_down->task, dc_down->node->details->uname);
 
         add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
         for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
             action_t *node_stop = (action_t *) gIter->data;
 
             if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) {
                 continue;
             } else if (node_stop->node->details->is_dc) {
                 continue;
             }
 
             crm_debug("Ordering shutdown on %s before %s on %s",
                       node_stop->node->details->uname,
                       dc_down->task, dc_down->node->details->uname);
 
             order_actions(node_stop, dc_down, pe_order_optional);
         }
 
         if (last_stonith) {
             if (dc_down != last_stonith) {
                 order_actions(last_stonith, dc_down, pe_order_optional);
             }
 
         } else {
             GListPtr gIter2 = NULL;
 
             for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = gIter2->next) {
                 stonith_op = (action_t *) gIter2->data;
 
                 if (dc_down != stonith_op) {
                     order_actions(stonith_op, dc_down, pe_order_optional);
                 }
             }
         }
     }
 
 
     if (dc_fence) {
         order_actions(dc_down, done, pe_order_implies_then);
 
     } else if (last_stonith) {
         order_actions(last_stonith, done, pe_order_implies_then);
     }
 
     order_actions(done, all_stopped, pe_order_implies_then);
 
     g_list_free(stonith_ops);
     return TRUE;
 }
 
 /*
  * Determine the sets of independent actions and the correct order for the
  * actions in each set.
  *
  * Mark dependencies of un-runnable actions un-runnable.
  */
 static GListPtr
 find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
 {
     GListPtr list = NULL;
 
     list = find_actions(actions, original_key, NULL);
     if (list == NULL) {
         /* we're potentially searching a child of the original resource */
         char *key = NULL;
         char *task = NULL;
         guint interval_ms = 0;
 
         if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
             key = generate_op_key(rsc->id, task, interval_ms);
             list = find_actions(actions, key, NULL);
 
         } else {
             crm_err("search key: %s", original_key);
         }
 
         free(key);
         free(task);
     }
 
     return list;
 }
 
 static void
 rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
                pe__ordering_t *order)
 {
     GListPtr gIter = NULL;
     GListPtr rh_actions = NULL;
     action_t *rh_action = NULL;
     enum pe_ordering type;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(order != NULL, return);
 
     type = order->type;
     rh_action = order->rh_action;
     crm_trace("Processing RH of ordering constraint %d", order->id);
 
     if (rh_action != NULL) {
         rh_actions = g_list_prepend(NULL, rh_action);
 
     } else if (rsc != NULL) {
         rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
     }
 
     if (rh_actions == NULL) {
         pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
                      " ignoring", rsc->id, order->rh_action_task);
         if (lh_action) {
             pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
         }
         return;
     }
 
     if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
         pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
                      order->rh_action_task);
         clear_bit(type, pe_order_implies_then);
     }
 
     gIter = rh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *rh_action_iter = (action_t *) gIter->data;
 
         if (lh_action) {
             order_actions(lh_action, rh_action_iter, type);
 
         } else if (type & pe_order_implies_then) {
             update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
             crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
         } else {
             crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
         }
     }
 
     g_list_free(rh_actions);
 }
 
 static void
 rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
                 pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     GListPtr lh_actions = NULL;
     action_t *lh_action = order->lh_action;
     resource_t *rh_rsc = order->rh_rsc;
 
     crm_trace("Processing LH of ordering constraint %d", order->id);
     CRM_ASSERT(lh_rsc != NULL);
 
     if (lh_action != NULL) {
         lh_actions = g_list_prepend(NULL, lh_action);
 
     } else {
         lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
     }
 
     if (lh_actions == NULL && lh_rsc != rh_rsc) {
         char *key = NULL;
         char *op_type = NULL;
         guint interval_ms = 0;
 
         parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
         key = generate_op_key(lh_rsc->id, op_type, interval_ms);
 
         if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else {
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
             lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
             lh_actions = g_list_prepend(NULL, lh_action);
         }
 
         free(op_type);
     }
 
     gIter = lh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *lh_action_iter = (action_t *) gIter->data;
 
         if (rh_rsc == NULL && order->rh_action) {
             rh_rsc = order->rh_action->rsc;
         }
         if (rh_rsc) {
             rsc_order_then(lh_action_iter, rh_rsc, order);
 
         } else if (order->rh_action) {
             order_actions(lh_action_iter, order->rh_action, order->type);
         }
     }
 
     g_list_free(lh_actions);
 }
 
 extern void update_colo_start_chain(pe_action_t *action,
                                     pe_working_set_t *data_set);
 
 static int
 is_recurring_action(action_t *action) 
 {
     const char *interval_ms_s = g_hash_table_lookup(action->meta,
                                                     XML_LRM_ATTR_INTERVAL_MS);
     guint interval_ms = crm_parse_ms(interval_ms_s);
 
     return (interval_ms > 0);
 }
 
 static void
 apply_container_ordering(action_t *action, pe_working_set_t *data_set)
 {
     /* VMs are also classified as containers for these purposes... in
      * that they both involve a 'thing' running on a real or remote
      * cluster node.
      *
      * This allows us to be smarter about the type and extent of
      * recovery actions required in various scenarios
      */
     resource_t *remote_rsc = NULL;
     resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     CRM_ASSERT(action->rsc);
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     container = remote_rsc->container;
     CRM_ASSERT(container);
 
     if(is_set(container->flags, pe_rsc_failed)) {
         pe_fence_node(data_set, action->node, "container failed");
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id,
               is_set(container->flags, pe_rsc_failed)? "failed " : "",
               container->id);
 
     if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
         || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             /* Force resource recovery if the container is recovered */
             order_start_then_action(container, action, pe_order_implies_then,
                                     data_set);
 
             /* Wait for the connection resource to be up too */
             order_start_then_action(remote_rsc, action, pe_order_none,
                                     data_set);
             break;
 
         case stop_rsc:
         case action_demote:
             if (is_set(container->flags, pe_rsc_failed)) {
                 /* When the container representing a guest node fails, any stop
                  * or demote actions for resources running on the guest node
                  * are implied by the container stopping. This is similar to
                  * how fencing operations work for cluster nodes and remote
                  * nodes.
                  */
             } else {
                 /* Ensure the operation happens before the connection is brought
                  * down.
                  *
                  * If we really wanted to, we could order these after the
                  * connection start, IFF the container's current role was
                  * stopped (otherwise we re-introduce an ordering loop when the
                  * connection is restarting).
                  */
                 order_action_then_stop(action, remote_rsc, pe_order_none,
                                        data_set);
             }
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 if(task != no_action) {
                     order_start_then_action(remote_rsc, action,
                                             pe_order_implies_then, data_set);
                 }
             } else {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
     }
 }
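 
 /* In short (sketch of the switch above): start/promote on the guest waits for
  * both the container and the connection to start, and is forced to rerun if
  * the container is recovered; stop/demote must finish before the connection
  * stops, unless the container itself failed (then the stop is implied by the
  * container stopping); other actions wait for the connection to be up, with
  * recurring monitors additionally forced to rerun.
  */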
 
 static enum remote_connection_state
 get_remote_node_state(pe_node_t *node) 
 {
     resource_t *remote_rsc = NULL;
     node_t *cluster_node = NULL;
 
     CRM_ASSERT(node);
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node that the remote connection resource resides on
      * is unclean or went offline, we can't process any operations on the
      * remote node until the connection starts elsewhere.
      */
     if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
         /* The connection resource is not going to run anywhere */
 
         if (cluster_node && cluster_node->details->unclean) {
             /* The remote connection is failed because its resource is on a
              * failed node and can't be recovered elsewhere, so we must fence.
              */
             return remote_state_failed;
         }
 
         if (is_not_set(remote_rsc->flags, pe_rsc_failed)) {
             /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
         /* Connection resource is failed */
 
         if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
             && remote_rsc->remote_reconnect_ms
             && node->details->remote_was_fenced) {
 
             /* We won't know whether the connection is recoverable until the
              * reconnect interval expires and we reattempt connection.
              */
             return remote_state_unknown;
         }
 
         /* The remote connection is in a failed state. If there are any
          * resources known to be active on it (stop) or in an unknown state
          * (probe), we must assume the worst and fence it.
          */
         return remote_state_failed;
 
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere; see if we can recover it first */
         return remote_state_unknown;
 
     } else if(cluster_node->details->unclean == TRUE
               || cluster_node->details->online == FALSE) {
         /* Connection is running on a dead node; see if we can recover it first */
         return remote_state_resting;
 
     } else if (g_list_length(remote_rsc->running_on) > 1
                && remote_rsc->partial_migration_source
                && remote_rsc->partial_migration_target) {
         /* We're in the middle of migrating a connection resource,
          * wait until after the resource migrates before performing
          * any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
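 
 /* Rough decision summary (sketch of the logic above):
  *
  *   connection will not be running anywhere next:
  *     - its last host is unclean                      -> remote_state_failed
  *     - connection stopped cleanly                    -> remote_state_stopped
  *     - failed, already fenced, and waiting on the
  *       reconnect interval                            -> remote_state_unknown
  *     - failed otherwise                              -> remote_state_failed
  *   recoverable, but not running anywhere right now   -> remote_state_unknown
  *   current host is unclean or offline                -> remote_state_resting
  *   connection is mid-migration                       -> remote_state_resting
  *   otherwise                                         -> remote_state_alive
  */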
 
 /*!
  * \internal
  * \brief Order actions on remote node relative to actions for the connection
  */
 static void
 apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
 {
     resource_t *remote_rsc = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     enum pe_ordering order_opts = pe_order_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id, state2text(state));
 
     if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
         || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             order_opts = pe_order_none;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 order_opts |= pe_order_implies_then;
             }
 
             /* Ensure connection is up before running this action */
             order_start_then_action(remote_rsc, action, order_opts, data_set);
             break;
 
         case stop_rsc:
             if(state == remote_state_alive) {
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else if(state == remote_state_failed) {
                 /* We would only be here if the resource is
                  * running on the remote node.  Since we have no
                  * way to stop it, it is necessary to fence the
                  * node.
                  */
                 pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
                 /* State must be remote_state_unknown or remote_state_stopped.
                  * Since the connection is not coming back up in this
                  * transition, stop this resource first.
                  */
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else {
                 /* The connection is going to be started somewhere else, so
                  * stop this resource after that completes.
                  */
                 order_start_then_action(remote_rsc, action, pe_order_none, data_set);
             }
             break;
 
         case action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if(state == remote_state_resting || state == remote_state_unknown) {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 order_start_then_action(remote_rsc, action,
                                         pe_order_implies_then, data_set);
 
             } else {
                 node_t *cluster_node = pe__current_node(remote_rsc);
 
                 if(task == monitor_rsc && state == remote_state_failed) {
                     /* We would only be here if we do not know the
                      * state of the resource on the remote node.
                      * Since we have no way to find out, it is
                      * necessary to fence the node.
                      */
                     pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
                 }
 
                 if(cluster_node && state == remote_state_stopped) {
                     /* The connection is currently up, but is going
                      * down permanently.
                      *
                      * Make sure we check that services are actually
                      * stopped _before_ we let the connection get
                      * closed.
                      */
                     order_action_then_stop(action, remote_rsc,
                                            pe_order_runnable_left, data_set);
 
                 } else {
                     order_start_then_action(remote_rsc, action, pe_order_none,
                                             data_set);
                 }
             }
             break;
     }
 }
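 
 /* In short (sketch of the switch above): start/promote always waits for the
  * connection start, and is made required when recovery must be forced; stop
  * is ordered before the connection stop, except when the connection is simply
  * restarting elsewhere, in which case it waits for that start; demote only
  * waits for the connection start while the connection is not being torn down;
  * unrecoverable failures lead to fencing of the remote node.
  */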
 
 static void
 apply_remote_node_ordering(pe_working_set_t *data_set)
 {
     if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
         return;
     }
 
     for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
         resource_t *remote = NULL;
 
         // We are only interested in resource actions
         if (action->rsc == NULL) {
             continue;
         }
 
         /* Special case: If we are clearing the failcount of an actual
          * remote connection resource, then make sure this happens before
          * any start of the resource in this transition.
          */
         if (action->rsc->is_remote_node &&
             safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
 
             custom_action_order(action->rsc,
                 NULL,
                 action,
                 action->rsc,
                 generate_op_key(action->rsc->id, RSC_START, 0),
                 NULL,
                 pe_order_optional,
                 data_set);
 
             continue;
         }
 
         // We are only interested in actions allocated to a node
         if (action->node == NULL) {
             continue;
         }
 
         if (is_remote_node(action->node) == FALSE) {
             continue;
         }
 
         /* We are only interested in real actions.
          *
          * @TODO This is probably wrong; pseudo-actions might be converted to
          * real actions and vice versa later in update_actions() at the end of
          * stage7().
          */
         if (is_set(action->flags, pe_action_pseudo)) {
             continue;
         }
 
         remote = action->node->details->remote_rsc;
         if (remote == NULL) {
             // Orphaned
             continue;
         }
 
         /* The action occurs across a remote connection, so create
          * ordering constraints that guarantee the action occurs while the node
          * is active (after start, before stop ... things like that).
          *
          * This is somewhat brittle in that we need to make sure the results of
          * this ordering are compatible with the result of get_router_node().
          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
          * of this logic rather than action2xml().
          */
         if (remote->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action, data_set);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action, data_set);
         }
     }
 }
 
 static void
 order_probes(pe_working_set_t * data_set) 
 {
 #if 0
     GListPtr gIter = NULL;
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         /* Given "A then B", we would prefer to wait for A to be
          * started before probing B.
          *
          * If A was a filesystem on which the binaries and data for B
          * lived, it would have been useful if the author of B's agent
          * could assume that A is running before B.monitor will be
          * called.
          *
          * However we can't _only_ probe once A is running, otherwise
          * we'd not detect the state of B if A could not be started
          * for some reason.
          *
          * In practice however, we cannot even do an opportunistic
          * version of this because B may be moving:
          *
          *   B.probe -> B.start
          *   B.probe -> B.stop
          *   B.stop -> B.start
          *   A.stop -> A.start
          *   A.start -> B.probe
          *
          * So far so good, but if we add the result of this code:
          *
          *   B.stop -> A.stop
          *
          * Then we get a loop:
          *
          *   B.probe -> B.stop -> A.stop -> A.start -> B.probe
          *
          * We could kill the 'B.probe -> B.stop' dependency, but that
          * could mean stopping B "too" soon, because B.start must wait
          * for the probes to complete.
          *
          * Another option is to allow it only if A is a non-unique
          * clone with clone-max == node-max (since we'll never be
          * moving it).  However, we could still be stopping one
          * instance at the same time as starting another.
 
          * The complexity of checking for allowed conditions combined
           * with the ever-narrowing use case suggests that this code
          * should remain disabled until someone gets smarter.
          */
         action_t *start = NULL;
         GListPtr actions = NULL;
         GListPtr probes = NULL;
         char *key = NULL;
 
         key = start_key(rsc);
         actions = find_actions(rsc->actions, key, NULL);
         free(key);
 
         if (actions) {
             start = actions->data;
             g_list_free(actions);
         }
 
         if(start == NULL) {
             crm_err("No start action for %s", rsc->id);
             continue;
         }
 
         key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0);
         probes = find_actions(rsc->actions, key, NULL);
         free(key);
 
         for (actions = start->actions_before; actions != NULL; actions = actions->next) {
             action_wrapper_t *before = (action_wrapper_t *) actions->data;
 
             GListPtr pIter = NULL;
             action_t *first = before->action;
             resource_t *first_rsc = first->rsc;
 
             if(first->required_runnable_before) {
                 GListPtr clone_actions = NULL;
                 for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
                     before = (action_wrapper_t *) clone_actions->data;
 
                     crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
 
                     CRM_ASSERT(before->action->rsc);
                     first_rsc = before->action->rsc;
                     break;
                 }
 
             } else if(safe_str_neq(first->task, RSC_START)) {
                 crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
             }
 
             if(first_rsc == NULL) {
                 continue;
 
             } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
                 crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
                 continue;
 
             } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
                 crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
                 continue;
             }
 
             crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 action_t *probe = (action_t *) pIter->data;
 
                 crm_err("Ordering %s before %s", first->uuid, probe->uuid);
                 order_actions(first, probe, pe_order_optional);
             }
         }
     }
 #endif
 }
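 
 /* Note that with the body compiled out (#if 0 above), order_probes() is
  * deliberately a no-op for now; the embedded comment explains the ordering
  * loop that makes the optimization unsafe in the general case.
  */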
 
 gboolean
 stage7(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying ordering constraints");
 
     /* Don't ask me why, but apparently they need to be processed in
      * the order they were created in... go figure.
      *
      * Also, g_list_append() has horrendous performance characteristics
      * (each append walks the whole list, so building this way is quadratic),
      * so we use g_list_prepend() and then reverse the list here.
      */
     data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
 
     for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
         pe__ordering_t *order = gIter->data;
         resource_t *rsc = order->lh_rsc;
 
         crm_trace("Applying ordering constraint: %d", order->id);
 
         if (rsc != NULL) {
             crm_trace("rsc_action-to-*");
             rsc_order_first(rsc, order, data_set);
             continue;
         }
 
         rsc = order->rh_rsc;
         if (rsc != NULL) {
             crm_trace("action-to-rsc_action");
             rsc_order_then(order->lh_action, rsc, order);
 
         } else {
             crm_trace("action-to-action");
             order_actions(order->lh_action, order->rh_action, order->type);
         }
     }
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_colo_start_chain(action, data_set);
     }
 
     crm_trace("Ordering probes");
     order_probes(data_set);
 
     crm_trace("Updating %d actions", g_list_length(data_set->actions));
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_action(action, data_set);
     }
 
     LogNodeActions(data_set, FALSE);
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         LogActions(rsc, data_set, FALSE);
     }
     return TRUE;
 }
 
 int transition_id = -1;
 
 /*
  * Create a dependency graph to send to the transitioner (via the controller)
  */
 gboolean
 stage8(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *value = NULL;
 
     transition_id++;
     crm_trace("Creating transition graph %d.", transition_id);
 
     data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
 
     value = pe_pref(data_set->config_hash, "cluster-delay");
     crm_xml_add(data_set->graph, "cluster-delay", value);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     crm_xml_add(data_set->graph, "stonith-timeout", value);
 
     crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
 
     if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
         crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(data_set->graph, "failed-start-offset", "1");
     }
 
     value = pe_pref(data_set->config_hash, "batch-limit");
     crm_xml_add(data_set->graph, "batch-limit", value);
 
     crm_xml_add_int(data_set->graph, "transition_id", transition_id);
 
     value = pe_pref(data_set->config_hash, "migration-limit");
     if (crm_int_helper(value, NULL) > 0) {
         crm_xml_add(data_set->graph, "migration-limit", value);
     }
 
 /* errors...
    slist_iter(action, action_t, action_list, lpc,
    if(action->optional == FALSE && action->runnable == FALSE) {
    print_action("Ignoring", action, TRUE);
    }
    );
 */
 
     gIter = data_set->resources;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
         rsc->cmds->expand(rsc, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created resource-driven action list");
 
    /* Pseudo-action to distribute the list of nodes with a maintenance state update */
     add_maintenance_update(data_set);
 
     /* catch any non-resource specific actions */
     crm_trace("processing non-resource actions");
 
     gIter = data_set->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc
             && action->node
             && action->node->details->shutdown
             && is_not_set(action->rsc->flags, pe_rsc_maintenance)
             && is_not_set(action->flags, pe_action_optional)
             && is_not_set(action->flags, pe_action_runnable)
             && crm_str_eq(action->task, RSC_STOP, TRUE)
             ) {
             /* Eventually we should just ignore the 'fence' case, but for
              * now it's the best way to detect (in CTS) when CIB resource
              * updates are being lost.
              */
             if (is_set(data_set->flags, pe_flag_have_quorum)
                 || data_set->no_quorum_policy == no_quorum_ignore) {
                 crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
                          action->node->details->unclean ? "fence" : "shut down",
                          action->node->details->uname, action->rsc->id,
                          is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
                          is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
                          action->uuid);
             }
         }
 
         graph_element_from_action(action, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created generic action list");
     crm_trace("Created transition graph %d.", transition_id);
 
     return TRUE;
 }
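 
 /* For illustration only -- the graph root built above ends up looking roughly
  * like this (XML_TAG_GRAPH expands to the element name; attribute values
  * depend on the cluster options):
  *
  *     <transition_graph cluster-delay="60s" stonith-timeout="60s"
  *                       failed-stop-offset="INFINITY"
  *                       failed-start-offset="INFINITY"
  *                       batch-limit="0" transition_id="0">
  *       <!-- expanded per-resource and non-resource actions go here -->
  *     </transition_graph>
  */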
 
 void
 LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         char *node_name = NULL;
         char *task = NULL;
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc != NULL) {
             continue;
         } else if (is_set(action->flags, pe_action_optional)) {
             continue;
         }
 
         if (is_container_remote_node(action->node)) {
             node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
         } else if(action->node) {
             node_name = crm_strdup_printf("%s", action->node->details->uname);
         }
 
 
         if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
             task = strdup("Shutdown");
         } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
             const char *op = g_hash_table_lookup(action->meta, "stonith_action");
             task = crm_strdup_printf("Fence (%s)", op);
         }
 
         if(task == NULL) {
             /* Nothing to report */
         } else if(terminal && action->reason) {
             printf(" * %s %s '%s'\n", task, node_name, action->reason);
         } else if(terminal) {
             printf(" * %s %s\n", task, node_name);
         } else if(action->reason) {
             crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
         } else {
             crm_notice(" * %s %s\n", task, node_name);
         }
 
         free(node_name);
         free(task);
     }
 }
diff --git a/daemons/schedulerd/sched_notif.c b/daemons/schedulerd/sched_notif.c
index c3e2565911..3e21bfab3a 100644
--- a/daemons/schedulerd/sched_notif.c
+++ b/daemons/schedulerd/sched_notif.c
@@ -1,786 +1,822 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <sched_allocate.h>
 #include <sched_notif.h>
 #include <sched_utils.h>
 
 typedef struct notify_entry_s {
     resource_t *rsc;
     node_t *node;
 } notify_entry_t;
 
 static gint
 sort_notify_entries(gconstpointer a, gconstpointer b)
 {
     int tmp;
     const notify_entry_t *entry_a = a;
     const notify_entry_t *entry_b = b;
 
     if (entry_a == NULL && entry_b == NULL) {
         return 0;
     }
     if (entry_a == NULL) {
         return 1;
     }
     if (entry_b == NULL) {
         return -1;
     }
 
     if (entry_a->rsc == NULL && entry_b->rsc == NULL) {
         return 0;
     }
     if (entry_a->rsc == NULL) {
         return 1;
     }
     if (entry_b->rsc == NULL) {
         return -1;
     }
 
     tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id);
     if (tmp != 0) {
         return tmp;
     }
 
     if (entry_a->node == NULL && entry_b->node == NULL) {
         return 0;
     }
     if (entry_a->node == NULL) {
         return 1;
     }
     if (entry_b->node == NULL) {
         return -1;
     }
 
     return strcmp(entry_a->node->details->id, entry_b->node->details->id);
 }
 
 static notify_entry_t *dup_notify_entry(notify_entry_t *entry)
 {
     notify_entry_t *dup = malloc(sizeof(notify_entry_t));
 
     CRM_ASSERT(dup != NULL);
     dup->rsc = entry->rsc;
     dup->node = entry->node;
     return dup;
 }
 
 static void
 expand_node_list(GListPtr list, char **uname, char **metal)
 {
     GListPtr gIter = NULL;
     char *node_list = NULL;
     char *metal_list = NULL;
 
     CRM_ASSERT(uname != NULL);
     if (list == NULL) {
         *uname = strdup(" ");
         if(metal) {
             *metal = strdup(" ");
         }
         return;
     }
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         int len = 0;
         int existing_len = 0;
         node_t *node = (node_t *) gIter->data;
 
         if (node->details->uname == NULL) {
             continue;
         }
         len = 2 + strlen(node->details->uname);
 
         if(node_list) {
             existing_len = strlen(node_list);
         }
 //            crm_trace("Adding %s (%dc) at offset %d", node->details->uname, len - 2, existing_len);
         node_list = realloc_safe(node_list, len + existing_len);
         sprintf(node_list + existing_len, "%s%s", existing_len == 0 ? "":" ", node->details->uname);
 
         if(metal) {
             existing_len = 0;
             if(metal_list) {
                 existing_len = strlen(metal_list);
             }
 
             if(node->details->remote_rsc
                && node->details->remote_rsc->container
                && node->details->remote_rsc->container->running_on) {
                 node = pe__current_node(node->details->remote_rsc->container);
             }
 
             if (node->details->uname == NULL) {
                 continue;
             }
             len = 2 + strlen(node->details->uname);
             metal_list = realloc_safe(metal_list, len + existing_len);
             sprintf(metal_list + existing_len, "%s%s", existing_len == 0 ? "":" ", node->details->uname);
         }
     }
 
     *uname = node_list;
     if(metal) {
         *metal = metal_list;
     }
 }
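 
 /* Example (sketch): for online nodes "node1" and "node2", *uname becomes
  * "node1 node2"; if node1 is a guest node whose container runs on "host1",
  * the metal list (when requested) reads "host1 node2" instead.
  */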
 
 static void
 expand_list(GListPtr list, char **rsc_list, char **node_list)
 {
     GListPtr gIter = NULL;
     const char *uname = NULL;
     const char *rsc_id = NULL;
     const char *last_rsc_id = NULL;
 
     if (rsc_list) {
         *rsc_list = NULL;
     }
 
     if (list == NULL) {
         if (rsc_list) {
             *rsc_list = strdup(" ");
         }
         if (node_list) {
             *node_list = strdup(" ");
         }
         return;
     }
 
     if (node_list) {
         *node_list = NULL;
     }
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         notify_entry_t *entry = (notify_entry_t *) gIter->data;
 
         CRM_LOG_ASSERT(entry != NULL);
         CRM_LOG_ASSERT(entry && entry->rsc != NULL);
 
         if(entry == NULL || entry->rsc == NULL) {
             continue;
         }
 
         /* Uh, why? */
         CRM_LOG_ASSERT(node_list == NULL || entry->node != NULL);
         if(node_list != NULL && entry->node == NULL) {
             continue;
         }
 
         uname = NULL;
         rsc_id = entry->rsc->id;
         CRM_ASSERT(rsc_id != NULL);
 
         /* filter dups */
         if (safe_str_eq(rsc_id, last_rsc_id)) {
             continue;
         }
         last_rsc_id = rsc_id;
 
         if (rsc_list != NULL) {
             int existing_len = 0;
             int len = 2 + strlen(rsc_id);       /* +1 space, +1 EOS */
 
             if (*rsc_list) {
                 existing_len = strlen(*rsc_list);
             }
 
             crm_trace("Adding %s (%dc) at offset %d", rsc_id, len - 2, existing_len);
             *rsc_list = realloc_safe(*rsc_list, len + existing_len);
             sprintf(*rsc_list + existing_len, "%s%s", existing_len == 0 ? "":" ", rsc_id);
         }
 
         if (entry->node != NULL) {
             uname = entry->node->details->uname;
         }
 
         if (node_list != NULL && uname) {
             int existing_len = 0;
             int len = 2 + strlen(uname);
 
             if (*node_list) {
                 existing_len = strlen(*node_list);
             }
 
             crm_trace("Adding %s (%dc) at offset %d", uname, len - 2, existing_len);
             *node_list = realloc_safe(*node_list, len + existing_len);
             sprintf(*node_list + existing_len, "%s%s", existing_len == 0 ? "":" ", uname);
         }
     }
 
 }
 
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     add_hash_param(user_data, key, value);
 }
 
 static action_t *
 pe_notify(resource_t * rsc, node_t * node, action_t * op, action_t * confirm,
           notify_data_t * n_data, pe_working_set_t * data_set)
 {
     char *key = NULL;
     action_t *trigger = NULL;
     const char *value = NULL;
     const char *task = NULL;
 
     if (op == NULL || confirm == NULL) {
         pe_rsc_trace(rsc, "Op=%p confirm=%p", op, confirm);
         return NULL;
     }
 
     CRM_CHECK(rsc != NULL, return NULL);
     CRM_CHECK(node != NULL, return NULL);
 
     if (node->details->online == FALSE) {
         pe_rsc_trace(rsc, "Skipping notification for %s: node offline", rsc->id);
         return NULL;
     } else if (is_set(op->flags, pe_action_runnable) == FALSE) {
         pe_rsc_trace(rsc, "Skipping notification for %s: not runnable", op->uuid);
         return NULL;
     }
 
     value = g_hash_table_lookup(op->meta, "notify_type");
     task = g_hash_table_lookup(op->meta, "notify_operation");
 
     pe_rsc_trace(rsc, "Creating notify actions for %s: %s (%s-%s)", op->uuid, rsc->id, value, task);
 
     key = generate_notify_key(rsc->id, value, task);
     trigger = custom_action(rsc, key, op->task, node,
                             is_set(op->flags, pe_action_optional), TRUE, data_set);
     g_hash_table_foreach(op->meta, dup_attr, trigger->meta);
     g_hash_table_foreach(n_data->keys, dup_attr, trigger->meta);
 
     /* pseudo_notify before notify */
     pe_rsc_trace(rsc, "Ordering %s before %s (%d->%d)", op->uuid, trigger->uuid, trigger->id,
                  op->id);
 
     order_actions(op, trigger, pe_order_optional);
     order_actions(trigger, confirm, pe_order_optional);
     return trigger;
 }
 
 static void
 pe_post_notify(resource_t * rsc, node_t * node, notify_data_t * n_data, pe_working_set_t * data_set)
 {
     action_t *notify = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     if (n_data->post == NULL) {
         return;                 /* Nothing to do */
     }
 
     notify = pe_notify(rsc, node, n_data->post, n_data->post_done, n_data, data_set);
 
     if (notify != NULL) {
         notify->priority = INFINITY;
     }
 
     if (n_data->post_done) {
         GListPtr gIter = rsc->actions;
 
         for (; gIter != NULL; gIter = gIter->next) {
             action_t *mon = (action_t *) gIter->data;
             const char *interval_ms_s = g_hash_table_lookup(mon->meta,
                                                             XML_LRM_ATTR_INTERVAL_MS);
 
             if ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0")) {
                 pe_rsc_trace(rsc, "Skipping %s: interval", mon->uuid);
                 continue;
             } else if (safe_str_eq(mon->task, RSC_CANCEL)) {
                 pe_rsc_trace(rsc, "Skipping %s: cancel", mon->uuid);
                 continue;
             }
 
             order_actions(n_data->post_done, mon, pe_order_optional);
         }
     }
 }
 
 notify_data_t *
 create_notification_boundaries(resource_t * rsc, const char *action, action_t * start,
                                action_t * end, pe_working_set_t * data_set)
 {
     /* Create the pseudo ops that precede and follow the actual notifications */
 
     /*
      * Creates two sequences (conditional on start and end being supplied):
      *   pre_notify -> pre_notify_complete -> start, and
      *   end -> post_notify -> post_notify_complete
      *
      * 'start' and 'end' may be the same event, or (as with clones) an action
      * and its completion pseudo-action (${X} and ${X}ed)
      */
     char *key = NULL;
     notify_data_t *n_data = NULL;
 
     if (is_not_set(rsc->flags, pe_rsc_notify)) {
         return NULL;
     }
 
     n_data = calloc(1, sizeof(notify_data_t));
     n_data->action = action;
     n_data->keys = crm_str_table_new();
 
     if (start) {
         /* create pre-event notification wrappers */
         key = generate_notify_key(rsc->id, "pre", start->task);
         n_data->pre =
             custom_action(rsc, key, RSC_NOTIFY, NULL, is_set(start->flags, pe_action_optional),
                           TRUE, data_set);
 
         update_action_flags(n_data->pre, pe_action_pseudo, __FUNCTION__, __LINE__);
         update_action_flags(n_data->pre, pe_action_runnable, __FUNCTION__, __LINE__);
 
         add_hash_param(n_data->pre->meta, "notify_type", "pre");
         add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
 
         add_hash_param(n_data->pre->meta, "notify_key_type", "pre");
         add_hash_param(n_data->pre->meta, "notify_key_operation", start->task);
 
         /* create pre_notify_complete */
         key = generate_notify_key(rsc->id, "confirmed-pre", start->task);
         n_data->pre_done =
             custom_action(rsc, key, RSC_NOTIFIED, NULL, is_set(start->flags, pe_action_optional),
                           TRUE, data_set);
 
         update_action_flags(n_data->pre_done, pe_action_pseudo, __FUNCTION__, __LINE__);
         update_action_flags(n_data->pre_done, pe_action_runnable, __FUNCTION__, __LINE__);
 
         add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
         add_hash_param(n_data->pre_done->meta, "notify_operation", n_data->action);
 
         add_hash_param(n_data->pre_done->meta, "notify_key_type", "confirmed-pre");
         add_hash_param(n_data->pre_done->meta, "notify_key_operation", start->task);
 
         order_actions(n_data->pre_done, start, pe_order_optional);
         order_actions(n_data->pre, n_data->pre_done, pe_order_optional);
     }
 
     if (end) {
         /* create post-event notification wrappers */
         key = generate_notify_key(rsc->id, "post", end->task);
         n_data->post =
             custom_action(rsc, key, RSC_NOTIFY, NULL, is_set(end->flags, pe_action_optional), TRUE,
                           data_set);
 
         n_data->post->priority = INFINITY;
         update_action_flags(n_data->post, pe_action_pseudo, __FUNCTION__, __LINE__);
         if (is_set(end->flags, pe_action_runnable)) {
             update_action_flags(n_data->post, pe_action_runnable, __FUNCTION__, __LINE__);
         } else {
             update_action_flags(n_data->post, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
         }
 
         add_hash_param(n_data->post->meta, "notify_type", "post");
         add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
 
         add_hash_param(n_data->post->meta, "notify_key_type", "post");
         add_hash_param(n_data->post->meta, "notify_key_operation", end->task);
 
         /* create post_notify_complete */
         key = generate_notify_key(rsc->id, "confirmed-post", end->task);
         n_data->post_done =
             custom_action(rsc, key, RSC_NOTIFIED, NULL, is_set(end->flags, pe_action_optional),
                           TRUE, data_set);
 
         n_data->post_done->priority = INFINITY;
         update_action_flags(n_data->post_done, pe_action_pseudo, __FUNCTION__, __LINE__);
         if (is_set(end->flags, pe_action_runnable)) {
             update_action_flags(n_data->post_done, pe_action_runnable, __FUNCTION__, __LINE__);
         } else {
             update_action_flags(n_data->post_done, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
         }
 
         add_hash_param(n_data->post_done->meta, "notify_type", "post");
         add_hash_param(n_data->post_done->meta, "notify_operation", n_data->action);
 
         add_hash_param(n_data->post_done->meta, "notify_key_type", "confirmed-post");
         add_hash_param(n_data->post_done->meta, "notify_key_operation", end->task);
 
         order_actions(end, n_data->post, pe_order_implies_then);
         order_actions(n_data->post, n_data->post_done, pe_order_implies_then);
     }
 
     if (start && end) {
         order_actions(n_data->pre_done, n_data->post, pe_order_optional);
     }
 
     if (safe_str_eq(action, RSC_STOP)) {
         action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
 
         order_actions(n_data->post_done, all_stopped, pe_order_optional);
     }
 
     return n_data;
 }
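
 /* Illustrative sketch, not actual Pacemaker code: the expected lifecycle of
  * the notify_data_t returned here, based on the functions in this file.  The
  * caller, and the 'start'/'started' action variables (a clone's start action
  * and its completion pseudo-action), are hypothetical;
  * create_secondary_notification() below shows a real in-tree variant of the
  * same sequence.
  *
  *     notify_data_t *n_data = create_notification_boundaries(rsc, RSC_START,
  *                                                            start, started,
  *                                                            data_set);
  *     if (n_data != NULL) {
  *         collect_notification_data(rsc, TRUE, TRUE, n_data);
  *         expand_notification_data(rsc, n_data, data_set);
  *         create_notifications(rsc, n_data, data_set);
  *         free_notification_data(n_data);
  *     }
  */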
 
 void
 collect_notification_data(resource_t * rsc, gboolean state, gboolean activity,
                           notify_data_t * n_data)
 {
 
     if(n_data->allowed_nodes == NULL) {
         n_data->allowed_nodes = rsc->allowed_nodes;
     }
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             collect_notification_data(child, state, activity, n_data);
         }
         return;
     }
 
     if (state) {
         notify_entry_t *entry = NULL;
 
         entry = calloc(1, sizeof(notify_entry_t));
         entry->rsc = rsc;
         if (rsc->running_on) {
             /* we only take the first one */
             entry->node = rsc->running_on->data;
         }
 
         pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(rsc->role));
 
         switch (rsc->role) {
             case RSC_ROLE_STOPPED:
                 n_data->inactive = g_list_prepend(n_data->inactive, entry);
                 break;
             case RSC_ROLE_STARTED:
                 n_data->active = g_list_prepend(n_data->active, entry);
                 break;
             case RSC_ROLE_SLAVE:
                 n_data->slave = g_list_prepend(n_data->slave, entry);
                 n_data->active = g_list_prepend(n_data->active,
                                                 dup_notify_entry(entry));
                 break;
             case RSC_ROLE_MASTER:
                 n_data->master = g_list_prepend(n_data->master, entry);
                 n_data->active = g_list_prepend(n_data->active,
                                                 dup_notify_entry(entry));
                 break;
             default:
                 crm_err("Unsupported notify role");
                 free(entry);
                 break;
         }
     }
 
     if (activity) {
         notify_entry_t *entry = NULL;
         enum action_tasks task;
 
         GListPtr gIter = rsc->actions;
 
         for (; gIter != NULL; gIter = gIter->next) {
             action_t *op = (action_t *) gIter->data;
 
             if (is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) {
                 task = text2task(op->task);
 
                 if(task == stop_rsc && op->node->details->unclean) {
                     // Create the entry anyway (merely extra noise if the node cannot be fenced)
                 } else if(is_not_set(op->flags, pe_action_runnable)) {
                     continue;
                 }
 
                 entry = calloc(1, sizeof(notify_entry_t));
                 entry->node = op->node;
                 entry->rsc = rsc;
 
                 switch (task) {
                     case start_rsc:
                         n_data->start = g_list_prepend(n_data->start, entry);
                         break;
                     case stop_rsc:
                         n_data->stop = g_list_prepend(n_data->stop, entry);
                         break;
                     case action_promote:
                         n_data->promote = g_list_prepend(n_data->promote, entry);
                         break;
                     case action_demote:
                         n_data->demote = g_list_prepend(n_data->demote, entry);
                         break;
                     default:
                         free(entry);
                         break;
                 }
             }
         }
     }
 }
 
 gboolean
 expand_notification_data(resource_t *rsc, notify_data_t * n_data, pe_working_set_t * data_set)
 {
     /* Expand the notification entries into a key=value hashtable
      * This hashtable is later used in action2xml()
      */
     gboolean required = FALSE;
     char *rsc_list = NULL;
     char *node_list = NULL;
     char *metal_list = NULL;
     const char *source = NULL;
     GListPtr nodes = NULL;
 
     if (n_data->stop) {
         n_data->stop = g_list_sort(n_data->stop, sort_notify_entries);
     }
     expand_list(n_data->stop, &rsc_list, &node_list);
     if (rsc_list != NULL && safe_str_neq(" ", rsc_list)) {
         if (safe_str_eq(n_data->action, RSC_STOP)) {
             required = TRUE;
         }
     }
     g_hash_table_insert(n_data->keys, strdup("notify_stop_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_stop_uname"), node_list);
 
     if (n_data->start) {
         n_data->start = g_list_sort(n_data->start, sort_notify_entries);
         if (rsc_list && safe_str_eq(n_data->action, RSC_START)) {
             required = TRUE;
         }
     }
     expand_list(n_data->start, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_start_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_start_uname"), node_list);
 
     if (n_data->demote) {
         n_data->demote = g_list_sort(n_data->demote, sort_notify_entries);
         if (safe_str_eq(n_data->action, RSC_DEMOTE)) {
             required = TRUE;
         }
     }
 
     expand_list(n_data->demote, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_demote_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_demote_uname"), node_list);
 
     if (n_data->promote) {
         n_data->promote = g_list_sort(n_data->promote, sort_notify_entries);
         if (safe_str_eq(n_data->action, RSC_PROMOTE)) {
             required = TRUE;
         }
     }
     expand_list(n_data->promote, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_promote_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_promote_uname"), node_list);
 
     if (n_data->active) {
         n_data->active = g_list_sort(n_data->active, sort_notify_entries);
     }
     expand_list(n_data->active, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_active_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_active_uname"), node_list);
 
     if (n_data->slave) {
         n_data->slave = g_list_sort(n_data->slave, sort_notify_entries);
     }
     expand_list(n_data->slave, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_slave_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_slave_uname"), node_list);
 
     if (n_data->master) {
         n_data->master = g_list_sort(n_data->master, sort_notify_entries);
     }
     expand_list(n_data->master, &rsc_list, &node_list);
     g_hash_table_insert(n_data->keys, strdup("notify_master_resource"), rsc_list);
     g_hash_table_insert(n_data->keys, strdup("notify_master_uname"), node_list);
 
     if (n_data->inactive) {
         n_data->inactive = g_list_sort(n_data->inactive, sort_notify_entries);
     }
     expand_list(n_data->inactive, &rsc_list, NULL);
     g_hash_table_insert(n_data->keys, strdup("notify_inactive_resource"), rsc_list);
 
     nodes = g_hash_table_get_values(n_data->allowed_nodes);
     expand_node_list(nodes, &node_list, NULL);
     g_hash_table_insert(n_data->keys, strdup("notify_available_uname"), node_list);
     g_list_free(nodes);
 
     source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
     if (safe_str_eq("host", source)) {
         expand_node_list(data_set->nodes, &node_list, &metal_list);
         g_hash_table_insert(n_data->keys, strdup("notify_all_hosts"),
                             metal_list);
     } else {
         expand_node_list(data_set->nodes, &node_list, NULL);
     }
     g_hash_table_insert(n_data->keys, strdup("notify_all_uname"), node_list);
 
     if (required && n_data->pre) {
         update_action_flags(n_data->pre, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
         update_action_flags(n_data->pre_done, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
     }
 
     if (required && n_data->post) {
         update_action_flags(n_data->post, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
         update_action_flags(n_data->post_done, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
     }
     return required;
 }
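
 /* Illustrative sketch, not actual output: for a hypothetical two-node cluster
  * that is starting clone instances rsc:0 and rsc:1, the table built above
  * would hold space-separated lists along the lines of:
  *
  *     notify_start_resource    -> "rsc:0 rsc:1"
  *     notify_start_uname       -> "node1 node2"
  *     notify_inactive_resource -> "rsc:0 rsc:1"    (current role is Stopped)
  *     notify_stop_resource     -> " "              (an empty list is a single space)
  *     notify_all_uname         -> "node1 node2"
  *
  * create_notifications() copies these keys into the relevant actions' meta
  * attributes via dup_attr(), and they are later used when the actions are
  * serialized in action2xml().
  */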
 
+/*!
+ * \internal
+ * \brief Find any remote connection start relevant to an action
+ *
+ * \param[in] action  Action to check
+ *
+ * \return If action is behind a remote connection, connection's start
+ */
+static pe_action_t *
+find_remote_start(pe_action_t *action)
+{
+    if (action && action->node) {
+        pe_resource_t *remote_rsc = action->node->details->remote_rsc;
+
+        if (remote_rsc) {
+            return find_first_action(remote_rsc->actions, NULL, RSC_START,
+                                     NULL);
+        }
+    }
+    return NULL;
+}
+
 void
 create_notifications(resource_t * rsc, notify_data_t * n_data, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     action_t *stop = NULL;
     action_t *start = NULL;
     enum action_tasks task = text2task(n_data->action);
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             create_notifications(child, n_data, data_set);
         }
         return;
     }
 
     /* Copy notification details into standard ops */
 
     for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
         action_t *op = (action_t *) gIter->data;
 
         if (is_set(op->flags, pe_action_optional) == FALSE && op->node != NULL) {
             enum action_tasks t = text2task(op->task);
 
             switch (t) {
                 case start_rsc:
                 case stop_rsc:
                 case action_promote:
                 case action_demote:
                     g_hash_table_foreach(n_data->keys, dup_attr, op->meta);
                     break;
                 default:
                     break;
             }
         }
     }
 
     switch (task) {
         case start_rsc:
             if(g_list_length(n_data->start) == 0) {
                 pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)",
                              n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role));
                 return;
             }
             break;
         case action_promote:
             if(g_list_length(n_data->promote) == 0) {
                 pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)",
                              n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role));
                 return;
             }
             break;
         case action_demote:
             if(g_list_length(n_data->demote) == 0) {
                 pe_rsc_trace(rsc, "Skipping empty notification for: %s.%s (%s->%s)",
                              n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role));
                 return;
             }
             break;
         default:
             /* We cannot do the same for stop_rsc/n_data->stop, as it
              * might be implied by fencing
              */
             break;
     }
 
     pe_rsc_trace(rsc, "Creating notifications for: %s.%s (%s->%s)",
                  n_data->action, rsc->id, role2text(rsc->role), role2text(rsc->next_role));
 
     stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL);
     start = find_first_action(rsc->actions, NULL, RSC_START, NULL);
 
     /* stop / demote */
     if (rsc->role != RSC_ROLE_STOPPED) {
         if (task == stop_rsc || task == action_demote) {
             gIter = rsc->running_on;
             for (; gIter != NULL; gIter = gIter->next) {
                 node_t *current_node = (node_t *) gIter->data;
 
                 /* If this stop action is a pseudo-action resulting from the
                  * current node being fenced, the stop is implied by the
                  * fencing action, so there is no reason to send the fenced
                  * node a stop notification.
                  */
                 if (stop &&
                     is_set(stop->flags, pe_action_pseudo) &&
                     (current_node->details->unclean || current_node->details->remote_requires_reset) ) {
 
                     continue;
                 }
 
                 pe_notify(rsc, current_node, n_data->pre, n_data->pre_done, n_data, data_set);
                 if (task == action_demote || stop == NULL
                     || is_set(stop->flags, pe_action_optional)) {
                     pe_post_notify(rsc, current_node, n_data, data_set);
                 }
             }
         }
     }
 
     /* start / promote */
     if (rsc->next_role != RSC_ROLE_STOPPED) {
         if (rsc->allocated_to == NULL) {
             pe_proc_err("Next role '%s' but %s is not allocated", role2text(rsc->next_role),
                         rsc->id);
 
         } else if (task == start_rsc || task == action_promote) {
+
+            if (start) {
+                pe_action_t *remote_start = find_remote_start(start);
+
+                if (remote_start
+                    && is_not_set(remote_start->flags, pe_action_runnable)) {
+                    /* Start and promote actions for a clone instance behind
+                     * a Pacemaker Remote connection happen after the
+                     * connection starts. If the connection start is blocked, do
+                     * not schedule notifications for these actions.
+                     */
+                    return;
+                }
+            }
             if (task != start_rsc || start == NULL || is_set(start->flags, pe_action_optional)) {
                 pe_notify(rsc, rsc->allocated_to, n_data->pre, n_data->pre_done, n_data, data_set);
             }
             pe_post_notify(rsc, rsc->allocated_to, n_data, data_set);
         }
     }
 }
 
 void
 free_notification_data(notify_data_t * n_data)
 {
     if (n_data == NULL) {
         return;
     }
 
     g_list_free_full(n_data->stop, free);
     g_list_free_full(n_data->start, free);
     g_list_free_full(n_data->demote, free);
     g_list_free_full(n_data->promote, free);
     g_list_free_full(n_data->master, free);
     g_list_free_full(n_data->slave, free);
     g_list_free_full(n_data->active, free);
     g_list_free_full(n_data->inactive, free);
     g_hash_table_destroy(n_data->keys);
     free(n_data);
 }
 
 void
 create_secondary_notification(pe_action_t *action, resource_t *rsc,
                               pe_action_t *stonith_op,
                               pe_working_set_t *data_set)
 {
     notify_data_t *n_data;
 
     crm_info("Creating secondary notification for %s", action->uuid);
     n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op,
                                             data_set);
     collect_notification_data(rsc, TRUE, FALSE, n_data);
     g_hash_table_insert(n_data->keys, strdup("notify_stop_resource"),
                         strdup(rsc->id));
     g_hash_table_insert(n_data->keys, strdup("notify_stop_uname"),
                         strdup(action->node->details->uname));
     create_notifications(uber_parent(rsc), n_data, data_set);
     free_notification_data(n_data);
 }
diff --git a/include/crm_internal.h b/include/crm_internal.h
index 9823acf0c1..b7464e7fcb 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -1,304 +1,303 @@
 /*
  * Copyright 2006-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef CRM_INTERNAL__H
 #  define CRM_INTERNAL__H
 
 #  include <config.h>
 #  include <portability.h>
 
 #  include <glib.h>
 #  include <stdbool.h>
 #  include <libxml/tree.h>
 
 #  include <crm/lrmd.h>
 #  include <crm/common/logging.h>
 #  include <crm/common/ipcs.h>
 #  include <crm/common/internal.h>
 
 /* Dynamic loading of libraries */
 void *find_library_function(void **handle, const char *lib, const char *fn, int fatal);
-void *convert_const_pointer(const void *ptr);
 
 /* For ACLs */
 char *uid2username(uid_t uid);
 const char *crm_acl_get_set_user(xmlNode * request, const char *field, const char *peer_user);
 
 #  if ENABLE_ACL
 #    include <string.h>
 static inline gboolean
 is_privileged(const char *user)
 {
     if (user == NULL) {
         return FALSE;
     } else if (strcmp(user, CRM_DAEMON_USER) == 0) {
         return TRUE;
     } else if (strcmp(user, "root") == 0) {
         return TRUE;
     }
     return FALSE;
 }
 #  endif
 
 /* CLI option processing */
 #  ifdef HAVE_GETOPT_H
 #    include <getopt.h>
 #  else
 #    define no_argument 0
 #    define required_argument 1
 #  endif
 
 #  define pcmk_option_default	0x00000
 #  define pcmk_option_hidden	0x00001
 #  define pcmk_option_paragraph	0x00002
 #  define pcmk_option_example	0x00004
 
 struct crm_option {
     /* Fields from 'struct option' in getopt.h */
     /* name of long option */
     const char *name;
     /*
      * one of no_argument, required_argument, and optional_argument:
      * whether option takes an argument
      */
     int has_arg;
     /* if not NULL, set *flag to val when option found */
     int *flag;
     /* if flag not NULL, value to set *flag to; else return value */
     int val;
 
     /* Custom fields */
     const char *desc;
     long flags;
 };
 
 void crm_set_options(const char *short_options, const char *usage, struct crm_option *long_options,
                      const char *app_desc);
 int crm_get_option(int argc, char **argv, int *index);
 int crm_get_option_long(int argc, char **argv, int *index, const char **longname);
 crm_exit_t crm_help(char cmd, crm_exit_t exit_code);
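
 /* Illustrative sketch, not actual Pacemaker code: a tool using this API would
  * typically declare a NULL-terminated option table and loop over
  * crm_get_option().  The option names, descriptions, and variables below are
  * made up.
  *
  *     static struct crm_option long_options[] = {
  *         { "help",    no_argument,       NULL, '?', "Display this text",     pcmk_option_default },
  *         { "verbose", no_argument,       NULL, 'V', "Increase debug output", pcmk_option_default },
  *         { "file",    required_argument, NULL, 'f', "File to operate on",    pcmk_option_default },
  *         { NULL, 0, NULL, 0, NULL, 0 }
  *     };
  *
  *     int flag, index = 0, verbose = 0;
  *     const char *file = NULL;
  *
  *     crm_set_options(NULL, "[options]", long_options, "An example tool");
  *     while ((flag = crm_get_option(argc, argv, &index)) != -1) {
  *         switch (flag) {
  *             case 'V':
  *                 verbose++;
  *                 break;
  *             case 'f':
  *                 file = optarg;   // argument value supplied by getopt
  *                 break;
  *             default:
  *                 crm_help('?', CRM_EX_USAGE);
  *                 break;
  *         }
  *     }
  */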
 
 /* Cluster Option Processing */
 typedef struct pe_cluster_option_s {
     const char *name;
     const char *alt_name;
     const char *type;
     const char *values;
     const char *default_value;
 
      gboolean(*is_valid) (const char *);
 
     const char *description_short;
     const char *description_long;
 
 } pe_cluster_option;
 
 const char *cluster_option(GHashTable * options, gboolean(*validate) (const char *),
                            const char *name, const char *old_name, const char *def_value);
 
 const char *get_cluster_pref(GHashTable * options, pe_cluster_option * option_list, int len,
                              const char *name);
 
 void config_metadata(const char *name, const char *version, const char *desc_short,
                      const char *desc_long, pe_cluster_option * option_list, int len);
 
 void verify_all_options(GHashTable * options, pe_cluster_option * option_list, int len);
 gboolean check_time(const char *value);
 gboolean check_timer(const char *value);
 gboolean check_boolean(const char *value);
 gboolean check_number(const char *value);
 gboolean check_positive_number(const char *value);
 gboolean check_quorum(const char *value);
 gboolean check_script(const char *value);
 gboolean check_utilization(const char *value);
 long crm_get_sbd_timeout(void);
 long crm_auto_watchdog_timeout(void);
 gboolean check_sbd_timeout(const char *value);
 void crm_args_fini(void);
 
 /* char2score */
 extern int node_score_red;
 extern int node_score_green;
 extern int node_score_yellow;
 
 /* Assorted convenience functions */
 void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile);
 
 // printf-style format to create operation ID from resource, action, interval
 #define CRM_OP_FMT "%s_%s_%u"
 
 static inline long long
 crm_clear_bit(const char *function, int line, const char *target, long long word, long long bit)
 {
     long long rc = (word & ~bit);
 
     if (rc == word) {
         /* Unchanged */
     } else if (target) {
         crm_trace("Bit 0x%.8llx for %s cleared by %s:%d", bit, target, function, line);
     } else {
         crm_trace("Bit 0x%.8llx cleared by %s:%d", bit, function, line);
     }
 
     return rc;
 }
 
 static inline long long
 crm_set_bit(const char *function, int line, const char *target, long long word, long long bit)
 {
     long long rc = (word | bit);
 
     if (rc == word) {
         /* Unchanged */
     } else if (target) {
         crm_trace("Bit 0x%.8llx for %s set by %s:%d", bit, target, function, line);
     } else {
         crm_trace("Bit 0x%.8llx set by %s:%d", bit, function, line);
     }
 
     return rc;
 }
 
 #  define set_bit(word, bit) word = crm_set_bit(__FUNCTION__, __LINE__, NULL, word, bit)
 #  define clear_bit(word, bit) word = crm_clear_bit(__FUNCTION__, __LINE__, NULL, word, bit)
 
 char *generate_hash_key(const char *crm_msg_reference, const char *sys);
 
 const char *daemon_option(const char *option);
 void set_daemon_option(const char *option, const char *value);
 gboolean daemon_option_enabled(const char *daemon, const char *option);
 void strip_text_nodes(xmlNode * xml);
 void pcmk_panic(const char *origin);
 void sysrq_init(void);
 pid_t pcmk_locate_sbd(void);
 
 #  define crm_config_err(fmt...) { crm_config_error = TRUE; crm_err(fmt); }
 #  define crm_config_warn(fmt...) { crm_config_warning = TRUE; crm_warn(fmt); }
 
 #  define F_ATTRD_KEY		"attr_key"
 #  define F_ATTRD_ATTRIBUTE	"attr_name"
 #  define F_ATTRD_REGEX 	"attr_regex"
 #  define F_ATTRD_TASK		"task"
 #  define F_ATTRD_VALUE		"attr_value"
 #  define F_ATTRD_SET		"attr_set"
 #  define F_ATTRD_IS_REMOTE	"attr_is_remote"
 #  define F_ATTRD_IS_PRIVATE     "attr_is_private"
 #  define F_ATTRD_SECTION	"attr_section"
 #  define F_ATTRD_DAMPEN	"attr_dampening"
 #  define F_ATTRD_HOST		"attr_host"
 #  define F_ATTRD_HOST_ID	"attr_host_id"
 #  define F_ATTRD_USER		"attr_user"
 #  define F_ATTRD_WRITER	"attr_writer"
 #  define F_ATTRD_VERSION	"attr_version"
 #  define F_ATTRD_RESOURCE          "attr_resource"
 #  define F_ATTRD_OPERATION         "attr_clear_operation"
 #  define F_ATTRD_INTERVAL          "attr_clear_interval"
 
 /* attrd operations */
 #  define ATTRD_OP_PEER_REMOVE   "peer-remove"
 #  define ATTRD_OP_UPDATE        "update"
 #  define ATTRD_OP_UPDATE_BOTH   "update-both"
 #  define ATTRD_OP_UPDATE_DELAY  "update-delay"
 #  define ATTRD_OP_QUERY         "query"
 #  define ATTRD_OP_REFRESH       "refresh"
 #  define ATTRD_OP_FLUSH         "flush"
 #  define ATTRD_OP_SYNC          "sync"
 #  define ATTRD_OP_SYNC_RESPONSE "sync-response"
 #  define ATTRD_OP_CLEAR_FAILURE "clear-failure"
 
 #  define PCMK_ENV_PHYSICAL_HOST "physical_host"
 
 
 #  if SUPPORT_COROSYNC
 #    include <qb/qbipc_common.h>
 #    include <corosync/corotypes.h>
 typedef struct qb_ipc_request_header cs_ipc_header_request_t;
 typedef struct qb_ipc_response_header cs_ipc_header_response_t;
 #  else
 typedef struct {
     int size __attribute__ ((aligned(8)));
     int id __attribute__ ((aligned(8)));
 } __attribute__ ((aligned(8))) cs_ipc_header_request_t;
 
 typedef struct {
     int size __attribute__ ((aligned(8)));
     int id __attribute__ ((aligned(8)));
     int error __attribute__ ((aligned(8)));
 } __attribute__ ((aligned(8))) cs_ipc_header_response_t;
 
 #  endif
 
 void
 attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb);
 void
 stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb);
 
 qb_ipcs_service_t *
 crmd_ipc_server_init(struct qb_ipcs_service_handlers *cb);
 
 void cib_ipc_servers_init(qb_ipcs_service_t **ipcs_ro,
         qb_ipcs_service_t **ipcs_rw,
         qb_ipcs_service_t **ipcs_shm,
         struct qb_ipcs_service_handlers *ro_cb,
         struct qb_ipcs_service_handlers *rw_cb);
 
 void cib_ipc_servers_destroy(qb_ipcs_service_t *ipcs_ro,
         qb_ipcs_service_t *ipcs_rw,
         qb_ipcs_service_t *ipcs_shm);
 
 static inline void *realloc_safe(void *ptr, size_t size)
 {
     void *ret = realloc(ptr, size);
 
     if (ret == NULL) {
         free(ptr); /* make coverity happy */
         abort();
     }
 
     return ret;
 }
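
 /* Illustrative sketch, not actual Pacemaker code: because realloc_safe()
  * aborts instead of returning NULL, callers such as expand_list() in the
  * scheduler can grow a space-separated string without checking the result:
  *
  *     int existing_len = buf? strlen(buf) : 0;
  *     int len = 2 + strlen(item);    // +1 for the space, +1 for the terminator
  *
  *     buf = realloc_safe(buf, len + existing_len);
  *     sprintf(buf + existing_len, "%s%s", existing_len == 0? "" : " ", item);
  */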
 
 const char *crm_xml_add_last_written(xmlNode *xml_node);
 void crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth);
 void crm_buffer_add_char(char **buffer, int *offset, int *max, char c);
 
 gboolean crm_digest_verify(xmlNode *input, const char *expected);
 
 /* cross-platform compatibility functions */
 char *crm_compat_realpath(const char *path);
 
 /* IPC Proxy Backend Shared Functions */
 typedef struct remote_proxy_s {
     char *node_name;
     char *session_id;
 
     gboolean is_local;
 
     crm_ipc_t *ipc;
     mainloop_io_t *source;
     uint32_t last_request_id;
     lrmd_t *lrm;
 
 } remote_proxy_t;
 
 remote_proxy_t *remote_proxy_new(
     lrmd_t *lrmd, struct ipc_client_callbacks *proxy_callbacks,
     const char *node_name, const char *session_id, const char *channel);
 
 int  remote_proxy_check(lrmd_t *lrmd, GHashTable *hash);
 void remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg);
 void remote_proxy_ack_shutdown(lrmd_t *lrmd);
 void remote_proxy_nack_shutdown(lrmd_t *lrmd);
 
 int  remote_proxy_dispatch(const char *buffer, ssize_t length, gpointer userdata);
 void remote_proxy_disconnected(gpointer data);
 void remote_proxy_free(gpointer data);
 
 void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg);
 void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg, int msg_id);
 
 #endif                          /* CRM_INTERNAL__H */
diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
index 29c3350013..dd8742f218 100644
--- a/lib/common/Makefile.am
+++ b/lib/common/Makefile.am
@@ -1,42 +1,43 @@
 #
 # Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 include $(top_srcdir)/Makefile.common
 
 AM_CPPFLAGS		+= -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu -DPCMK_SCHEMAS_EMERGENCY_XSLT=0
 
 ## libraries
 lib_LTLIBRARIES	= libcrmcommon.la
 
-# Can't use -Wcast-qual here because glib insists on pretending things are const  
-# when they're not and thus we need the crm_element_value_const() hack
+# Disable -Wcast-qual if used, because we do some hacky casting,
+# and because libxml2 has some signatures that should be const but aren't
+# for backward compatibility reasons.
 
 # s390 needs -fPIC 
 # s390-suse-linux/bin/ld: .libs/ipc.o: relocation R_390_PC32DBL against `__stack_chk_fail@@GLIBC_2.4' can not be used when making a shared object; recompile with -fPIC
 
 CFLAGS		= $(CFLAGS_COPY:-Wcast-qual=) -fPIC
 
 noinst_HEADERS		= crmcommon_private.h
 
 libcrmcommon_la_LDFLAGS	= -version-info 34:0:0
 
 libcrmcommon_la_CFLAGS	= $(CFLAGS_HARDENED_LIB)
 libcrmcommon_la_LDFLAGS	+= $(LDFLAGS_HARDENED_LIB)
 
 libcrmcommon_la_LIBADD	= @LIBADD_DL@ $(GNUTLSLIBS)
 
 libcrmcommon_la_SOURCES	= compat.c digest.c ipc.c io.c procfs.c utils.c xml.c	\
 			  iso8601.c remote.c mainloop.c logging.c watchdog.c	\
 			  schemas.c strings.c xpath.c attrd_client.c alerts.c	\
 			  operations.c pid.c results.c acl.c agents.c
 if BUILD_CIBSECRETS
 libcrmcommon_la_SOURCES	+= cib_secrets.c
 endif
 #libcrmcommon_la_SOURCES	+= $(top_builddir)/lib/gnu/md5.c
 libcrmcommon_la_SOURCES	+= ../gnu/md5.c
 
 clean-generic:
 	rm -f *.log *.debug *.xml *~
diff --git a/lib/common/utils.c b/lib/common/utils.c
index 65eb76252b..6a81d95a74 100644
--- a/lib/common/utils.c
+++ b/lib/common/utils.c
@@ -1,1204 +1,1197 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <dlfcn.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <sys/utsname.h>
 
 #include <stdio.h>
 #include <unistd.h>
 #include <string.h>
 #include <stdlib.h>
 #include <limits.h>
 #include <pwd.h>
 #include <time.h>
 #include <libgen.h>
 #include <signal.h>
 
 #include <qb/qbdefs.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 #include <crm/msg_xml.h>
 #include <crm/cib/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 #include <crm/common/ipc.h>
 #include <crm/common/iso8601.h>
 #include <crm/common/mainloop.h>
 #include <libxml2/libxml/relaxng.h>
 
 #ifndef MAXLINE
 #  define MAXLINE 512
 #endif
 
 #ifdef HAVE_GETOPT_H
 #  include <getopt.h>
 #endif
 
 #ifndef PW_BUFFER_LEN
 #  define PW_BUFFER_LEN		500
 #endif
 
 CRM_TRACE_INIT_DATA(common);
 
 gboolean crm_config_error = FALSE;
 gboolean crm_config_warning = FALSE;
 char *crm_system_name = NULL;
 
 int node_score_red = 0;
 int node_score_green = 0;
 int node_score_yellow = 0;
 
 static struct crm_option *crm_long_options = NULL;
 static const char *crm_app_description = NULL;
 static char *crm_short_options = NULL;
 static const char *crm_app_usage = NULL;
 
 gboolean
 check_time(const char *value)
 {
     if (crm_get_msec(value) < 5000) {
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 check_timer(const char *value)
 {
     if (crm_get_msec(value) < 0) {
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 check_boolean(const char *value)
 {
     int tmp = FALSE;
 
     if (crm_str_to_boolean(value, &tmp) != 1) {
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 check_number(const char *value)
 {
     errno = 0;
     if (value == NULL) {
         return FALSE;
 
     } else if (safe_str_eq(value, CRM_MINUS_INFINITY_S)) {
 
     } else if (safe_str_eq(value, CRM_INFINITY_S)) {
 
     } else {
         crm_int_helper(value, NULL);
     }
 
     if (errno != 0) {
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 check_positive_number(const char* value)
 {
     if (safe_str_eq(value, CRM_INFINITY_S) || (crm_int_helper(value, NULL))) {
         return TRUE;
     }
     return FALSE;
 }
 
 gboolean
 check_quorum(const char *value)
 {
     if (safe_str_eq(value, "stop")) {
         return TRUE;
 
     } else if (safe_str_eq(value, "freeze")) {
         return TRUE;
 
     } else if (safe_str_eq(value, "ignore")) {
         return TRUE;
 
     } else if (safe_str_eq(value, "suicide")) {
         return TRUE;
     }
     return FALSE;
 }
 
 gboolean
 check_script(const char *value)
 {
     struct stat st;
 
     if(safe_str_eq(value, "/dev/null")) {
         return TRUE;
     }
 
     if(stat(value, &st) != 0) {
         crm_err("Script %s does not exist", value);
         return FALSE;
     }
 
     if(S_ISREG(st.st_mode) == 0) {
         crm_err("Script %s is not a regular file", value);
         return FALSE;
     }
 
     if( (st.st_mode & (S_IXUSR | S_IXGRP )) == 0) {
         crm_err("Script %s is not executable", value);
         return FALSE;
     }
 
     return TRUE;
 }
 
 gboolean
 check_utilization(const char *value)
 {
     char *end = NULL;
     long number = strtol(value, &end, 10);
 
     if(end && end[0] != '%') {
         return FALSE;
     } else if(number < 0) {
         return FALSE;
     }
 
     return TRUE;
 }
 
 void
 crm_args_fini()
 {
     free(crm_short_options);
     crm_short_options = NULL;
 }
 
 int
 char2score(const char *score)
 {
     int score_f = 0;
 
     if (score == NULL) {
 
     } else if (safe_str_eq(score, CRM_MINUS_INFINITY_S)) {
         score_f = -CRM_SCORE_INFINITY;
 
     } else if (safe_str_eq(score, CRM_INFINITY_S)) {
         score_f = CRM_SCORE_INFINITY;
 
     } else if (safe_str_eq(score, CRM_PLUS_INFINITY_S)) {
         score_f = CRM_SCORE_INFINITY;
 
     } else if (safe_str_eq(score, "red")) {
         score_f = node_score_red;
 
     } else if (safe_str_eq(score, "yellow")) {
         score_f = node_score_yellow;
 
     } else if (safe_str_eq(score, "green")) {
         score_f = node_score_green;
 
     } else {
         score_f = crm_parse_int(score, NULL);
         if (score_f > 0 && score_f > CRM_SCORE_INFINITY) {
             score_f = CRM_SCORE_INFINITY;
 
         } else if (score_f < 0 && score_f < -CRM_SCORE_INFINITY) {
             score_f = -CRM_SCORE_INFINITY;
         }
     }
 
     return score_f;
 }
 
 char *
 score2char_stack(int score, char *buf, size_t len)
 {
     if (score >= CRM_SCORE_INFINITY) {
         strncpy(buf, CRM_INFINITY_S, 9);
     } else if (score <= -CRM_SCORE_INFINITY) {
         strncpy(buf, CRM_MINUS_INFINITY_S , 10);
     } else {
         return crm_itoa_stack(score, buf, len);
     }
 
     return buf;
 }
 
 char *
 score2char(int score)
 {
     if (score >= CRM_SCORE_INFINITY) {
         return strdup(CRM_INFINITY_S);
 
     } else if (score <= -CRM_SCORE_INFINITY) {
         return strdup(CRM_MINUS_INFINITY_S);
     }
     return crm_itoa(score);
 }
 
 const char *
 cluster_option(GHashTable * options, gboolean(*validate) (const char *),
                const char *name, const char *old_name, const char *def_value)
 {
     const char *value = NULL;
     char *new_value = NULL;
 
     CRM_ASSERT(name != NULL);
 
     if (options) {
         value = g_hash_table_lookup(options, name);
 
         if ((value == NULL) && old_name) {
             value = g_hash_table_lookup(options, old_name);
             if (value != NULL) {
                 crm_config_warn("Support for legacy name '%s' for cluster option '%s'"
                                 " is deprecated and will be removed in a future release",
                                 old_name, name);
 
                 // Inserting copy with current name ensures we only warn once
                 new_value = strdup(value);
                 g_hash_table_insert(options, strdup(name), new_value);
                 value = new_value;
             }
         }
 
         if (value && validate && (validate(value) == FALSE)) {
             crm_config_err("Resetting cluster option '%s' to default: value '%s' is invalid",
                            name, value);
             value = NULL;
         }
 
         if (value) {
             return value;
         }
     }
 
     // No value found, use default
     value = def_value;
 
     if (value == NULL) {
         crm_trace("No value or default provided for cluster option '%s'",
                   name);
         return NULL;
     }
 
     if (validate) {
         CRM_CHECK(validate(value) != FALSE,
                   crm_err("Bug: default value for cluster option '%s' is invalid", name);
                   return NULL);
     }
 
     crm_trace("Using default value '%s' for cluster option '%s'",
               value, name);
     if (options) {
         new_value = strdup(value);
         g_hash_table_insert(options, strdup(name), new_value);
         value = new_value;
     }
     return value;
 }
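
 /* Illustrative examples, not actual Pacemaker code: assuming an options table
  * containing { "no-quorum-policy" = "freeze" }, lookups behave roughly as:
  *
  *     cluster_option(options, check_quorum, "no-quorum-policy", NULL, "stop")
  *         -> "freeze"    (configured value is present and valid)
  *
  *     cluster_option(options, check_timer, "cluster-delay", NULL, "60s")
  *         -> "60s"       (no value configured, so the default is used and
  *                         also inserted into the table)
  */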
 
 const char *
 get_cluster_pref(GHashTable * options, pe_cluster_option * option_list, int len, const char *name)
 {
     const char *value = NULL;
 
     for (int lpc = 0; lpc < len; lpc++) {
         if (safe_str_eq(name, option_list[lpc].name)) {
             value = cluster_option(options,
                                    option_list[lpc].is_valid,
                                    option_list[lpc].name,
                                    option_list[lpc].alt_name,
                                    option_list[lpc].default_value);
             return value;
         }
     }
     CRM_CHECK(FALSE, crm_err("Bug: looking for unknown option '%s'", name));
     return NULL;
 }
 
 void
 config_metadata(const char *name, const char *version, const char *desc_short,
                 const char *desc_long, pe_cluster_option * option_list, int len)
 {
     int lpc = 0;
 
     fprintf(stdout, "<?xml version=\"1.0\"?>"
             "<!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n"
             "<resource-agent name=\"%s\">\n"
             "  <version>%s</version>\n"
             "  <longdesc lang=\"en\">%s</longdesc>\n"
             "  <shortdesc lang=\"en\">%s</shortdesc>\n"
             "  <parameters>\n", name, version, desc_long, desc_short);
 
     for (lpc = 0; lpc < len; lpc++) {
         if (option_list[lpc].description_long == NULL && option_list[lpc].description_short == NULL) {
             continue;
         }
         fprintf(stdout, "    <parameter name=\"%s\" unique=\"0\">\n"
                 "      <shortdesc lang=\"en\">%s</shortdesc>\n"
                 "      <content type=\"%s\" default=\"%s\"/>\n"
                 "      <longdesc lang=\"en\">%s%s%s</longdesc>\n"
                 "    </parameter>\n",
                 option_list[lpc].name,
                 option_list[lpc].description_short,
                 option_list[lpc].type,
                 option_list[lpc].default_value,
                 option_list[lpc].description_long ? option_list[lpc].
                 description_long : option_list[lpc].description_short,
                 option_list[lpc].values ? "  Allowed values: " : "",
                 option_list[lpc].values ? option_list[lpc].values : "");
     }
     fprintf(stdout, "  </parameters>\n</resource-agent>\n");
 }
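
 /* Illustrative sketch, not actual output: for a hypothetical option named
  * "example-timeout" with type "time" and default "20s", the loop above emits
  * a block like the following (with "  Allowed values: ..." appended to the
  * longdesc when a values list is defined):
  *
  *     <parameter name="example-timeout" unique="0">
  *       <shortdesc lang="en">Example short description</shortdesc>
  *       <content type="time" default="20s"/>
  *       <longdesc lang="en">Example long description</longdesc>
  *     </parameter>
  */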
 
 void
 verify_all_options(GHashTable * options, pe_cluster_option * option_list, int len)
 {
     int lpc = 0;
 
     for (lpc = 0; lpc < len; lpc++) {
         cluster_option(options,
                        option_list[lpc].is_valid,
                        option_list[lpc].name,
                        option_list[lpc].alt_name, option_list[lpc].default_value);
     }
 }
 
 char *
 generate_hash_key(const char *crm_msg_reference, const char *sys)
 {
     char *hash_key = crm_concat(sys ? sys : "none", crm_msg_reference, '_');
 
     crm_trace("created hash key: (%s)", hash_key);
     return hash_key;
 }
 
 
 int
 crm_user_lookup(const char *name, uid_t * uid, gid_t * gid)
 {
     int rc = pcmk_ok;
     char *buffer = NULL;
     struct passwd pwd;
     struct passwd *pwentry = NULL;
 
     buffer = calloc(1, PW_BUFFER_LEN);
     rc = getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry);
     if (pwentry) {
         if (uid) {
             *uid = pwentry->pw_uid;
         }
         if (gid) {
             *gid = pwentry->pw_gid;
         }
         crm_trace("User %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid);
 
     } else {
         rc = rc? -rc : -EINVAL;
         crm_info("User %s lookup: %s", name, pcmk_strerror(rc));
     }
 
     free(buffer);
     return rc;
 }
 
 static int
 crm_version_helper(const char *text, char **end_text)
 {
     int atoi_result = -1;
 
     CRM_ASSERT(end_text != NULL);
 
     errno = 0;
 
     if (text != NULL && text[0] != 0) {
         atoi_result = (int)strtol(text, end_text, 10);
 
         if (errno == EINVAL) {
             crm_err("Conversion of '%s' %c failed", text, text[0]);
             atoi_result = -1;
         }
     }
     return atoi_result;
 }
 
 /*
  * version1 < version2 : -1
  * version1 = version2 :  0
  * version1 > version2 :  1
  */
 int
 compare_version(const char *version1, const char *version2)
 {
     int rc = 0;
     int lpc = 0;
     char *ver1_copy = NULL, *ver2_copy = NULL;
     char *rest1 = NULL, *rest2 = NULL;
 
     if (version1 == NULL && version2 == NULL) {
         return 0;
     } else if (version1 == NULL) {
         return -1;
     } else if (version2 == NULL) {
         return 1;
     }
 
     ver1_copy = strdup(version1);
     ver2_copy = strdup(version2);
     rest1 = ver1_copy;
     rest2 = ver2_copy;
 
     while (1) {
         int digit1 = 0;
         int digit2 = 0;
 
         lpc++;
 
         if (rest1 == rest2) {
             break;
         }
 
         if (rest1 != NULL) {
             digit1 = crm_version_helper(rest1, &rest1);
         }
 
         if (rest2 != NULL) {
             digit2 = crm_version_helper(rest2, &rest2);
         }
 
         if (digit1 < digit2) {
             rc = -1;
             break;
 
         } else if (digit1 > digit2) {
             rc = 1;
             break;
         }
 
         if (rest1 != NULL && rest1[0] == '.') {
             rest1++;
         }
         if (rest1 != NULL && rest1[0] == 0) {
             rest1 = NULL;
         }
 
         if (rest2 != NULL && rest2[0] == '.') {
             rest2++;
         }
         if (rest2 != NULL && rest2[0] == 0) {
             rest2 = NULL;
         }
     }
 
     free(ver1_copy);
     free(ver2_copy);
 
     if (rc == 0) {
         crm_trace("%s == %s (%d)", version1, version2, lpc);
     } else if (rc < 0) {
         crm_trace("%s < %s (%d)", version1, version2, lpc);
     } else if (rc > 0) {
         crm_trace("%s > %s (%d)", version1, version2, lpc);
     }
 
     return rc;
 }
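
 /* Illustrative examples, derived from the implementation above:
  *
  *     compare_version("1.2", "1.10")  -> -1   (fields compare numerically, not lexically)
  *     compare_version("2.0", "2")     ->  0   (missing trailing fields count as 0)
  *     compare_version(NULL,  "1.0")   -> -1   (NULL sorts before any version)
  */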
 
 gboolean do_stderr = FALSE;
 
 #ifndef NUMCHARS
 #  define	NUMCHARS	"0123456789."
 #endif
 
 #ifndef WHITESPACE
 #  define	WHITESPACE	" \t\n\r\f"
 #endif
 
 guint
 crm_parse_interval_spec(const char *input)
 {
     long long msec = 0;
 
     if (input == NULL) {
         return 0;
 
     } else if (input[0] != 'P') {
         long long tmp = crm_get_msec(input);
 
         if(tmp > 0) {
             msec = tmp;
         }
 
     } else {
         crm_time_t *period_s = crm_time_parse_duration(input);
 
         msec = 1000 * crm_time_get_seconds(period_s);
         crm_time_free(period_s);
     }
 
     return (msec <= 0)? 0 : ((msec >= G_MAXUINT)? G_MAXUINT : (guint) msec);
 }
 
 long long
 crm_get_msec(const char *input)
 {
     const char *cp = input;
     const char *units;
     long long multiplier = 1000;
     long long divisor = 1;
     long long msec = -1;
     char *end_text = NULL;
 
     /* double dret; */
 
     if (input == NULL) {
         return msec;
     }
 
     cp += strspn(cp, WHITESPACE);
     units = cp + strspn(cp, NUMCHARS);
     units += strspn(units, WHITESPACE);
 
     if (strchr(NUMCHARS, *cp) == NULL) {
         return msec;
     }
 
     if (strncasecmp(units, "ms", 2) == 0 || strncasecmp(units, "msec", 4) == 0) {
         multiplier = 1;
         divisor = 1;
     } else if (strncasecmp(units, "us", 2) == 0 || strncasecmp(units, "usec", 4) == 0) {
         multiplier = 1;
         divisor = 1000;
     } else if (strncasecmp(units, "s", 1) == 0 || strncasecmp(units, "sec", 3) == 0) {
         multiplier = 1000;
         divisor = 1;
     } else if (strncasecmp(units, "m", 1) == 0 || strncasecmp(units, "min", 3) == 0) {
         multiplier = 60 * 1000;
         divisor = 1;
     } else if (strncasecmp(units, "h", 1) == 0 || strncasecmp(units, "hr", 2) == 0) {
         multiplier = 60 * 60 * 1000;
         divisor = 1;
     } else if (*units != EOS && *units != '\n' && *units != '\r') {
         return msec;
     }
 
     msec = crm_int_helper(cp, &end_text);
     if (msec > LLONG_MAX/multiplier) {
         /* Arithmetic overflow: multiplier and divisor are mutually exclusive
          * (at most one exceeds 1), so only the multiplication can overflow
          */
         return LLONG_MAX;
     }
     msec *= multiplier;
     msec /= divisor;
     /* dret += 0.5; */
     /* msec = (long long)dret; */
     return msec;
 }
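
 /* Illustrative examples, derived from the implementation above:
  *
  *     crm_get_msec("10")    ->   10000   (a bare number is taken as seconds)
  *     crm_get_msec("10ms")  ->      10
  *     crm_get_msec("2min")  ->  120000
  *     crm_get_msec("1h")    -> 3600000
  *     crm_get_msec("fred")  ->      -1   (not a valid time specification)
  */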
 
 extern bool crm_is_daemon;
 
 /* coverity[+kill] */
 void
 crm_abort(const char *file, const char *function, int line,
           const char *assert_condition, gboolean do_core, gboolean do_fork)
 {
     int rc = 0;
     int pid = 0;
     int status = 0;
 
     /* Implied by the parent's error logging below */
     /* crm_write_blackbox(0); */
 
     if(crm_is_daemon == FALSE) {
         /* This is a command line tool - do not fork */
 
         /* crm_add_logfile(NULL);   * Record it to a file? */
         crm_enable_stderr(TRUE); /* Make sure stderr is enabled so we can tell the caller */
         do_fork = FALSE;         /* Just crash if needed */
     }
 
     if (do_core == FALSE) {
         crm_err("%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition);
         return;
 
     } else if (do_fork) {
         pid = fork();
 
     } else {
         crm_err("%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition);
     }
 
     if (pid == -1) {
         crm_crit("%s: Cannot create core for non-fatal assert at %s:%d : %s",
                  function, file, line, assert_condition);
         return;
 
     } else if(pid == 0) {
         /* Child process */
         abort();
         return;
     }
 
     /* Parent process */
     crm_err("%s: Forked child %d to record non-fatal assert at %s:%d : %s",
             function, pid, file, line, assert_condition);
     crm_write_blackbox(SIGTRAP, NULL);
 
     do {
         rc = waitpid(pid, &status, 0);
         if(rc == pid) {
             return; /* Job done */
         }
 
     } while(errno == EINTR);
 
     if (errno == ECHILD) {
         /* crm_mon does this */
         crm_trace("Cannot wait on forked child %d - SIGCHLD is probably set to SIG_IGN", pid);
         return;
     }
     crm_perror(LOG_ERR, "Cannot wait on forked child %d", pid);
 }
 
 void
 crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile)
 {
     int rc;
     long pid;
     const char *devnull = "/dev/null";
 
     if (daemonize == FALSE) {
         return;
     }
 
     /* Check before we even try... */
     rc = crm_pidfile_inuse(pidfile, 1, name);
     if(rc < pcmk_ok && rc != -ENOENT) {
         pid = crm_read_pidfile(pidfile);
         crm_err("%s: already running [pid %ld in %s]", name, pid, pidfile);
         printf("%s: already running [pid %ld in %s]\n", name, pid, pidfile);
         crm_exit(CRM_EX_ERROR);
     }
 
     pid = fork();
     if (pid < 0) {
         fprintf(stderr, "%s: could not start daemon\n", name);
         crm_perror(LOG_ERR, "fork");
         crm_exit(CRM_EX_OSERR);
 
     } else if (pid > 0) {
         crm_exit(CRM_EX_OK);
     }
 
     rc = crm_lock_pidfile(pidfile, name);
     if(rc < pcmk_ok) {
         crm_err("Could not lock '%s' for %s: %s (%d)", pidfile, name, pcmk_strerror(rc), rc);
         printf("Could not lock '%s' for %s: %s (%d)\n", pidfile, name, pcmk_strerror(rc), rc);
         crm_exit(CRM_EX_ERROR);
     }
 
     umask(S_IWGRP | S_IWOTH | S_IROTH);
 
     close(STDIN_FILENO);
     (void)open(devnull, O_RDONLY);      /* Stdin:  fd 0 */
     close(STDOUT_FILENO);
     (void)open(devnull, O_WRONLY);      /* Stdout: fd 1 */
     close(STDERR_FILENO);
     (void)open(devnull, O_WRONLY);      /* Stderr: fd 2 */
 }
 
 char *
 crm_meta_name(const char *field)
 {
     int lpc = 0;
     int max = 0;
     char *crm_name = NULL;
 
     CRM_CHECK(field != NULL, return NULL);
     crm_name = crm_concat(CRM_META, field, '_');
 
     /* Massage the names so they can be used as shell variables */
     max = strlen(crm_name);
     for (; lpc < max; lpc++) {
         switch (crm_name[lpc]) {
             case '-':
                 crm_name[lpc] = '_';
                 break;
         }
     }
     return crm_name;
 }
 
 const char *
 crm_meta_value(GHashTable * hash, const char *field)
 {
     char *key = NULL;
     const char *value = NULL;
 
     key = crm_meta_name(field);
     if (key) {
         value = g_hash_table_lookup(hash, key);
         free(key);
     }
 
     return value;
 }
 
 static struct option *
 crm_create_long_opts(struct crm_option *long_options)
 {
     struct option *long_opts = NULL;
 
 #ifdef HAVE_GETOPT_H
     int index = 0, lpc = 0;
 
     /*
      * A previous, possibly poor, choice of '?' as the short form of --help
      * means that getopt_long() returns '?' for both --help and for "unknown option"
      *
      * This dummy entry allows us to differentiate between the two in crm_get_option()
      * and exit with the correct error code
      */
     long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option));
     long_opts[index].name = "__dummmy__";
     long_opts[index].has_arg = 0;
     long_opts[index].flag = 0;
     long_opts[index].val = '_';
     index++;
 
     for (lpc = 0; long_options[lpc].name != NULL; lpc++) {
         if (long_options[lpc].name[0] == '-') {
             continue;
         }
 
         long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option));
         /*fprintf(stderr, "Creating %d %s = %c\n", index,
          * long_options[lpc].name, long_options[lpc].val);      */
         long_opts[index].name = long_options[lpc].name;
         long_opts[index].has_arg = long_options[lpc].has_arg;
         long_opts[index].flag = long_options[lpc].flag;
         long_opts[index].val = long_options[lpc].val;
         index++;
     }
 
     /* Now create the list terminator */
     long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option));
     long_opts[index].name = NULL;
     long_opts[index].has_arg = 0;
     long_opts[index].flag = 0;
     long_opts[index].val = 0;
 #endif
 
     return long_opts;
 }
 
 void
 crm_set_options(const char *short_options, const char *app_usage, struct crm_option *long_options,
                 const char *app_desc)
 {
     if (short_options) {
         crm_short_options = strdup(short_options);
 
     } else if (long_options) {
         int lpc = 0;
         int opt_string_len = 0;
         char *local_short_options = NULL;
 
         for (lpc = 0; long_options[lpc].name != NULL; lpc++) {
             if (long_options[lpc].val && long_options[lpc].val != '-' && long_options[lpc].val < UCHAR_MAX) {
                 local_short_options = realloc_safe(local_short_options, opt_string_len + 4);
                 local_short_options[opt_string_len++] = long_options[lpc].val;
                 /* getopt(3) says: Two colons mean an option takes an optional arg; */
                 if (long_options[lpc].has_arg == optional_argument) {
                     local_short_options[opt_string_len++] = ':';
                 }
                 if (long_options[lpc].has_arg >= required_argument) {
                     local_short_options[opt_string_len++] = ':';
                 }
                 local_short_options[opt_string_len] = 0;
             }
         }
         crm_short_options = local_short_options;
         crm_trace("Generated short option string: '%s'", local_short_options);
     }
 
     if (long_options) {
         crm_long_options = long_options;
     }
     if (app_desc) {
         crm_app_description = app_desc;
     }
     if (app_usage) {
         crm_app_usage = app_usage;
     }
 }
 
 int
 crm_get_option(int argc, char **argv, int *index)
 {
     return crm_get_option_long(argc, argv, index, NULL);
 }
 
 int
 crm_get_option_long(int argc, char **argv, int *index, const char **longname)
 {
 #ifdef HAVE_GETOPT_H
     static struct option *long_opts = NULL;
 
     if (long_opts == NULL && crm_long_options) {
         long_opts = crm_create_long_opts(crm_long_options);
     }
 
     *index = 0;
     if (long_opts) {
         int flag = getopt_long(argc, argv, crm_short_options, long_opts, index);
 
         switch (flag) {
             case 0:
                 if (long_opts[*index].val) {
                     return long_opts[*index].val;
                 } else if (longname) {
                     *longname = long_opts[*index].name;
                 } else {
                     crm_notice("Unhandled option --%s", long_opts[*index].name);
                     return flag;
                 }
             case -1:           /* End of option processing */
                 break;
             case ':':
                 crm_trace("Missing argument");
                 crm_help('?', CRM_EX_USAGE);
                 break;
             case '?':
                 crm_help('?', (*index? CRM_EX_OK : CRM_EX_USAGE));
                 break;
         }
         return flag;
     }
 #endif
 
     if (crm_short_options) {
         return getopt(argc, argv, crm_short_options);
     }
 
     return -1;
 }
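 
 /* Illustrative sketch of how a tool's main() would normally drive the option
  * helpers above.  The option table itself is hypothetical; only the crm_*
  * calls are taken from this API.
  *
  *     static struct crm_option my_options[] = {
  *         { .name = "help",    .has_arg = no_argument, .val = '?',
  *           .desc = "Show this help message" },
  *         { .name = "version", .has_arg = no_argument, .val = '$',
  *           .desc = "Version information" },
  *         { .name = "verbose", .has_arg = no_argument, .val = 'V',
  *           .desc = "Increase debug output" },
  *         { .name = NULL }
  *     };
  *
  *     crm_set_options(NULL, "[options]", my_options, "Example tool");
  *     while (1) {
  *         int index = 0;
  *         int flag = crm_get_option(argc, argv, &index);
  *
  *         if (flag == -1) {
  *             break;
  *         }
  *         switch (flag) {
  *             case 'V':
  *                 crm_bump_log_level(argc, argv);
  *                 break;
  *             case '$':
  *             case '?':
  *                 crm_help(flag, CRM_EX_OK);
  *                 break;
  *         }
  *     }
  *
  * Passing NULL as the short-option string lets crm_set_options() generate it
  * from the table, as implemented above.
  */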
 
 crm_exit_t
 crm_help(char cmd, crm_exit_t exit_code)
 {
     int i = 0;
     FILE *stream = (exit_code ? stderr : stdout);
 
     if (cmd == 'v' || cmd == '$') {
         fprintf(stream, "Pacemaker %s\n", PACEMAKER_VERSION);
         fprintf(stream, "Written by Andrew Beekhof\n");
         goto out;
     }
 
     if (cmd == '!') {
         fprintf(stream, "Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
         goto out;
     }
 
     fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description);
 
     if (crm_app_usage) {
         fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage);
     }
 
     if (crm_long_options) {
         fprintf(stream, "Options:\n");
         for (i = 0; crm_long_options[i].name != NULL; i++) {
             if (crm_long_options[i].flags & pcmk_option_hidden) {
 
             } else if (crm_long_options[i].flags & pcmk_option_paragraph) {
                 fprintf(stream, "%s\n\n", crm_long_options[i].desc);
 
             } else if (crm_long_options[i].flags & pcmk_option_example) {
                 fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc);
 
             } else if (crm_long_options[i].val == '-' && crm_long_options[i].desc) {
                 fprintf(stream, "%s\n", crm_long_options[i].desc);
 
             } else {
                 /* is val printable as char ? */
                 if (crm_long_options[i].val && crm_long_options[i].val <= UCHAR_MAX) {
                     fprintf(stream, " -%c,", crm_long_options[i].val);
                 } else {
                     fputs("    ", stream);
                 }
                 fprintf(stream, " --%s%s\t%s\n", crm_long_options[i].name,
                         crm_long_options[i].has_arg == optional_argument ? "[=value]" :
                         crm_long_options[i].has_arg == required_argument ? "=value" : "",
                         crm_long_options[i].desc ? crm_long_options[i].desc : "");
             }
         }
 
     } else if (crm_short_options) {
         fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description);
         for (i = 0; crm_short_options[i] != 0; i++) {
             int has_arg = no_argument /* 0 */;
 
             if (crm_short_options[i + 1] == ':') {
                 if (crm_short_options[i + 2] == ':')
                     has_arg = optional_argument /* 2 */;
                 else
                     has_arg = required_argument /* 1 */;
             }
 
             fprintf(stream, " -%c %s\n", crm_short_options[i],
                     has_arg == optional_argument ? "[value]" :
                     has_arg == required_argument ? "{value}" : "");
             i += has_arg;
         }
     }
 
     fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT);
 
   out:
     return crm_exit(exit_code);
 }
 
 void cib_ipc_servers_init(qb_ipcs_service_t **ipcs_ro,
         qb_ipcs_service_t **ipcs_rw,
         qb_ipcs_service_t **ipcs_shm,
         struct qb_ipcs_service_handlers *ro_cb,
         struct qb_ipcs_service_handlers *rw_cb)
 {
     *ipcs_ro = mainloop_add_ipc_server(CIB_CHANNEL_RO, QB_IPC_NATIVE, ro_cb);
     *ipcs_rw = mainloop_add_ipc_server(CIB_CHANNEL_RW, QB_IPC_NATIVE, rw_cb);
     *ipcs_shm = mainloop_add_ipc_server(CIB_CHANNEL_SHM, QB_IPC_SHM, rw_cb);
 
     if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) {
         crm_err("Failed to create the CIB manager: exiting and inhibiting respawn");
         crm_warn("Verify pacemaker and pacemaker_remote are not both enabled");
         crm_exit(CRM_EX_FATAL);
     }
 }
 
 void cib_ipc_servers_destroy(qb_ipcs_service_t *ipcs_ro,
         qb_ipcs_service_t *ipcs_rw,
         qb_ipcs_service_t *ipcs_shm)
 {
     qb_ipcs_destroy(ipcs_ro);
     qb_ipcs_destroy(ipcs_rw);
     qb_ipcs_destroy(ipcs_shm);
 }
 
 qb_ipcs_service_t *
 crmd_ipc_server_init(struct qb_ipcs_service_handlers *cb)
 {
     return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb);
 }
 
 void
 attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb)
 {
     *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb);
 
     if (*ipcs == NULL) {
         crm_err("Failed to create pacemaker-attrd server: exiting and inhibiting respawn");
         crm_warn("Verify pacemaker and pacemaker_remote are not both enabled.");
         crm_exit(CRM_EX_FATAL);
     }
 }
 
 void
 stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb)
 {
     *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb);
 
     if (*ipcs == NULL) {
         crm_err("Failed to create fencer: exiting and inhibiting respawn.");
         crm_warn("Verify pacemaker and pacemaker_remote are not both enabled.");
         crm_exit(CRM_EX_FATAL);
     }
 }
 
 void *
 find_library_function(void **handle, const char *lib, const char *fn, gboolean fatal)
 {
     char *error;
     void *a_function;
 
     if (*handle == NULL) {
         *handle = dlopen(lib, RTLD_LAZY);
     }
 
     if (!(*handle)) {
         crm_err("%sCould not open %s: %s", fatal ? "Fatal: " : "", lib, dlerror());
         if (fatal) {
             crm_exit(CRM_EX_FATAL);
         }
         return NULL;
     }
 
     a_function = dlsym(*handle, fn);
     if (a_function == NULL) {
         error = dlerror();
         crm_err("%sCould not find %s in %s: %s", fatal ? "Fatal: " : "", fn, lib, error);
         if (fatal) {
             crm_exit(CRM_EX_FATAL);
         }
     }
 
     return a_function;
 }
 
-void *
-convert_const_pointer(const void *ptr)
-{
-    /* Worst function ever */
-    return (void *)ptr;
-}
-
 #ifdef HAVE_UUID_UUID_H
 #  include <uuid/uuid.h>
 #endif
 
 char *
 crm_generate_uuid(void)
 {
     unsigned char uuid[16];
     char *buffer = malloc(37);  /* Including NUL byte */
 
     uuid_generate(uuid);
     uuid_unparse(uuid, buffer);
     return buffer;
 }
 
 /*!
  * \brief Get name to be used as identifier for cluster messages
  *
  * \param[in] name  Actual system name to check
  *
  * \return Non-NULL cluster message identifier corresponding to name
  *
  * \note The Pacemaker daemons were renamed in version 2.0.0, but the old names
  *       must continue to be used as the identifier for cluster messages, so
  *       that mixed-version clusters are possible during a rolling upgrade.
  */
 const char *
 pcmk_message_name(const char *name)
 {
     if (name == NULL) {
         return "unknown";
 
     } else if (!strcmp(name, "pacemaker-attrd")) {
         return "attrd";
 
     } else if (!strcmp(name, "pacemaker-based")) {
         return CRM_SYSTEM_CIB;
 
     } else if (!strcmp(name, "pacemaker-controld")) {
         return CRM_SYSTEM_CRMD;
 
     } else if (!strcmp(name, "pacemaker-execd")) {
         return CRM_SYSTEM_LRMD;
 
     } else if (!strcmp(name, "pacemaker-fenced")) {
         return "stonith-ng";
 
     } else if (!strcmp(name, "pacemaker-schedulerd")) {
         return CRM_SYSTEM_PENGINE;
 
     } else {
         return name;
     }
 }
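 
 /* A minimal illustration of the mapping above:
  *
  *     const char *wire_name = pcmk_message_name("pacemaker-controld");
  *
  * wire_name now points to CRM_SYSTEM_CRMD, the pre-2.0.0 identifier still
  * used on the wire so rolling upgrades keep working; an unrecognized name is
  * returned as-is, and NULL maps to "unknown".
  */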
 
 /*!
  * \brief Check whether a string represents a cluster daemon name
  *
  * \param[in] name  String to check
  *
  * \return TRUE if name is a standard client name used by daemons, FALSE otherwise
  */
 bool
 crm_is_daemon_name(const char *name)
 {
     name = pcmk_message_name(name);
     return (!strcmp(name, CRM_SYSTEM_CRMD)
             || !strcmp(name, CRM_SYSTEM_STONITHD)
             || !strcmp(name, "stonith-ng")
             || !strcmp(name, "attrd")
             || !strcmp(name, CRM_SYSTEM_CIB)
             || !strcmp(name, CRM_SYSTEM_MCP)
             || !strcmp(name, CRM_SYSTEM_DC)
             || !strcmp(name, CRM_SYSTEM_TENGINE)
             || !strcmp(name, CRM_SYSTEM_LRMD));
 }
 
 #include <md5.h>
 
 char *
 crm_md5sum(const char *buffer)
 {
     int lpc = 0, len = 0;
     char *digest = NULL;
     unsigned char raw_digest[MD5_DIGEST_SIZE];
 
     if (buffer == NULL) {
         buffer = "";
     }
     len = strlen(buffer);
 
     crm_trace("Beginning digest of %d bytes", len);
     digest = malloc(2 * MD5_DIGEST_SIZE + 1);
     if(digest) {
         md5_buffer(buffer, len, raw_digest);
         for (lpc = 0; lpc < MD5_DIGEST_SIZE; lpc++) {
             sprintf(digest + (2 * lpc), "%02x", raw_digest[lpc]);
         }
         digest[(2 * MD5_DIGEST_SIZE)] = 0;
         crm_trace("Digest %s.", digest);
 
     } else {
         crm_err("Could not create digest");
     }
     return digest;
 }
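 
 /* For reference, crm_md5sum("") yields the well-known empty-input digest
  * "d41d8cd98f00b204e9800998ecf8427e" (as does crm_md5sum(NULL), which is
  * treated as an empty buffer above); the caller must free the returned
  * string.
  */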
 
 #ifdef HAVE_GNUTLS_GNUTLS_H
 void
 crm_gnutls_global_init(void)
 {
     signal(SIGPIPE, SIG_IGN);
     gnutls_global_init();
 }
 #endif
 
 /*!
  * \brief Get the local hostname
  *
  * \return Newly allocated string with name, or NULL (with errno set) on error
  */
 char *
 pcmk_hostname()
 {
     struct utsname hostinfo;
 
     return (uname(&hostinfo) < 0)? NULL : strdup(hostinfo.nodename);
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 5e7587e8e0..27ca3b1802 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2005 +1,2003 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 
 int resource_verbose = 0;
 bool do_force = FALSE;
 int crmd_replies_needed = 1; /* The welcome message */
 
 const char *attr_set_type = XML_TAG_ATTR_SETS;
 
 static int
 do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set)
 {
     int found = 0;
     GListPtr lpc = NULL;
 
     for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) {
         node_t *node = (node_t *) lpc->data;
 
         if (BE_QUIET) {
             fprintf(stdout, "%s\n", node->details->uname);
         } else {
             const char *state = "";
 
             if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) {
                 state = "Master";
             }
             fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state);
         }
 
         found++;
     }
 
     if (BE_QUIET == FALSE && found == 0) {
         fprintf(stderr, "resource %s is NOT running\n", rsc);
     }
 
     return found;
 }
 
 int
 cli_resource_search(resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
     int found = 0;
     resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
         for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
             found += do_find_resource(requested_name, iter->data, data_set);
         }
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
                && is_not_set(rsc->flags, pe_rsc_unique)
                && rsc->clone_name
                && safe_str_eq(requested_name, rsc->clone_name)
                && safe_str_neq(requested_name, rsc->id)) {
 
         for (GListPtr iter = parent->children; iter; iter = iter->next) {
             found += do_find_resource(requested_name, iter->data, data_set);
         }
 
     } else {
         found += do_find_resource(requested_name, rsc, data_set);
     }
 
     return found;
 }
 
 #define XPATH_MAX 1024
 
 static int
 find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type,
                    const char *set_name, const char *attr_id, const char *attr_name, char **value)
 {
     int offset = 0;
     int rc = pcmk_ok;
     xmlNode *xml_search = NULL;
     char *xpath_string = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return -ENOTCONN;
     }
 
     xpath_string = calloc(1, XPATH_MAX);
     offset +=
         snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
 
     if (set_type) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type);
         if (set_name) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
         }
     }
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
     if (attr_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
     }
 
     if (attr_name) {
         if (attr_id) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
         }
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
     }
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
     CRM_LOG_ASSERT(offset > 0);
 
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
 
     if (rc != pcmk_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_has_children(xml_search)) {
         xmlNode *child = NULL;
 
         rc = -EINVAL;
         printf("Multiple attributes match name=%s\n", attr_name);
 
         for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) {
             printf("  Value: %s \t(id=%s)\n",
                    crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
     } else if(value) {
         const char *tmp = crm_element_value(xml_search, attr);
 
         if (tmp) {
             *value = strdup(tmp);
         }
     }
 
   bail:
     free(xpath_string);
     free_xml(xml_search);
     return rc;
 }
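 
 /* Illustrative sketch of the XPath this helper assembles, using hypothetical
  * IDs.  A call such as
  *
  *     char *value = NULL;
  *
  *     find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, "dummy-rsc",
  *                        XML_TAG_META_SETS, NULL, NULL, "target-role",
  *                        &value);
  *
  * queries with an XPath equivalent to (assuming get_object_path("resources")
  * resolves to the usual /cib/configuration/resources; whitespace added here
  * for readability):
  *
  *     /cib/configuration/resources // *[@id="dummy-rsc"]
  *         /meta_attributes // nvpair[@name="target-role"]
  *
  * With exactly one match, the nvpair's requested attribute is copied into
  * *value, which the caller must free; multiple matches return -EINVAL.
  */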
 
 static resource_t *
 find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id,
                             const char * attr_name, cib_t * cib, const char * cmd)
 {
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
 
     if(do_force == TRUE) {
         return rsc;
 
     } else if(rsc->parent) {
         switch(rsc->parent->variant) {
             case pe_group:
                 if (BE_QUIET == FALSE) {
                     printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
                 }
                 break;
 
             case pe_clone:
                 rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
                 free(local_attr_id);
 
                 if(rc != pcmk_ok) {
                     rsc = rsc->parent;
                     if (BE_QUIET == FALSE) {
                         printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id);
                     }
                 }
                 break;
             default:
                 break;
         }
 
     } else if (rsc->parent && BE_QUIET == FALSE) {
         printf("Forcing %s of '%s' for '%s' instead of '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
 
     } else if(rsc->parent == NULL && rsc->children) {
         resource_t *child = rsc->children->data;
 
         if(child->variant == pe_native) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_ok) {
                 rsc = child;
                 if (BE_QUIET == FALSE) {
                     printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id);
                 }
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
     }
 
     return rsc;
 }
 
 int
 cli_resource_update_attribute(resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_id,
                               const char *attr_name, const char *attr_value,
                               bool recursive, cib_t *cib,
                               pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     static bool need_init = TRUE;
 
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     char *local_attr_set = NULL;
 
     xmlNode *xml_top = NULL;
     xmlNode *xml_obj = NULL;
 
     if(attr_id == NULL
        && do_force == FALSE
        && find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) {
         printf("\n");
     }
 
     if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
         if (do_force == FALSE) {
             rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &local_attr_id);
             if (rc == pcmk_ok && BE_QUIET == FALSE) {
                 printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n",
                        uber_parent(rsc)->id, attr_name, local_attr_id);
                 printf("         Delete '%s' first or use the force option to override\n",
                        local_attr_id);
             }
             free(local_attr_id);
             if (rc == pcmk_ok) {
                 return -ENOTUNIQ;
             }
         }
 
     } else {
         rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                           attr_id, attr_name, cib, "update");
     }
 
     lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == pcmk_ok) {
         crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
         attr_id = local_attr_id;
 
     } else if (rc != -ENXIO) {
         free(lookup_id);
         free(local_attr_id);
         return rc;
 
     } else {
         const char *tag = crm_element_name(rsc->xml);
 
         if (attr_set == NULL) {
             local_attr_set = crm_concat(lookup_id, attr_set_type, '-');
             attr_set = local_attr_set;
         }
         if (attr_id == NULL) {
             local_attr_id = crm_concat(attr_set, attr_name, '-');
             attr_id = local_attr_id;
         }
 
         xml_top = create_xml_node(NULL, tag);
         crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
         xml_obj = create_xml_node(xml_top, attr_set_type);
         crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
     }
 
     xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
     if (xml_top == NULL) {
         xml_top = xml_obj;
     }
 
     crm_log_xml_debug(xml_top, "Update");
 
     rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
     }
 
     free_xml(xml_top);
 
     free(lookup_id);
     free(local_attr_id);
     free(local_attr_set);
 
     if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
         GListPtr lpc = NULL;
 
         if(need_init) {
             xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
             need_init = FALSE;
             unpack_constraints(cib_constraints, data_set);
 
             for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
                 resource_t *r = (resource_t *) lpc->data;
 
                 clear_bit(r->flags, pe_rsc_allocating);
             }
         }
 
         crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
         set_bit(rsc->flags, pe_rsc_allocating);
         for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
             rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
             resource_t *peer = cons->rsc_lh;
 
             crm_debug("Checking %s %d", cons->id, cons->score);
             if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) {
                 /* Don't get into colocation loops */
                 crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                 cli_resource_update_attribute(peer, peer->id, NULL, NULL,
                                               attr_name, attr_value, recursive,
                                               cib, data_set);
             }
         }
     }
 
     return rc;
 }
 
 int
 cli_resource_delete_attribute(resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_id,
                               const char *attr_name, cib_t *cib,
                               pe_working_set_t *data_set)
 {
     xmlNode *xml_obj = NULL;
 
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
 
     if(attr_id == NULL
        && do_force == FALSE
        && find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) {
         printf("\n");
     }
 
     if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
         rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                           attr_id, attr_name, cib, "delete");
     }
 
     lookup_id = clone_strip(rsc->id);
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == -ENXIO) {
         free(lookup_id);
         return pcmk_ok;
 
     } else if (rc != pcmk_ok) {
         free(lookup_id);
         return rc;
     }
 
     if (attr_id == NULL) {
         attr_id = local_attr_id;
     }
 
     xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
     crm_log_xml_debug(xml_obj, "Delete");
 
     CRM_ASSERT(cib);
     rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
 
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "");
     }
 
     free(lookup_id);
     free_xml(xml_obj);
     free(local_attr_id);
     return rc;
 }
 
 static int
 send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op,
                 const char *host_uname, const char *rsc_id,
                 bool only_failed, pe_working_set_t * data_set)
 {
     char *our_pid = NULL;
     char *key = NULL;
     int rc = -ECOMM;
     xmlNode *cmd = NULL;
     xmlNode *xml_rsc = NULL;
     const char *value = NULL;
     const char *router_node = host_uname;
     xmlNode *params = NULL;
     xmlNode *msg_data = NULL;
     resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         CMD_ERR("Resource %s not found", rsc_id);
         return -ENXIO;
 
     } else if (rsc->variant != pe_native) {
         CMD_ERR("We can only process primitive resources, not %s", rsc_id);
         return -EINVAL;
 
     } else if (host_uname == NULL) {
         CMD_ERR("Please specify a node name");
         return -EINVAL;
     } else {
         node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node && is_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 CMD_ERR("No cluster connection to Pacemaker Remote node %s detected",
                         host_uname);
                 return -ENXIO;
             }
             router_node = node->details->uname;
         }
     }
 
     key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx");
 
     msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
     crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key);
     free(key);
 
     crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
     if (safe_str_neq(router_node, host_uname)) {
         crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
     }
 
     xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE);
     if (rsc->clone_name) {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name);
         crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id);
 
     } else {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id);
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE);
     if (value == NULL) {
         CMD_ERR("%s has no type!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS);
     if (value == NULL) {
         CMD_ERR("%s has no class!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER);
 
     params = create_xml_node(msg_data, XML_TAG_ATTRS);
     crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
 
     key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
     crm_xml_add(params, key, "60000");  /* 1 minute */
     free(key);
 
     our_pid = crm_getpid_s();
     cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid);
 
 /* 	crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */
     free_xml(msg_data);
 
     if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
         rc = 0;
 
     } else {
         crm_debug("Could not send %s op to the controller", op);
         rc = -ENOTCONN;
     }
 
     free_xml(cmd);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 rsc_fail_name(resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
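 
 /* For example, an anonymous clone instance whose clone_name is "ping-clone:0"
  * (a hypothetical ID) yields "ping-clone" here, because clone_strip() drops
  * the ":0" suffix, while a globally unique clone instance keeps its numbered
  * name; this matches the keys under which fail counts are recorded in node
  * attributes.
  */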
 
 static int
 clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname,
                   const char *rsc_id, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id,
                          TRUE, data_set);
     if (rc != pcmk_ok) {
         return rc;
     }
     crmd_replies_needed++;
 
     crm_trace("Processing %d mainloop inputs", crmd_replies_needed);
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   crmd_replies_needed);
     }
 
     if (crmd_replies_needed < 0) {
         crmd_replies_needed = 0;
     }
     return rc;
 }
 
 static int
 clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
                    const char *rsc_id, const char *operation,
                    const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     const char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
     if (operation) {
         interval_ms_s = crm_strdup_printf("%u",
                                           crm_parse_interval_spec(interval_spec));
     }
 
     for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = __xml_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                                failed_id,
                                                                pe_find_renamed|pe_find_anon);
 
             if (!fail_rsc || safe_str_neq(rsc_id, fail_rsc->id)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
         if (safe_str_neq(node_name, failed_value)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
             if (safe_str_neq(operation, failed_value)) {
                 continue;
             }
 
             // Interval (if operation was specified) defaults to 0 (not all)
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
             if (safe_str_neq(interval_ms_s, failed_value)) {
                 continue;
             }
         }
 
         /* not available until glib 2.32
         g_hash_table_add(rscs, (gpointer) failed_id);
         */
         g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id);
     }
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set);
         if (rc != pcmk_ok) {
             return rc;
         }
     }
     g_hash_table_destroy(rscs);
     return rc;
 }
 
 static int
 clear_rsc_fail_attrs(resource_t *rsc, const char *operation,
                      const char *interval_spec, node_t *node)
 {
     int rc = pcmk_ok;
     int attr_options = attrd_opt_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (is_remote_node(node)) {
         attr_options |= attrd_opt_remote;
     }
     rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation,
                               interval_spec, NULL, attr_options);
     free(rsc_name);
     return rc;
 }
 
 int
 cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
                     resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     node_t *node = NULL;
 
     if (rsc == NULL) {
         return -ENXIO;
 
     } else if (rsc->children) {
         GListPtr lpc = NULL;
 
         for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             resource_t *child = (resource_t *) lpc->data;
 
             rc = cli_resource_delete(crmd_channel, host_uname, child, operation,
                                      interval_spec, just_failures, data_set);
             if (rc != pcmk_ok) {
                 return rc;
             }
         }
         return pcmk_ok;
 
     } else if (host_uname == NULL) {
         GListPtr lpc = NULL;
         GListPtr nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && do_force) {
             nodes = node_list_dup(data_set->nodes, FALSE, FALSE);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pe_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(crmd_channel, node->details->uname,
                                          rsc, operation, interval_spec,
                                          just_failures, data_set);
             }
             if (rc != pcmk_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         printf("Unable to clean up %s because node %s not found\n",
                rsc->id, host_uname);
         return -ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         printf("Unable to clean up %s because resource discovery disabled on %s\n",
                rsc->id, host_uname);
         return -EOPNOTSUPP;
     }
 
     if (crmd_channel == NULL) {
         printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n",
                rsc->id, host_uname);
         return pcmk_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_ok) {
         printf("Unable to clean up %s failures on %s: %s\n",
                 rsc->id, host_uname, pcmk_strerror(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation,
                                 interval_spec, data_set);
     } else {
         rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set);
     }
     if (rc != pcmk_ok) {
         printf("Cleaned %s failures on %s, but unable to clean history: %s\n",
                rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         printf("Cleaned up %s on %s\n", rsc->id, host_uname);
     }
     return rc;
 }
 
 int
 cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
                 const char *operation, const char *interval_spec,
                 pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     int attr_options = attrd_opt_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (crmd_channel == NULL) {
         printf("Dry run: skipping clean-up of %s due to CIB_file\n",
                display_name);
         return pcmk_ok;
     }
     crmd_replies_needed = 0;
 
     if (node_name) {
         node_t *node = pe_find_node(data_set->nodes, node_name);
 
         if (node == NULL) {
             CMD_ERR("Unknown node: %s", node_name);
             return -ENXIO;
         }
         if (is_remote_node(node)) {
             attr_options |= attrd_opt_remote;
         }
     }
 
     rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval_spec,
                               NULL, attr_options);
     if (rc != pcmk_ok) {
         printf("Unable to clean up all failures on %s: %s\n",
                 display_name, pcmk_strerror(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(crmd_channel, node_name, NULL,
                                 operation, interval_spec, data_set);
         if (rc != pcmk_ok) {
             printf("Cleaned all resource failures on %s, but unable to clean history: %s\n",
                    node_name, pcmk_strerror(rc));
             return rc;
         }
     } else {
         for (GList *iter = data_set->nodes; iter; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
 
             rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL,
                                     operation, interval_spec, data_set);
             if (rc != pcmk_ok) {
                 printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n",
                        pcmk_strerror(rc));
                 return rc;
             }
         }
     }
 
     printf("Cleaned up all resources on %s\n", display_name);
     return pcmk_ok;
 }
 
 void
 cli_resource_check(cib_t * cib_conn, resource_t *rsc)
 {
     int need_nl = 0;
     char *role_s = NULL;
     char *managed = NULL;
     resource_t *parent = uber_parent(rsc);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
 
     if(role_s) {
         enum rsc_role_e role = text2role(role_s);
 
         free(role_s);
         if(role == RSC_ROLE_UNKNOWN) {
             // Treated as if unset
 
         } else if(role == RSC_ROLE_STOPPED) {
             printf("\n  * The configuration specifies that '%s' should remain stopped\n", parent->id);
             need_nl++;
 
         } else if (is_set(parent->flags, pe_rsc_promotable)
                    && (role == RSC_ROLE_SLAVE)) {
             printf("\n  * The configuration specifies that '%s' should not be promoted\n", parent->id);
             need_nl++;
         }
     }
 
     if(managed && crm_is_true(managed) == FALSE) {
         printf("%s  * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id);
         need_nl++;
     }
     free(managed);
 
     if(need_nl) {
         printf("\n");
     }
 }
 
 int
 cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname,
              const char *rsc_id, pe_working_set_t * data_set)
 {
     crm_warn("Failing: %s", rsc_id);
     return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set);
 }
 
 static GHashTable *
 generate_resource_params(resource_t * rsc, pe_working_set_t * data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
 
     if (!rsc) {
         crm_err("Resource does not exist in config");
         return NULL;
     }
 
     params = crm_str_table_new();
     meta = crm_str_table_new();
     combined = crm_str_table_new();
 
     get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
     get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);
 
     if (params) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
         g_hash_table_destroy(params);
     }
 
     if (meta) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
 static bool resource_is_running_on(resource_t *rsc, const char *host) 
 {
     bool found = TRUE;
     GListPtr hIter = NULL;
     GListPtr hosts = NULL;
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if(strcmp(host, node->details->uname) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         } else if(strcmp(host, node->details->id) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if(host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = FALSE;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = FALSE;
     }
 
   done:
 
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         resource_t *rsc = (resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pe_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
 static GList*
 subtract_lists(GList *from, GList *items) 
 {
     GList *item = NULL;
     GList *result = g_list_copy(from);
 
     for (item = items; item != NULL; item = item->next) {
         GList *candidate = NULL;
         for (candidate = from; candidate != NULL; candidate = candidate->next) {
             crm_info("Comparing %s with %s", (const char *) candidate->data,
                      (const char *) item->data);
             if(strcmp(candidate->data, item->data) == 0) {
                 result = g_list_remove(result, candidate->data);
                 break;
             }
         }
     }
 
     return result;
 }
 
 static void dump_list(GList *items, const char *tag) 
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(GList *items, const char *tag) 
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         fprintf(stdout, "%s%s\n", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] xml        XML to use as input
  *
  * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns -ENOKEY ("Required key not available") if that fails,
  *       but perhaps -pcmk_err_schema_validation would be better in that case.
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return -ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_ok;
 }
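 
 /* Illustrative sketch of a caller feeding a saved CIB file into a scheduler
  * working set via the helper above (the file path is hypothetical):
  *
  *     xmlNode *xml = filename2xml("/tmp/cib-snapshot.xml");
  *     pe_working_set_t *data_set = pe_new_working_set();
  *
  *     if (xml && data_set
  *         && (update_working_set_xml(data_set, &xml) == pcmk_ok)) {
  *         cluster_status(data_set);  // unpack nodes and resources
  *     }
  *
  * As noted above, the caller remains responsible for freeing data_set->input
  * and data_set->now when finished with the working set.
  */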
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
  * \param[in] data_set   Data set instance to initialize
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not upgrade the current CIB XML\n");
         free_xml(cib_xml_copy);
         return rc;
     }
     return pcmk_ok;
 }
 
 static int
 update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc;
 
     pe_reset_working_set(data_set);
     rc = update_working_set_from_cib(data_set, cib);
     if (rc != pcmk_ok) {
         return rc;
     }
 
     if(simulate) {
         pid = crm_getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             fprintf(stderr, "Could not create shadow cib: '%s'\n", pid);
             rc = -ENXIO;
             goto cleanup;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         if(rc != pcmk_ok) {
             fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         do_calculations(data_set, data_set->input, NULL);
         run_simulation(data_set, shadow_cib, NULL, TRUE);
         rc = update_dataset(shadow_cib, data_set, FALSE);
 
     } else {
         cluster_status(data_set);
     }
 
   cleanup:
     /* Do not free data_set->input here; we need rsc->xml to be valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
 static int
 max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) 
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
         action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
 
         max_delay = crm_int_helper(value, NULL);
         pe_free_action(stop);
     }
 
 
     return max_delay;
 }
 
 static int
 max_delay_in(pe_working_set_t * data_set, GList *resources) 
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
 
     return 5 + (max_delay / 1000);
 }
 
 #define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \
                                     (resource_is_running_on((r), (h)) == FALSE))
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in] rsc        The resource to restart
  * \param[in] host       The host to restart the resource on (or NULL for all)
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but a two-second
  *                       granularity is actually used; if 0, a timeout will be
  *                       calculated based on the resource timeout)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure (exits on certain failures)
  */
 int
 cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms,
                      cib_t *cib)
 {
     int rc = 0;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = FALSE;
     char *rsc_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t *data_set = NULL;
 
     if(resource_is_running_on(rsc, host) == FALSE) {
         const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
         if(host) {
             printf("%s is not running on %s and so cannot be restarted\n", id, host);
         } else {
             printf("%s is not running anywhere and so cannot be restarted\n", id);
         }
         return -ENXIO;
     }
 
     /* We might set the target-role meta-attribute */
     attr_set_type = XML_TAG_META_SETS;
 
     rsc_id = strdup(rsc->id);
     if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
         stop_via_ban = TRUE;
     }
 
     /*
       grab full cib
       determine originally active resources
       disable or ban
       poll cib and watch for affected resources to get stopped
       without --timeout, calculate the stop timeout for each step and wait for that
       if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
       if everything stopped, re-enable or un-ban
       poll cib and watch for affected resources to get started
       without --timeout, calculate the start timeout for each step and wait for that
       if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
       report success
 
       Optimizations:
       - Use constraints to determine an ordered list of affected resources
       - Allow a --no-deps option (a.k.a. --force-restart)
     */
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         crm_perror(LOG_ERR, "Could not allocate working set");
-        return -ENOMEM;
+        rc = -ENOMEM;
+        goto done;
     }
     rc = update_dataset(cib, data_set, FALSE);
     if(rc != pcmk_ok) {
         fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc);
-        free(rsc_id);
-        return rc;
+        goto done;
     }
 
     restart_target_active = get_active_resources(host, data_set->resources);
     current_active = get_active_resources(host, data_set->resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         BE_QUIET = TRUE;
         rc = cli_resource_ban(rsc_id, host, NULL, cib);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it only makes any difference if it's Slave).
          */
         char *lookup_id = clone_strip(rsc->id);
 
         find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         free(lookup_id);
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE,
                                            RSC_STOPPED, FALSE, cib, data_set);
     }
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
         if (current_active) {
             g_list_free_full(current_active, free);
         }
         if (restart_target_active) {
             g_list_free_full(restart_target_active, free);
         }
-        free(rsc_id);
-        return crm_exit(crm_errno2exit(rc));
+        goto done;
     }
 
     rc = update_dataset(cib, data_set, TRUE);
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not determine which resources would be stopped\n");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set->resources);
     dump_list(target_active, "Target");
 
     list_delta = subtract_lists(current_active, target_active);
     fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
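     /* Convert the remaining timeout into a number of sleep intervals */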
     step_timeout_s = timeout / sleep_interval;
     while(g_list_length(list_delta) > 0) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(cib, data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were stopped\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, data_set->resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(current_active, target_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(rsc_id, host, NULL, cib);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            data_set);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE, cib,
                                            data_set);
     }
 
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
-        free(rsc_id);
-        return crm_exit(crm_errno2exit(rc));
+        goto done;
     }
 
     if (target_active) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
     if (list_delta) {
         g_list_free(list_delta);
     }
     list_delta = subtract_lists(target_active, current_active);
     fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
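     /* As above, convert the remaining timeout into a number of sleep intervals */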
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(cib, data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were started\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set->resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(target_active, current_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(rsc_id, host, NULL, cib);
     } else if (orig_target_role) {
         cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE,
                                       orig_target_role, FALSE, cib, data_set);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, cib, data_set);
     }
 
 done:
     if (list_delta) {
         g_list_free(list_delta);
     }
     if (current_active) {
         g_list_free_full(current_active, free);
     }
     if (target_active && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active) {
         g_list_free_full(restart_target_active, free);
     }
     free(rsc_id);
     pe_free_working_set(data_set);
     return rc;
 }
 
 static inline int
 action_is_pending(action_t *action)
 {
     if(is_set(action->flags, pe_action_optional)) {
         return FALSE;
     } else if(is_set(action->flags, pe_action_runnable) == FALSE) {
         return FALSE;
     } else if(is_set(action->flags, pe_action_pseudo)) {
         return FALSE;
     } else if(safe_str_eq("notify", action->task)) {
         return FALSE;
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Return TRUE if any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return TRUE if any actions in the list are pending, FALSE otherwise
  */
 static bool
 actions_are_pending(GListPtr actions)
 {
     GListPtr action;
 
     for (action = actions; action != NULL; action = action->next) {
         action_t *a = (action_t *)action->data;
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Print pending actions to stderr
  *
  * \param[in] actions   List of actions to check
  *
  * \return void
  */
 static void
 print_pending_actions(GListPtr actions)
 {
     GListPtr action;
 
     fprintf(stderr, "Pending actions:\n");
     for (action = actions; action != NULL; action = action->next) {
         action_t *a = (action_t *) action->data;
 
         if (action_is_pending(a)) {
             fprintf(stderr, "\tAction %d: %s", a->id, a->uuid);
             if (a->node) {
                 fprintf(stderr, "\ton %s", a->node->details->uname);
             }
             fprintf(stderr, "\n");
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but one-second granularity
  *                       is actually used; if 0, a default will be used)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure
  */
 int
 wait_till_stable(int timeout_ms, cib_t * cib)
 {
     pe_working_set_t *data_set = NULL;
     int rc = -1;
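     /* Round the timeout up to whole seconds, or use the default if none was given */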
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
     bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         return -ENOMEM;
     }
 
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
         } else {
             print_pending_actions(data_set->actions);
             pe_free_working_set(data_set);
             return -ETIME;
         }
         if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         pe_reset_working_set(data_set);
         rc = update_working_set_from_cib(data_set, cib);
         if (rc != pcmk_ok) {
             pe_free_working_set(data_set);
             return rc;
         }
         do_calculations(data_set, data_set->input, NULL);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(data_set->config_hash,
                                                          "dc-version");
 
             if (safe_str_neq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION)) {
                 printf("warning: wait option may not work properly in "
                        "mixed-version cluster\n");
                 printed_version_warning = TRUE;
             }
         }
 
     } while (actions_are_pending(data_set->actions));
 
     pe_free_working_set(data_set);
     return pcmk_ok;
 }
 
 int
 cli_resource_execute(resource_t *rsc, const char *requested_name,
                      const char *rsc_action, GHashTable *override_hash,
                      int timeout_ms, cib_t * cib, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     svc_action_t *op = NULL;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     const char *action = NULL;
     GHashTable *params = NULL;
 
     if (safe_str_eq(rsc_action, "validate")) {
         action = "validate-all";
 
     } else if (safe_str_eq(rsc_action, "force-check")) {
         action = "monitor";
 
     } else if (safe_str_eq(rsc_action, "force-stop")) {
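         /* Skip the "force-" prefix (6 characters) to get the underlying agent action */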
         action = rsc_action+6;
 
     } else if (safe_str_eq(rsc_action, "force-start")
                || safe_str_eq(rsc_action, "force-demote")
                || safe_str_eq(rsc_action, "force-promote")) {
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
             rc = cli_resource_search(rsc, requested_name, data_set);
             if(rc > 0 && do_force == FALSE) {
                 CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active",
                         action, rsc->id);
                 CMD_ERR("Try setting target-role=Stopped first or specifying "
                         "the force option");
                 crm_exit(CRM_EX_UNSAFE);
             }
         }
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if(rsc->variant == pe_group) {
         CMD_ERR("Sorry, the %s option doesn't support group resources",
                 rsc_action);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
     if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
         CMD_ERR("Sorry, the %s option doesn't support %s resources yet",
                 rsc_action, rclass);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     params = generate_resource_params(rsc, data_set);
 
     /* add meta_timeout env needed by some resource agents */
     if (timeout_ms == 0) {
         timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
     }
     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                         crm_strdup_printf("%d", timeout_ms));
 
     /* add crm_feature_set env needed by some resource agents */
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
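     /* For an instance of an anonymous clone, act on the requested name rather
      * than the internal instance ID
      */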
     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
 
     op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                  timeout_ms, params, 0);
     if (op == NULL) {
         /* Re-run with stderr enabled so we can display a sane error message */
         crm_enable_stderr(TRUE);
         op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                      timeout_ms, params, 0);
 
         /* We know op will be NULL, but this makes static analysis happy */
         services_action_free(op);
 
         return crm_exit(CRM_EX_DATAERR);
     }
 
 
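     /* Pass the requested verbosity on to the resource agent via its environment */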
     setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
     if(resource_verbose > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     /* A resource agent using the standard ocf-shellfuncs library will not print
      * messages to stderr if it doesn't have a controlling terminal (e.g. if
      * crm_resource is called via script or ssh). This forces it to do so.
      */
     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
 
     if (override_hash) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, override_hash);
         while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
             printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n",
                    rsc->id, name, value);
             g_hash_table_replace(op->params, strdup(name), strdup(value));
         }
     }
 
     if (services_action_sync(op)) {
         int more, lpc, last;
         char *local_copy = NULL;
 
         if (op->status == PCMK_LRM_OP_DONE) {
             printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype,
                    services_ocf_exitcode_str(op->rc), op->rc);
         } else {
             printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype,
                    services_lrm_status_str(op->status), op->status);
         }
 
         /* hide output for validate-all if not in verbose */
         if (resource_verbose == 0 && safe_str_eq(action, "validate-all"))
             goto done;
 
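         /* Echo the agent's stdout and stderr line by line, prefixing each line */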
         if (op->stdout_data) {
             local_copy = strdup(op->stdout_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stdout: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
         if (op->stderr_data) {
             local_copy = strdup(op->stderr_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stderr: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
     }
   done:
     rc = op->rc;
     services_action_free(op);
     return rc;
 }
 
 int
 cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
                   cib_t *cib, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     unsigned int count = 0;
     node_t *current = NULL;
     node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = FALSE;
 
     if (dest == NULL) {
         return -pcmk_err_node_unknown;
     }
 
     if (scope_master && is_not_set(rsc->flags, pe_rsc_promotable)) {
         resource_t *p = uber_parent(rsc);
 
         if (is_set(p->flags, pe_rsc_promotable)) {
             CMD_ERR("Using parent '%s' for move instead of '%s'.", p->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             CMD_ERR("Ignoring master option: %s is not promotable", rsc_id);
             scope_master = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         GListPtr iter = NULL;
         unsigned int master_count = 0;
         pe_node_t *master_node = NULL;
 
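         /* Find which instance (if any) is currently in the master role,
          * and where it is running
          */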
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
                 master_node = pe__current_node(child);
                 master_count++;
             }
         }
         if (scope_master || master_count) {
             count = master_count;
             current = master_node;
         }
 
     }
 
     if (count > 1) {
         if (pe_rsc_is_clone(rsc)) {
             current = NULL;
         } else {
             return -pcmk_err_multiple;
         }
     }
 
     if (current && (current->details == dest->details)) {
         cur_is_dest = TRUE;
         if (do_force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, scope_master?"promoted":"active", dest->details->uname);
         } else {
             return -pcmk_err_already;
         }
     }
 
     /* Clear any previous constraints for 'dest' */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(rsc_id, dest->details->uname, cib);
 
     crm_trace("%s%s now prefers node %s%s",
               rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":"");
 
     /* Only ban the previous location if the current location differs from the
      * destination. It is possible to use -M to enforce a location without
      * regard to where the resource is currently located.
      */
     if(do_force && (cur_is_dest == FALSE)) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib);
 
         } else if(count > 1) {
             CMD_ERR("Resource '%s' is currently %s in %d locations. "
                     "One may now move to %s",
                     rsc_id, (scope_master? "promoted" : "active"),
                     count, dest->details->uname);
             CMD_ERR("To prevent '%s' from being %s at a specific location, "
                     "specify a node.",
                     rsc_id, (scope_master? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }
 
 static void
 cli_resource_why_without_rsc_and_host(cib_t *cib_conn, GListPtr resources)
 {
     GListPtr lpc = NULL;
     GListPtr hosts = NULL;
 
     for (lpc = resources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         rsc->fns->location(rsc, &hosts, TRUE);
 
         if (hosts == NULL) {
             printf("Resource %s is not running\n", rsc->id);
         } else {
             printf("Resource %s is running\n", rsc->id);
         }
 
         cli_resource_check(cib_conn, rsc);
         g_list_free(hosts);
         hosts = NULL;
     }
 }
 
 static void
 cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources,
                                    resource_t *rsc, const char *host_uname)
 {
     if (resource_is_running_on(rsc, host_uname)) {
         printf("Resource %s is running on host %s\n", rsc->id, host_uname);
     } else {
         printf("Resource %s is not running on host %s\n", rsc->id, host_uname);
     }
     cli_resource_check(cib_conn, rsc);
 }
 
 static void
 cli_resource_why_without_rsc_with_host(cib_t *cib_conn, GListPtr resources,
                                        node_t *node)
 {
     const char *host_uname = node->details->uname;
     GListPtr allResources = node->details->allocated_rsc;
     GListPtr activeResources = node->details->running_rsc;
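     /* Resources assigned to this node but not currently running on it */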
     GListPtr inactiveResources = subtract_lists(allResources, activeResources);
     GListPtr lpc = NULL;
 
     for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         printf("Resource %s is running on host %s\n", rsc->id, host_uname);
         cli_resource_check(cib_conn, rsc);
     }
 
     for (lpc = inactiveResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         printf("Resource %s is assigned to host %s but not running\n",
                rsc->id, host_uname);
         cli_resource_check(cib_conn, rsc);
     }
 
     g_list_free(allResources);
     g_list_free(activeResources);
     g_list_free(inactiveResources);
 }
 
 static void
 cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources,
                                        resource_t *rsc)
 {
     GListPtr hosts = NULL;
 
     rsc->fns->location(rsc, &hosts, TRUE);
     printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not "));
     cli_resource_check(cib_conn, rsc);
     g_list_free(hosts);
 }
 
 void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc,
                       node_t *node)
 {
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
     if ((rsc == NULL) && (host_uname == NULL)) {
         cli_resource_why_without_rsc_and_host(cib_conn, resources);
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc,
                                            host_uname);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         cli_resource_why_without_rsc_with_host(cib_conn, resources, node);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
         cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc);
     }
 }