diff --git a/cts/CIB.py b/cts/CIB.py
index 8fbba6c265..3a16a1135f 100644
--- a/cts/CIB.py
+++ b/cts/CIB.py
@@ -1,451 +1,478 @@
 '''CTS: Cluster Testing System: CIB generator
 '''
 __copyright__ = '''
 Author: Andrew Beekhof <abeekhof@suse.de>
 Copyright (C) 2008 Andrew Beekhof
 '''
 
 import os, string, warnings
 
 from cts.CTSvars import *
-from cts.CTS     import ClusterManager
 
 
 class CibBase:
     def __init__(self, Factory, tag, _id, **kwargs):
         self.tag = tag
         self.name = _id
         self.kwargs = kwargs
-        self.values = []
         self.children = []
         self.Factory = Factory
 
     def __repr__(self):
         return "%s-%s" % (self.tag, self.name)
 
     def add_child(self, child):
         self.children.append(child)
 
     def __setitem__(self, key, value):
         if value:
             self.kwargs[key] = value
         else:
-            self.values.append(key)
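+            # A false/empty value removes any existing entry for the key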
+            self.kwargs.pop(key, None)
 
 from cib_xml import *
 
 
 class ConfigBase:
     cts_cib = None
     version = "unknown"
     feature_set = "unknown"
     Factory = None
 
     def __init__(self, CM, factory, tmpfile=None):
         self.CM = CM
         self.Factory = factory
 
         if not tmpfile:
             warnings.filterwarnings("ignore")
             tmpfile = os.tmpnam()
             warnings.resetwarnings()
 
         self.Factory.tmpfile = tmpfile
 
     def version(self):
         return self.version
 
     def NextIP(self):
         ip = self.CM.Env["IPBase"]
         if ":" in ip:
             (prefix, sep, suffix) = ip.rpartition(":")
             suffix = str(hex(int(suffix, 16)+1)).lstrip("0x")
         else:
             (prefix, sep, suffix) = ip.rpartition(".")
             suffix = str(int(suffix)+1)
 
         ip = prefix + sep + suffix
         self.CM.Env["IPBase"] = ip
         return ip.strip()
 
 
 class CIB11(ConfigBase):
     feature_set = "3.0"
     version = "pacemaker-1.1"
     counter = 1
 
     def _show(self, command=""):
         output = ""
         (rc, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, None, )
         for line in result:
             output += line
             self.Factory.debug("Generated Config: "+line)
         return output
 
     def NewIP(self, name=None, standard="ocf"):
         if self.CM.Env["IPagent"] == "IPaddr2":
             ip = self.NextIP()
             if not name:
                 if ":" in ip:
                     (prefix, sep, suffix) = ip.rpartition(":")
                     name = "r"+suffix
                 else:
                     name = "r"+ip
 
             r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
             r["ip"] = ip
         
             if ":" in ip:
                 r["cidr_netmask"] = "64"
                 r["nic"] = "eth0"
             else:
                 r["cidr_netmask"] = "32"
 
         else:
             if not name:
                 name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
                 self.counter = self.counter + 1
             r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
 
         r.add_op("monitor", "5s")
         return r
 
     def install(self, target):
         old = self.Factory.tmpfile
 
         # Force a rebuild
         self.cts_cib = None
 
         self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml"
         self.contents(target)
         self.Factory.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile)
 
         self.Factory.tmpfile = old
 
     def contents(self, target=None):
         # fencing resource
         if self.cts_cib:
             return self.cts_cib
 
         if target:
             self.Factory.target = target
 
         self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile))
         #cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''')
 
-        nodelist = ""
-        self.num_nodes = 0
-        for node in self.CM.Env["nodes"]:
-            nodelist += node + " "
-            self.num_nodes = self.num_nodes + 1
+        self.num_nodes = len(self.CM.Env["nodes"])
 
         no_quorum = "stop"
         if self.num_nodes < 3:
             no_quorum = "ignore"
-            self.Factory.log("Cluster only has %d nodes, configuring: no-quroum-policy=ignore" % self.num_nodes)
+            self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes)
+
+        # We don't need a nodes section unless we add attributes
+        stn = None
 
         # Fencing resource
         # Define first so that the shell doesn't reject every update
         if self.CM.Env["DoFencing"]:
             st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith")
             # Set a threshold for unreliable stonith devices such as the vmware one
             st.add_meta("migration-threshold", "5")
             st.add_op("monitor", "120s", timeout="120s")
             st.add_op("stop", "0", timeout="60s")
             st.add_op("start", "0", timeout="60s")
 
             entries = string.split(self.CM.Env["stonith-params"], ',')
             for entry in entries:
                 (name, value) = string.split(entry, '=')
                 if name == "hostlist" and value == "all":
                     value = string.join(self.CM.Env["nodes"], " ")
                 elif name == "pcmk_host_list" and value == "all":
                     value = string.join(self.CM.Env["nodes"], " ")
 
                 st[name] = value
 
             st.commit()
 
             # Test advanced fencing logic
             if True:
                 stf_nodes = []
                 stt_nodes = []
+                attr_nodes = []
 
                 # Create the levels
                 stl = FencingTopology(self.Factory)
                 for node in self.CM.Env["nodes"]:
+                    # Randomly assign node to a fencing method
                     ftype = self.CM.Env.RandomGen.choice(["levels-and", "levels-or ", "broadcast "])
-                    self.CM.log(" - Using %s fencing for node: %s" % (ftype, node))
-                    # for baremetal remote node tests
+
+                    # For levels-and, randomly choose targeting by node name or attribute
+                    by = ""
+                    if ftype == "levels-and":
+                        if self.CM.Env.RandomGen.choice([True, False]):
+                            attr_nodes.append(node)
+                            by = " (by attribute)"
+                        else:
+                            by = " (by name)"
+
+                    self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
+
+                    # for baremetal remote node tests (is this really necessary?)
                     stt_nodes.append("remote_%s" % node)
+
                     if ftype == "levels-and":
-                        stl.level(1, node, "FencingPass,Fencing")
+                        if node not in attr_nodes:
+                            stl.level(1, node, "FencingPass,Fencing")
                         stt_nodes.append(node)
 
                     elif ftype == "levels-or ":
                         stl.level(1, node, "FencingFail")
                         stl.level(2, node, "Fencing")
                         stf_nodes.append(node)
 
+                # If any levels-and nodes were targeted by attribute,
+                # create the attributes and a level for the attribute.
+                if len(attr_nodes):
+                    stn = Nodes(self.Factory)
+                    for node in attr_nodes:
+                        stn[node] = { "cts-fencing" : "levels-and" }
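+                    # The fencer matches this level by the node attribute value
+                    # rather than by node name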
+                    stl.level(1, "cts-fencing=levels-and", "FencingPass,Fencing")
+
                 # Create a Dummy agent that always passes for levels-and
                 if len(stt_nodes):
                     self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home)
                     stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith")
                     stt["pcmk_host_list"] = string.join(stt_nodes, " ")
                     # Wait this many seconds before doing anything, handy for letting disks get flushed too
                     stt["random_sleep_range"] = "30"
                     stt["mode"] = "pass"
                     stt.commit()
 
                 # Create a Dummy agent that always fails for levels-or
                 if len(stf_nodes):
                     self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home)
                     stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith")
                     stf["pcmk_host_list"] = string.join(stf_nodes, " ")
                     # Wait this many seconds before doing anything, handy for letting disks get flushed too
                     stf["random_sleep_range"] = "30"
                     stf["mode"] = "fail"
                     stf.commit()
 
                 # Now commit the levels themselves
                 stl.commit()
 
         o = Option(self.Factory, "stonith-enabled", self.CM.Env["DoFencing"])
         o["start-failure-is-fatal"] = "false"
         o["pe-input-series-max"] = "5000"
         o["default-action-timeout"] = "90s"
         o["shutdown-escalation"] = "5min"
         o["batch-limit"] = "10"
         o["dc-deadtime"] = "5s"
         o["no-quorum-policy"] = no_quorum
         o["expected-quorum-votes"] = self.num_nodes
 
         if self.CM.Env["DoBSC"] == 1:
             o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!"
 
         o.commit()
 
+        # Commit the nodes section if we defined one
+        if stn is not None:
+            stn.commit()
+
         # Add resources?
         if self.CM.Env["CIBResource"] == 1:
             self.add_resources()
 
         if self.CM.cluster_monitor == 1:
             mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker")
             mon.add_op("start", "0", requires="nothing")
             mon.add_op("monitor", "5s", requires="nothing")
             mon["update"] = "10"
             mon["extra_options"] = "-r -n"
             mon["user"] = "abeekhof"
             mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
             mon.commit()
 
             #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
 
         # generate cib
         self.cts_cib = self._show()
 
         if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml":
             self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)
 
         return self.cts_cib
 
     def add_resources(self):
         # Per-node resources
         for node in self.CM.Env["nodes"]:
             name = "rsc_"+node
             r = self.NewIP(name)
             r.prefer(node, "100")
             r.commit()
 
         # Migrator
         # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
         m = Resource(self.Factory, "migrator","Dummy",  "ocf", "pacemaker")
         m["passwd"] = "whatever"
         m.add_meta("resource-stickiness","1")
         m.add_meta("allow-migrate", "1")
         m.add_op("monitor", "P10S")
         m.commit()
 
         # Ping the test master
         p = Resource(self.Factory, "ping-1","ping",  "ocf", "pacemaker")
         p.add_op("monitor", "60s")
         p["host_list"] = self.CM.Env["cts-master"]
         p["name"] = "connected"
         p["debug"] = "true"
 
         c = Clone(self.Factory, "Connectivity", p)
         c["globally-unique"] = "false"
         c.commit()
 
         #master slave resource
         s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
         s.add_op("monitor", "15s", timeout="60s")
         s.add_op("monitor", "16s", timeout="60s", role="Master")
         ms = Master(self.Factory, "master-1", s)
         ms["clone-max"] = self.num_nodes
         ms["master-max"] = 1
         ms["clone-node-max"] = 1
         ms["master-node-max"] = 1
 
         # Require connectivity to run the master
         r = Rule(self.Factory, "connected", "-INFINITY", op="or")
         r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
         r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
         ms.prefer("connected", rule=r)
 
         ms.commit()
 
         # Group Resource
         g = Group(self.Factory, "group-1")
         g.add_child(self.NewIP())
 
         if self.CM.Env["have_systemd"]:
             # It would be better to put the python in a separate file, so we
             # could loop "while True" rather than sleep for 24 hours. We can't
             # put a loop in a single-line python command; only simple commands
             # may be separated by semicolon in python.
             dummy_service_file = """
 [Unit]
 Description=Dummy resource that takes a while to start
 
 [Service]
 Type=notify
 ExecStart=/usr/bin/python -c 'import time, systemd.daemon; time.sleep(10); systemd.daemon.notify("READY=1"); time.sleep(86400)'
 ExecStop=/bin/sleep 10
 ExecStop=/bin/kill -s KILL \$MAINPID
 """
 
             os.system("cat <<-END >/tmp/DummySD.service\n%s\nEND" % (dummy_service_file))
 
             self.CM.install_helper("DummySD.service", destdir="/usr/lib/systemd/system/", sourcedir="/tmp")
             sysd = Resource(self.Factory, "petulant", "DummySD",  "service")
             sysd.add_op("monitor", "P10S")
             g.add_child(sysd)
         else:
             g.add_child(self.NewIP())
 
         g.add_child(self.NewIP())
 
         # Group with the master
         g.after("master-1", first="promote", then="start")
         g.colocate("master-1", "INFINITY", withrole="Master")
 
         g.commit()
 
         # LSB resource
         lsb_agent = self.CM.install_helper("LSBDummy")
 
         lsb = Resource(self.Factory, "lsb-dummy",lsb_agent,  "lsb")
         lsb.add_op("monitor", "5s")
 
         # LSB with group
         lsb.after("group-1")
         lsb.colocate("group-1")
 
         lsb.commit()
 
 
 class CIB12(CIB11):
     feature_set = "3.0"
     version = "pacemaker-1.2"
 
 class CIB20(CIB11):
     feature_set = "3.0"
     version = "pacemaker-2.0"
 
 #class HASI(CIB10):
 #    def add_resources(self):
 #        # DLM resource
 #        self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
 #        self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
 
         # O2CB resource
 #        self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
 #        self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
 #        self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
 #        self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
 
 
 class ConfigFactory:
     def __init__(self, CM):
         self.CM = CM
         self.rsh = self.CM.rsh
         self.register("pacemaker11", CIB11, CM, self)
         self.register("pacemaker12", CIB12, CM, self)
         self.register("pacemaker20", CIB20, CM, self)
 #        self.register("hae", HASI, CM, self)
         self.target = self.CM.Env["nodes"][0]
         self.tmpfile = None
 
     def log(self, args):
         self.CM.log("cib: %s" % args)
 
     def debug(self, args):
         self.CM.debug("cib: %s" % args)
 
     def register(self, methodName, constructor, *args, **kargs):
         """register a constructor"""
         _args = [constructor]
         _args.extend(args)
         setattr(self, methodName, ConfigFactoryItem(*_args, **kargs))
 
     def unregister(self, methodName):
         """unregister a constructor"""
         delattr(self, methodName)
 
     def createConfig(self, name="pacemaker-1.0"):
         if name == "pacemaker-1.0":
             name = "pacemaker10";
         elif name == "pacemaker-1.1":
             name = "pacemaker11";
         elif name == "pacemaker-1.2":
             name = "pacemaker12";
         elif name == "pacemaker-2.0":
             name = "pacemaker20";
         elif name == "hasi":
             name = "hae";
 
         if hasattr(self, name):
             return getattr(self, name)()
         else:
             self.CM.log("Configuration variant '%s' is unknown.  Defaulting to latest config" % name)
 
         return self.pacemaker12()
 
 
 class ConfigFactoryItem:
     def __init__(self, function, *args, **kargs):
         self._function = function
         self._args = args
         self._kargs = kargs
 
     def __call__(self, *args, **kargs):
         """call function"""
         _args = list(self._args)
         _args.extend(args)
         _kargs = self._kargs.copy()
         _kargs.update(kargs)
         return self._function(*_args,**_kargs)
 
-# Basic Sanity Testing
 if __name__ == '__main__':
-    import CTSlab
-    env = CTSlab.LabEnvironment()
-    env["nodes"] = []
-    env["nodes"].append("pcmk-1")
-    env["nodes"].append("pcmk-2")
-    env["nodes"].append("pcmk-3")
-    env["nodes"].append("pcmk-4")
-
-    env["CIBResource"] = 1
-    env["IPBase"] = "fe80::1234:56:7890:1000"
-    env["DoStonith"] = 1
-    env["stonith-type"] = "fence_xvm"
-    env["stonith-params"] = "pcmk_arg_map=domain:uname"
-
-    manager = ClusterManager(env)
-    manager.cluster_monitor = False
-
-    CibFactory = ConfigFactory(manager)
+    """ Unit test (pass cluster node names as command line arguments) """
+
+    import CTS
+    import CM_ais
+    import sys
+
+    if len(sys.argv) < 2:
+        print("Usage: %s <node> ..." % sys.argv[0])
+        sys.exit(1)
+
+    args = [
+        "--nodes", " ".join(sys.argv[1:]),
+        "--clobber-cib",
+        "--populate-resources",
+        "--stack", "corosync",
+        "--test-ip-base", "fe80::1234:56:7890:1000",
+        "--stonith", "rhcs",
+        "--stonith-args", "pcmk_arg_map=domain:uname"
+    ]
+    env = CTS.CtsLab(args)
+    cm = CM_ais.crm_mcp(env)
+    CibFactory = ConfigFactory(cm)
     cib = CibFactory.createConfig("pacemaker-1.1")
     print(cib.contents())
diff --git a/cts/cib_xml.py b/cts/cib_xml.py
index 3d8f8d4daf..feb4e05a9c 100644
--- a/cts/cib_xml.py
+++ b/cts/cib_xml.py
@@ -1,249 +1,297 @@
 '''CTS: Cluster Testing System: CIB generator
 '''
 __copyright__ = '''
 Author: Andrew Beekhof <abeekhof@suse.de>
 Copyright (C) 2008 Andrew Beekhof
 '''
 
 import sys
+import string
 
 from cts.CTSvars import *
 from cts.CIB     import CibBase
 
 
 class XmlBase(CibBase):
     def __init__(self, Factory, tag, _id, **kwargs):
         CibBase.__init__(self, Factory, tag, _id, **kwargs)
 
     def show(self):
         text = '''<%s''' % self.tag
         if self.name:
             text += ''' id="%s"''' % (self.name)
         for k in list(self.kwargs.keys()):
             text += ''' %s="%s"''' % (k, self.kwargs[k])
 
         if not self.children:
             text += '''/>'''
             return text
 
         text += '''>'''
 
         for c in self.children:
             text += c.show()
 
         text += '''</%s>''' % self.tag
         return text
 
     def _run(self, operation, xml, section="all", options=""):
-        self.Factory.debug("Writing out %s" % self.name)
+        if self.name:
+            label = self.name
+        else:
+            label = "<%s>" % self.tag
+        self.Factory.debug("Writing out %s" % label)
         fixed  = "HOME=/root CIB_file="+self.Factory.tmpfile
         fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
         rc = self.Factory.rsh(self.Factory.target, fixed)
         if rc != 0:
             self.Factory.log("Configure call failed: "+fixed)
             sys.exit(1)
 
 
+class InstanceAttributes(XmlBase):
+    """ Create an <instance_attributes> section with name-value pairs """
+
+    def __init__(self, Factory, name, attrs):
+        XmlBase.__init__(self, Factory, "instance_attributes", name)
+
+        # Create an <nvpair> for each attribute
+        for attr in list(attrs.keys()):
+            self.add_child(XmlBase(Factory, "nvpair", "%s-%s" % (name, attr),
+                name=attr, value=attrs[attr]))
+
+
+class Node(XmlBase):
+    """ Create a <node> section with node attributes for one node """
+
+    def __init__(self, Factory, nodeid, uname, attrs):
+        XmlBase.__init__(self, Factory, "node", nodeid, uname=uname)
+        self.add_child(InstanceAttributes(Factory, "%s-1" % uname, attrs))
+
+
+class Nodes(XmlBase):
+    """ Create a <nodes> section """
+
+    def __init__(self, Factory):
+        XmlBase.__init__(self, Factory, "nodes", None)
+
+        # The schema requires a node ID when defining node attributes,
+        # but we don't know it, so we fake it and let the cluster fix it
+        self.next_nodeid = 1000
+
+    def __setitem__(self, key, value):
+        """ For "nodes[UNAME] = ATTRS" where ATTRS is a dictionary of attribute name/value pairs """
+
+        self.add_child(Node(self.Factory, self.next_nodeid, key, value))
+        self.next_nodeid = self.next_nodeid + 1
+
+    def commit(self):
+        self._run("modify", self.show(), "configuration", "--allow-create")
+
+
 class FencingTopology(XmlBase):
     def __init__(self, Factory):
         XmlBase.__init__(self, Factory, "fencing-topology", None)
 
-    def level(self, index, node, devices):
-        self.add_child(XmlBase(self.Factory, "fencing-level", "cts-%s.%d" % (node, index), target=node, index=index, devices=devices))
+    def level(self, index, target, devices):
+        # Generate XML ID (sanitizing target-by-attribute levels)
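+        # (e.g. target "cts-fencing=levels-and" at index 1 gets the XML ID
+        # "cts-cts-fencing-levels-and.1")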
+        xml_id = "cts-%s.%d" % (string.replace(target, "=", "-"), index)
+
+        self.add_child(XmlBase(self.Factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
 
     def commit(self):
         self._run("create", self.show(), "configuration", "--allow-create")
 
 
 class Option(XmlBase):
     def __init__(self, Factory, name=None, value=None, section="cib-bootstrap-options"):
         XmlBase.__init__(self, Factory, "cluster_property_set", section)
         if name and value:
             self.add_child(XmlBase(Factory, "nvpair", "cts-%s" % name, name=name, value=value))
 
     def __setitem__(self, key, value):
         self.add_child(XmlBase(self.Factory, "nvpair", "cts-%s" % key, name=key, value=value))
 
     def commit(self):
         self._run("modify", self.show(), "crm_config", "--allow-create")
 
 
 class Expression(XmlBase):
     def __init__(self, Factory, name, attr, op, value=None):
         XmlBase.__init__(self, Factory, "expression", name, attribute=attr, operation=op)
         if value:
             self["value"] = value
 
 
 class Rule(XmlBase):
     def __init__(self, Factory, name, score, op="and", expr=None):
         XmlBase.__init__(self, Factory, "rule", "%s" % name)
         self["boolean-op"] = op
         self["score"] = score
         if expr:
             self.add_child(expr)
 
 
 class Resource(XmlBase):
     def __init__(self, Factory, name, rtype, standard, provider=None):
         XmlBase.__init__(self, Factory, "native", name)
 
         self.rtype = rtype
         self.standard = standard
         self.provider = provider
 
         self.op = []
         self.meta = {}
         self.param = {}
 
         self.scores = {}
         self.needs = {}
         self.coloc = {}
 
         if self.standard == "ocf" and not provider:
             self.provider = "heartbeat"
         elif self.standard == "lsb":
             self.provider = None
 
     def __setitem__(self, key, value):
         self.add_param(key, value)
 
     def add_op(self, name, interval, **kwargs):
         self.op.append(
             XmlBase(self.Factory, "op", "%s-%s" % (name, interval), name=name, interval=interval, **kwargs))
 
     def add_param(self, name, value):
         self.param[name] = value
 
     def add_meta(self, name, value):
         self.meta[name] = value
 
     def prefer(self, node, score="INFINITY", rule=None):
         if not rule:
             rule = Rule(self.Factory, "prefer-%s-r" % node, score,
                         expr=Expression(self.Factory, "prefer-%s-e" % node, "#uname", "eq", node))
         self.scores[node] = rule
 
     def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
         kargs = kwargs.copy()
         kargs["kind"] = kind
         if then:
             kargs["first-action"] = "start"
             kargs["then-action"] = then
 
         if first:
             kargs["first-action"] = first
 
         self.needs[resource] = kargs
 
     def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
         kargs = kwargs.copy()
         kargs["score"] = score
         if role:
             kargs["rsc-role"] = role
         if withrole:
             kargs["with-rsc-role"] = withrole
 
         self.coloc[resource] = kargs
 
     def constraints(self):
         text = "<constraints>"
 
         for k in list(self.scores.keys()):
             text += '''<rsc_location id="prefer-%s" rsc="%s">''' % (k, self.name)
             text += self.scores[k].show()
             text += '''</rsc_location>'''
 
         for k in list(self.needs.keys()):
             text += '''<rsc_order id="%s-after-%s" first="%s" then="%s"''' % (self.name, k, k, self.name)
             kargs = self.needs[k]
             for kw in list(kargs.keys()):
                 text += ''' %s="%s"''' % (kw, kargs[kw])
             text += '''/>'''
 
         for k in list(self.coloc.keys()):
             text += '''<rsc_colocation id="%s-with-%s" rsc="%s" with-rsc="%s"''' % (self.name, k, self.name, k)
             kargs = self.coloc[k]
             for kw in list(kargs.keys()):
                 text += ''' %s="%s"''' % (kw, kargs[kw])
             text += '''/>'''
 
         text += "</constraints>"
         return text
 
     def show(self):
         text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self.standard, self.rtype)
         if self.provider:
             text += ''' provider="%s"''' % (self.provider)
         text += '''>'''
 
         if len(self.meta) > 0:
             text += '''<meta_attributes id="%s-meta">''' % self.name
             for p in list(self.meta.keys()):
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
             text += '''</meta_attributes>'''
 
         if len(self.param) > 0:
             text += '''<instance_attributes id="%s-params">''' % self.name
             for p in list(self.param.keys()):
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.param[p])
             text += '''</instance_attributes>'''
 
         if len(self.op) > 0:
             text += '''<operations>'''
             for o in self.op:
                 key = o.name
                 o.name = "%s-%s" % (self.name, key)
                 text += o.show()
                 o.name = key
             text += '''</operations>'''
 
         text += '''</primitive>'''
         return text
 
     def commit(self):
         self._run("create", self.show(), "resources")
         self._run("modify", self.constraints())
 
 
 class Group(Resource):
     def __init__(self, Factory, name):
         Resource.__init__(self, Factory, name, None, None)
         self.tag = "group"
 
     def __setitem__(self, key, value):
         self.add_meta(key, value)
 
     def show(self):
         text = '''<%s id="%s">''' % (self.tag, self.name)
 
         if len(self.meta) > 0:
             text += '''<meta_attributes id="%s-meta">''' % self.name
             for p in list(self.meta.keys()):
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
             text += '''</meta_attributes>'''
 
         for c in self.children:
             text += c.show()
         text += '''</%s>''' % self.tag
         return text
 
 
 class Clone(Group):
     def __init__(self, Factory, name, child=None):
         Group.__init__(self, Factory, name)
         self.tag = "clone"
         if child:
             self.add_child(child)
 
     def add_child(self, resource):
         if not self.children:
             self.children.append(resource)
         else:
             self.Factory.log("Clones can only have a single child. Ignoring %s" % resource.name)
 
 
 class Master(Clone):
     def __init__(self, Factory, name, child=None):
         Clone.__init__(self, Factory, name, child)
         self.tag = "master"
diff --git a/fencing/admin.c b/fencing/admin.c
index 6956e0a3c2..d62da89116 100644
--- a/fencing/admin.c
+++ b/fencing/admin.c
@@ -1,554 +1,557 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/mainloop.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/cib.h>
 #include <crm/pengine/status.h>
 
 #include <crm/common/xml.h>
 
 
 /* *INDENT-OFF* */
 static struct crm_option long_options[] = {
     {"help",        0, 0, '?', "\tThis text"},
     {"version",     0, 0, '$', "\tVersion information"  },
     {"verbose",     0, 0, 'V', "\tIncrease debug output"},
     {"quiet",       0, 0, 'q', "\tPrint only essential output"},
 
     {"-spacer-",    0, 0, '-', "Commands:"},
     {"list",            1, 0, 'l', "List devices that can terminate the specified host"},
     {"list-registered", 0, 0, 'L', "List all registered devices"},
     {"list-installed",  0, 0, 'I', "List all installed devices"},
 
     {"-spacer-",    0, 0, '-', ""},
     {"metadata",    0, 0, 'M', "Check the device's metadata"},
     {"query",       1, 0, 'Q', "Check the device's status"},
     {"fence",       1, 0, 'F', "Fence the named host"},
     {"unfence",     1, 0, 'U', "Unfence the named host"},
     {"reboot",      1, 0, 'B', "Reboot the named host"},
     {"confirm",     1, 0, 'C', "Confirm the named host is now safely down"},
     {"history",     1, 0, 'H', "Retrieve last fencing operation"},
     {"last",        1, 0, 'h', "Indicate when the named node was last fenced. Optional: --as-node-id"},
 
     {"-spacer-",    0, 0, '-', ""},
     {"register",    1, 0, 'R', "Register the named stonith device. Requires: --agent, optional: --option"},
     {"deregister",  1, 0, 'D', "De-register the named stonith device"},
 
-    {"register-level",    1, 0, 'r', "Register a stonith level for the named host. Requires: --index, one or more --device entries"},
-    {"deregister-level",  1, 0, 'd', "De-register a stonith level for the named host. Requires: --index"},
+    {"register-level",    1, 0, 'r',
+     "Register a stonith level for the named target (a node name\n\t"
+     "pattern, or a node attribute NAME=VALUE pair).\n\t"
+     "Requires: --index, one or more --device entries"},
+    {"deregister-level",  1, 0, 'd', "De-register a stonith level for the named target. Requires: --index"},
 
     {"-spacer-",    0, 0, '-', ""},
     {"-spacer-",    0, 0, '-', "Options and modifiers:"},
     {"agent",       1, 0, 'a', "The agent (eg. fence_xvm) to instantiate when calling with --register"},
     {"env-option",  1, 0, 'e'},
     {"option",      1, 0, 'o'},
     {"tag",         1, 0, 'T', "Identify fencing operations with the specified tag.\n\t"
      "Useful when there are multiple entities that might be invoking stonith_admin(8)"},
 
     {"device",      1, 0, 'v', "A device to associate with a given host and stonith level"},
     {"index",       1, 0, 'i', "The stonith level (1-9)"},
 
     {"timeout",     1, 0, 't', "Operation timeout in seconds"},
     {"as-node-id",  0, 0, 'n', "(Advanced) The supplied node is the corosync nodeid (Only for use with --last)" },
     {"tolerance",   1, 0,   0, "(Advanced) Do nothing if an equivalent --fence request succeeded less than N seconds earlier" },
 
     {"list-all",    0, 0, 'L', "legacy alias for --list-registered"},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 int st_opts = st_opt_sync_call | st_opt_allow_suicide;
 
 GMainLoop *mainloop = NULL;
 struct {
     stonith_t *st;
     const char *target;
     const char *action;
     char *name;
     int timeout;
     int tolerance;
     int rc;
 } async_fence_data;
 
 static int
 try_mainloop_connect(void)
 {
     stonith_t *st = async_fence_data.st;
     int tries = 10;
     int i = 0;
     int rc = 0;
 
     for (i = 0; i < tries; i++) {
         crm_debug("Connecting as %s", async_fence_data.name);
         rc = st->cmds->connect(st, async_fence_data.name, NULL);
 
         if (!rc) {
             crm_debug("stonith client connection established");
             return 0;
         } else {
             crm_debug("stonith client connection failed");
         }
         sleep(1);
     }
 
     crm_err("Couldn not connect to stonithd. \n");
     return -1;
 }
 
 static void
 notify_callback(stonith_t * st, stonith_event_t * e)
 {
     if (e->result != pcmk_ok) {
         return;
     }
 
     if (safe_str_eq(async_fence_data.target, e->target) &&
         safe_str_eq(async_fence_data.action, e->action)) {
 
         async_fence_data.rc = e->result;
         g_main_loop_quit(mainloop);
     }
 }
 
 static void
 fence_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     async_fence_data.rc = data->rc;
 
     g_main_loop_quit(mainloop);
 }
 
 static gboolean
 async_fence_helper(gpointer user_data)
 {
     stonith_t *st = async_fence_data.st;
     int call_id = 0;
 
     if (try_mainloop_connect()) {
         g_main_loop_quit(mainloop);
         return TRUE;
     }
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, notify_callback);
 
     call_id = st->cmds->fence(st,
                               st_opt_allow_suicide,
                               async_fence_data.target,
                               async_fence_data.action,
                               async_fence_data.timeout, async_fence_data.tolerance);
 
     if (call_id < 0) {
         g_main_loop_quit(mainloop);
         return TRUE;
     }
 
     st->cmds->register_callback(st,
                                 call_id,
                                 async_fence_data.timeout,
                                 st_opt_timeout_updates, NULL, "callback", fence_callback);
 
     return TRUE;
 }
 
 static int
 mainloop_fencing(stonith_t * st, const char *target, const char *action, int timeout, int tolerance)
 {
     crm_trigger_t *trig;
 
     async_fence_data.st = st;
     async_fence_data.target = target;
     async_fence_data.action = action;
     async_fence_data.timeout = timeout;
     async_fence_data.tolerance = tolerance;
     async_fence_data.rc = -1;
 
     trig = mainloop_add_trigger(G_PRIORITY_HIGH, async_fence_helper, NULL);
     mainloop_set_trigger(trig);
 
     mainloop = g_main_new(FALSE);
     g_main_run(mainloop);
 
     return async_fence_data.rc;
 }
 
 int
 main(int argc, char **argv)
 {
     int flag;
     int rc = 0;
     int quiet = 0;
     int verbose = 0;
     int argerr = 0;
     int timeout = 120;
     int option_index = 0;
     int fence_level = 0;
     int no_connect = 0;
     int tolerance = 0;
     int as_nodeid = FALSE;
 
     char *name = NULL;
     char *value = NULL;
     const char *agent = NULL;
     const char *device = NULL;
     const char *target = NULL;
     const char *longname = NULL;
 
     char action = 0;
     stonith_t *st = NULL;
     stonith_key_value_t *params = NULL;
     stonith_key_value_t *devices = NULL;
     stonith_key_value_t *dIter = NULL;
 
     crm_log_cli_init("stonith_admin");
     crm_set_options(NULL, "mode [options]", long_options,
                     "Provides access to the stonith-ng API.\n"
                     "\nAllows the administrator to add/remove/list devices, check device and host status and fence hosts\n");
 
     async_fence_data.name = strdup(crm_system_name);
 
     while (1) {
         flag = crm_get_option_long(argc, argv, &option_index, &longname);
         if (flag == -1)
             break;
 
         switch (flag) {
             case 'V':
                 verbose = 1;
                 crm_bump_log_level(argc, argv);
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             case 'I':
                 no_connect = 1;
                 /* fall through */
             case 'L':
                 action = flag;
                 break;
             case 'q':
                 quiet = 1;
                 break;
             case 'Q':
             case 'R':
             case 'D':
                 action = flag;
                 device = optarg;
                 break;
             case 'T':
                 free(async_fence_data.name);
                 async_fence_data.name = crm_strdup_printf("%s.%s", crm_system_name, optarg);
                 break;
             case 'a':
                 agent = optarg;
                 break;
             case 'l':
                 target = optarg;
                 action = 'L';
                 break;
             case 'M':
                 no_connect = 1;
                 action = flag;
                 break;
             case 't':
                 timeout = crm_atoi(optarg, NULL);
                 break;
             case 'B':
             case 'F':
             case 'U':
                 /* using mainloop here */
                 no_connect = 1;
                 /* fall through */
             case 'C':
                 /* Always log the input arguments */
                 crm_log_args(argc, argv);
                 target = optarg;
                 action = flag;
                 break;
             case 'n':
                 as_nodeid = TRUE;
                 break;
             case 'h':
             case 'H':
             case 'r':
             case 'd':
                 target = optarg;
                 action = flag;
                 break;
             case 'i':
                 fence_level = crm_atoi(optarg, NULL);
                 break;
             case 'v':
                 devices = stonith_key_value_add(devices, NULL, optarg);
                 break;
             case 'o':
                 crm_info("Scanning: -o %s", optarg);
                 rc = sscanf(optarg, "%m[^=]=%m[^=]", &name, &value);
                 if (rc != 2) {
                     crm_err("Invalid option: -o %s", optarg);
                     ++argerr;
                 } else {
                     crm_info("Got: '%s'='%s'", name, value);
                     params = stonith_key_value_add(params, name, value);
                 }
                 free(value); value = NULL;
                 free(name); name = NULL;
                 break;
             case 'e':
                 {
                     char *key = crm_concat("OCF_RESKEY", optarg, '_');
                     const char *env = getenv(key);
 
                     if (env == NULL) {
                         crm_err("Invalid option: -e %s", optarg);
                         ++argerr;
                     } else {
                         crm_info("Got: '%s'='%s'", optarg, env);
                         params = stonith_key_value_add(params, optarg, env);
                     }
                 }
                 break;
             case 0:
                 if (safe_str_eq("tolerance", longname)) {
                     tolerance = crm_get_msec(optarg) / 1000;    /* Send in seconds */
                 }
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         crm_help('?', EX_USAGE);
     }
 
     crm_debug("Create");
     st = stonith_api_new();
     crm_debug("Created");
 
     if (!no_connect) {
         crm_debug("Connecting as %s", async_fence_data.name);
         rc = st->cmds->connect(st, async_fence_data.name, NULL);
 
         crm_debug("Connect: %d", rc);
 
         if (rc < 0) {
             goto done;
         }
     }
 
     switch (action) {
         case 'I':
             rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout);
             for (dIter = devices; dIter; dIter = dIter->next) {
                 fprintf(stdout, " %s\n", dIter->value);
             }
             if (rc == 0) {
                 fprintf(stderr, "No devices found\n");
 
             } else if (rc > 0) {
                 fprintf(stderr, "%d devices found\n", rc);
                 rc = 0;
             }
             stonith_key_value_freeall(devices, 1, 1);
             break;
         case 'L':
             rc = st->cmds->query(st, st_opts, target, &devices, timeout);
             for (dIter = devices; dIter; dIter = dIter->next) {
                 fprintf(stdout, " %s\n", dIter->value);
             }
             if (rc == 0) {
                 fprintf(stderr, "No devices found\n");
             } else if (rc > 0) {
                 fprintf(stderr, "%d devices found\n", rc);
                 rc = 0;
             }
             stonith_key_value_freeall(devices, 1, 1);
             break;
         case 'Q':
             rc = st->cmds->monitor(st, st_opts, device, timeout);
             if (rc < 0) {
                 rc = st->cmds->list(st, st_opts, device, NULL, timeout);
             }
             break;
         case 'R':
             rc = st->cmds->register_device(st, st_opts, device, "stonith-ng", agent, params);
             break;
         case 'D':
             rc = st->cmds->remove_device(st, st_opts, device);
             break;
         case 'r':
             rc = st->cmds->register_level(st, st_opts, target, fence_level, devices);
             break;
         case 'd':
             rc = st->cmds->remove_level(st, st_opts, target, fence_level);
             break;
         case 'M':
             if (agent == NULL) {
                 printf("Please specify an agent to query using -a,--agent [value]\n");
                 return -1;
             } else {
                 char *buffer = NULL;
 
                 rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, timeout);
                 if (rc == pcmk_ok) {
                     printf("%s\n", buffer);
                 }
                 free(buffer);
             }
             break;
         case 'C':
             rc = st->cmds->confirm(st, st_opts, target);
             break;
         case 'B':
             rc = mainloop_fencing(st, target, "reboot", timeout, tolerance);
             break;
         case 'F':
             rc = mainloop_fencing(st, target, "off", timeout, tolerance);
             break;
         case 'U':
             rc = mainloop_fencing(st, target, "on", timeout, tolerance);
             break;
         case 'h':
             {
                 time_t when = 0;
 
                 if(as_nodeid) {
                     uint32_t nodeid = atol(target);
                     when = stonith_api_time(nodeid, NULL, FALSE);
                 } else {
                     when = stonith_api_time(0, target, FALSE);
                 }
                 if(when) {
                     printf("Node %s last kicked at: %s\n", target, ctime(&when));
                 } else {
                     printf("Node %s has never been kicked\n", target);
                 }
             }
             break;
         case 'H':
             {
                 stonith_history_t *history, *hp, *latest = NULL;
 
                 rc = st->cmds->history(st, st_opts, target, &history, timeout);
                 for (hp = history; hp; hp = hp->next) {
                     char *action_s = NULL;
                     time_t complete = hp->completed;
 
                     if (hp->state == st_done) {
                         latest = hp;
                     }
 
                     if (quiet || !verbose) {
                         continue;
                     } else if (hp->action == NULL) {
                         action_s = strdup("unknown");
                     } else if (hp->action[0] != 'r') {
                         action_s = crm_concat("turn", hp->action, ' ');
                     } else {
                         action_s = strdup(hp->action);
                     }
 
                     if (hp->state == st_failed) {
                         printf("%s failed to %s node %s on behalf of %s from %s at %s\n",
                                hp->delegate ? hp->delegate : "We", action_s, hp->target,
                                hp->client, hp->origin, ctime(&complete));
 
                     } else if (hp->state == st_done && hp->delegate) {
                         printf("%s was able to %s node %s on behalf of %s from %s at %s\n",
                                hp->delegate, action_s, hp->target,
                                hp->client, hp->origin, ctime(&complete));
 
                     } else if (hp->state == st_done) {
                         printf("We were able to %s node %s on behalf of %s from %s at %s\n",
                                action_s, hp->target, hp->client, hp->origin, ctime(&complete));
                     } else {
                         printf("%s at %s wishes to %s node %s - %d %d\n",
                                hp->client, hp->origin, action_s, hp->target, hp->state, hp->completed);
                     }
 
                     free(action_s);
                 }
 
                 if (latest) {
                     if (quiet) {
                         printf("%d\n", latest->completed);
                     } else {
                         char *action_s = NULL;
                         time_t complete = latest->completed;
 
                         if (latest->action == NULL) {
                             action_s = strdup("unknown");
                         } else if (latest->action[0] != 'r') {
                             action_s = crm_concat("turn", latest->action, ' ');
                         } else {
                             action_s = strdup(latest->action);
                         }
 
                         printf("%s was able to %s node %s on behalf of %s from %s at %s\n",
                                latest->delegate ? latest->delegate : "We", action_s, latest->target,
                                latest->client, latest->origin, ctime(&complete));
 
                         free(action_s);
                     }
                 }
                 break;
             }                   /* closing bracket for -H case */
     }                           /* closing bracket for switch case */
 
   done:
     free(async_fence_data.name);
     crm_info("Command returned: %s (%d)", pcmk_strerror(rc), rc);
     if (rc < 0) {
         printf("Command failed: %s\n", pcmk_strerror(rc));
     }
 
     stonith_key_value_freeall(params, 1, 1);
     st->cmds->disconnect(st);
     crm_debug("Disconnect: %d", rc);
 
     crm_debug("Destroy");
     stonith_api_delete(st);
 
     return rc;
 }
diff --git a/fencing/commands.c b/fencing/commands.c
index bd3b27d829..adf655b887 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -1,2467 +1,2472 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/mainloop.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #if SUPPORT_CIBSECRETS
 #  include <crm/common/cib_secrets.h>
 #endif
 
 #include <internal.h>
 
 GHashTable *device_list = NULL;
 GHashTable *topology = NULL;
 GList *cmd_list = NULL;
 
 struct device_search_s {
     /* target of fence action */
     char *host;
     /* requested fence action */
     char *action;
     /* timeout to use if a device is queried dynamically for possible targets */
     int per_device_timeout;
     /* number of registered fencing devices at time of request */
     int replies_needed;
     /* number of device replies received so far */
     int replies_received;
     /* whether the target is eligible to perform requested action (or off) */
     bool allow_suicide;
 
     /* private data to pass to search callback function */
     void *user_data;
     /* function to call when all replies have been received */
     void (*callback) (GList * devices, void *user_data);
     /* devices capable of performing requested action (or off if remapping) */
     GListPtr capable;
 };
 
 static gboolean stonith_device_dispatch(gpointer user_data);
 static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data);
 static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                                const char *client_id);
 
 static void search_devices_record_result(struct device_search_s *search, const char *device,
                                          gboolean can_fence);
 
 typedef struct async_command_s {
 
     int id;
     int pid;
     int fd_stdout;
     int options;
     int default_timeout; /* seconds */
     int timeout; /* seconds */
 
     int start_delay; /* milliseconds */
     int delay_id;
 
     char *op;
     char *origin;
     char *client;
     char *client_name;
     char *remote_op_id;
 
     char *victim;
     uint32_t victim_nodeid;
     char *action;
     char *device;
     char *mode;
 
     GListPtr device_list;
     GListPtr device_next;
 
     void *internal_user_data;
     void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data);
     guint timer_sigterm;
     guint timer_sigkill;
     /*! If the operation timed out, this is the last signal
      *  we sent to the process to get it to terminate */
     int last_timeout_signo;
 } async_command_t;
 
 static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output,
                                               xmlNode * data, int rc);
 
 static gboolean
 is_action_required(const char *action, stonith_device_t *device)
 {
     return device && device->automatic_unfencing && safe_str_eq(action, "on");
 }
 
 static int
 get_action_delay_max(stonith_device_t * device, const char * action)
 {
     const char *value = NULL;
     int delay_max_ms = 0;
 
     if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) {
         return 0;
     }
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_MAX);
     if (value) {
        delay_max_ms = crm_get_msec(value);
     }
 
     return delay_max_ms;
 }
 
 /*!
  * \internal
  * \brief Override STONITH timeout with pcmk_*_timeout if available
  *
  * \param[in] device           STONITH device to use
  * \param[in] action           STONITH action name
  * \param[in] default_timeout  Timeout to use if device does not have
  *                             a pcmk_*_timeout parameter for action
  *
  * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout
  * \note For consistency, it would be nice if reboot/off/on timeouts could be
  *       set the same way as start/stop/monitor timeouts, i.e. with an
  *       <operation> entry in the fencing resource configuration. However that
  *       is insufficient because fencing devices may be registered directly via
  *       the STONITH register_device() API instead of going through the CIB
  *       (e.g. stonith_admin uses it for its -R option, and the LRMD uses it to
  *       ensure a device is registered when a command is issued). As device
  *       properties, pcmk_*_timeout parameters can be grabbed by stonithd when
  *       the device is registered, whether by CIB change or API call.
  */
 static int
 get_action_timeout(stonith_device_t * device, const char *action, int default_timeout)
 {
     if (action && device && device->params) {
         char buffer[64] = { 0, };
         const char *value = NULL;
 
         /* If "reboot" was requested but the device does not support it,
          * we will remap to "off", so check timeout for "off" instead
          */
         if (safe_str_eq(action, "reboot")
             && is_not_set(device->flags, st_device_supports_reboot)) {
             crm_trace("%s doesn't support reboot, using timeout for off instead",
                       device->id);
             action = "off";
         }
 
         /* If the device config specified an action-specific timeout, use it */
         snprintf(buffer, sizeof(buffer) - 1, "pcmk_%s_timeout", action);
         value = g_hash_table_lookup(device->params, buffer);
         if (value) {
             return atoi(value);
         }
     }
     return default_timeout;
 }
 
 static void
 free_async_command(async_command_t * cmd)
 {
     if (!cmd) {
         return;
     }
 
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
 
     cmd_list = g_list_remove(cmd_list, cmd);
 
     g_list_free_full(cmd->device_list, free);
     free(cmd->device);
     free(cmd->action);
     free(cmd->victim);
     free(cmd->remote_op_id);
     free(cmd->client);
     free(cmd->client_name);
     free(cmd->origin);
     free(cmd->mode);
     free(cmd->op);
     free(cmd);
 }
 
 static async_command_t *
 create_async_command(xmlNode * msg)
 {
     async_command_t *cmd = NULL;
     xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
     const char *action = crm_element_value(op, F_STONITH_ACTION);
 
     CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL);
 
     crm_log_xml_trace(msg, "Command");
     cmd = calloc(1, sizeof(async_command_t));
     crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id));
     crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options));
     crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout));
     cmd->timeout = cmd->default_timeout;
 
     cmd->origin = crm_element_value_copy(msg, F_ORIG);
     cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID);
     cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID);
     cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME);
     cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION);
     cmd->action = strdup(action);
     cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET);
     cmd->mode = crm_element_value_copy(op, F_STONITH_MODE);
     cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE);
 
     CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL);
     CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient"));
 
     cmd->done_cb = st_child_done;
     cmd_list = g_list_append(cmd_list, cmd);
     return cmd;
 }
 
 static gboolean
 stonith_device_execute(stonith_device_t * device)
 {
     int exec_rc = 0;
     const char *action_str = NULL;
     async_command_t *cmd = NULL;
     stonith_action_t *action = NULL;
 
     CRM_CHECK(device != NULL, return FALSE);
 
     if (device->active_pid) {
         crm_trace("%s is still active with pid %u", device->id, device->active_pid);
         return TRUE;
     }
 
     if (device->pending_ops) {
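         /* Peek at the next queued operation; if it is still waiting out its
          * random start delay, leave it queued for now; otherwise dequeue it
          * for execution.
          */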
         GList *first = device->pending_ops;
 
         cmd = first->data;
         if (cmd && cmd->delay_id) {
             crm_trace
                 ("Operation %s%s%s on %s was asked to run too early, waiting for start_delay timeout of %dms",
                  cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
                  device->id, cmd->start_delay);
             return TRUE;
         }
 
         device->pending_ops = g_list_remove_link(device->pending_ops, first);
         g_list_free_1(first);
     }
 
     if (cmd == NULL) {
         crm_trace("Nothing further to do for %s", device->id);
         return TRUE;
     }
 
     if(safe_str_eq(device->agent, STONITH_WATCHDOG_AGENT)) {
         if(safe_str_eq(cmd->action, "reboot")) {
             pcmk_panic(__FUNCTION__);
             return TRUE;
 
         } else if(safe_str_eq(cmd->action, "off")) {
             pcmk_panic(__FUNCTION__);
             return TRUE;
 
         } else {
             crm_info("Faking success for %s watchdog operation", cmd->action);
             cmd->done_cb(0, 0, NULL, cmd);
             return TRUE;
         }
     }
 
 #if SUPPORT_CIBSECRETS
     if (replace_secret_params(device->id, device->params) < 0) {
         /* replacing secrets failed! */
         if (safe_str_eq(cmd->action,"stop")) {
             /* don't fail on stop! */
             crm_info("proceeding with the stop operation for %s", device->id);
 
         } else {
             crm_err("failed to get secrets for %s, "
                     "considering resource not configured", device->id);
             exec_rc = PCMK_OCF_NOT_CONFIGURED;
             cmd->done_cb(0, exec_rc, NULL, cmd);
             return TRUE;
         }
     }
 #endif
 
     action_str = cmd->action;
     if (safe_str_eq(cmd->action, "reboot") && is_not_set(device->flags, st_device_supports_reboot)) {
         crm_warn("Agent '%s' does not advertise support for 'reboot', performing 'off' action instead", device->agent);
         action_str = "off";
     }
 
     action = stonith_action_create(device->agent,
                                    action_str,
                                    cmd->victim,
                                    cmd->victim_nodeid,
                                    cmd->timeout, device->params, device->aliases);
 
     /* for async exec, exec_rc is pid if positive and error code if negative/zero */
     exec_rc = stonith_action_execute_async(action, (void *)cmd, cmd->done_cb);
 
     if (exec_rc > 0) {
         crm_debug("Operation %s%s%s on %s now running with pid=%d, timeout=%ds",
                   cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
                   device->id, exec_rc, cmd->timeout);
         device->active_pid = exec_rc;
 
     } else {
         crm_warn("Operation %s%s%s on %s failed: %s (%d)",
                  cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
                  device->id, pcmk_strerror(exec_rc), exec_rc);
         cmd->done_cb(0, exec_rc, NULL, cmd);
     }
     return TRUE;
 }
 
 static gboolean
 stonith_device_dispatch(gpointer user_data)
 {
     return stonith_device_execute(user_data);
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     async_command_t *cmd = data;
     stonith_device_t *device = NULL;
 
     cmd->delay_id = 0;
     device = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
 
     if (device) {
         mainloop_set_trigger(device->work);
     }
 
     return FALSE;
 }
 
 static void
 schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
 {
     int delay_max = 0;
 
     CRM_CHECK(cmd != NULL, return);
     CRM_CHECK(device != NULL, return);
 
     if (cmd->device) {
         free(cmd->device);
     }
 
     if (device->include_nodeid && cmd->victim) {
         crm_node_t *node = crm_get_peer(0, cmd->victim);
 
         cmd->victim_nodeid = node->id;
     }
 
     cmd->device = strdup(device->id);
     cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
 
     if (cmd->remote_op_id) {
         crm_debug("Scheduling %s on %s for remote peer %s with op id (%s) (timeout=%ds)",
                   cmd->action, device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
     } else {
         crm_debug("Scheduling %s on %s for %s (timeout=%ds)",
                   cmd->action, device->id, cmd->client, cmd->timeout);
     }
 
     device->pending_ops = g_list_append(device->pending_ops, cmd);
     mainloop_set_trigger(device->work);
 
     delay_max = get_action_delay_max(device, cmd->action);
     if (delay_max > 0) {
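         /* Wait a random fraction of the maximum allowed delay before
          * executing; staggering like this reduces the chance of two nodes
          * successfully fencing each other at the same time.
          */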
         cmd->start_delay = rand() % delay_max;
         crm_notice("Delaying %s on %s for %lldms (timeout=%ds)",
                     cmd->action, device->id, cmd->start_delay, cmd->timeout);
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 }
 
 void
 free_device(gpointer data)
 {
     GListPtr gIter = NULL;
     stonith_device_t *device = data;
 
     g_hash_table_destroy(device->params);
     g_hash_table_destroy(device->aliases);
 
     for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
         async_command_t *cmd = gIter->data;
 
         crm_warn("Removal of device '%s' purged operation %s", device->id, cmd->action);
         cmd->done_cb(0, -ENODEV, NULL, cmd);
         free_async_command(cmd);
     }
     g_list_free(device->pending_ops);
 
     g_list_free_full(device->targets, free);
 
     mainloop_destroy_trigger(device->work);
 
     free_xml(device->agent_metadata);
     free(device->namespace);
     free(device->on_target_actions);
     free(device->agent);
     free(device->id);
     free(device);
 }
 
 static GHashTable *
 build_port_aliases(const char *hostmap, GListPtr * targets)
 {
     char *name = NULL;
     int last = 0, lpc = 0, max = 0, added = 0;
     GHashTable *aliases =
         g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, g_hash_destroy_str, g_hash_destroy_str);
 
     if (hostmap == NULL) {
         return aliases;
     }
 
     max = strlen(hostmap);
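     /* Single pass over the hostmap: '=' or ':' ends a host name, while ';',
      * space, tab, or end-of-string ends the value mapped to it (see the
      * cases below).
      */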
     for (; lpc <= max; lpc++) {
         switch (hostmap[lpc]) {
                 /* Assignment chars */
             case '=':
             case ':':
                 if (lpc > last) {
                     free(name);
                     name = calloc(1, 1 + lpc - last);
                     memcpy(name, hostmap + last, lpc - last);
                 }
                 last = lpc + 1;
                 break;
 
                 /* Delimiter chars */
                 /* case ',': Potentially used to specify multiple ports */
             case 0:
             case ';':
             case ' ':
             case '\t':
                 if (name) {
                     char *value = NULL;
 
                     value = calloc(1, 1 + lpc - last);
                     memcpy(value, hostmap + last, lpc - last);
 
                     crm_debug("Adding alias '%s'='%s'", name, value);
                     g_hash_table_replace(aliases, name, value);
                     if (targets) {
                         *targets = g_list_append(*targets, strdup(value));
                     }
                     value = NULL;
                     name = NULL;
                     added++;
 
                 } else if (lpc > last) {
                     crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
                 }
 
                 last = lpc + 1;
                 break;
         }
 
         if (hostmap[lpc] == 0) {
             break;
         }
     }
 
     if (added == 0) {
         crm_info("No host mappings detected in '%s'", hostmap);
     }
 
     free(name);
     return aliases;
 }
 
 static void
 parse_host_line(const char *line, int max, GListPtr * output)
 {
     int lpc = 0;
     int last = 0;
 
     if (max <= 0) {
         return;
     }
 
     /* Check for any complaints about additional parameters that the device doesn't understand */
     if (strstr(line, "invalid") || strstr(line, "variable")) {
         crm_debug("Skipping: %s", line);
         return;
     }
 
     crm_trace("Processing %d bytes: [%s]", max, line);
     /* Skip initial whitespace */
     for (lpc = 0; lpc <= max && isspace(line[lpc]); lpc++) {
         last = lpc + 1;
     }
 
     /* Now the actual content */
     for (lpc = 0; lpc <= max; lpc++) {
         gboolean a_space = isspace(line[lpc]);
 
         if (a_space && lpc < max && isspace(line[lpc + 1])) {
             /* fast-forward to the end of the spaces */
 
         } else if (a_space || line[lpc] == ',' || line[lpc] == ';' || line[lpc] == 0) {
             int rc = 1;
             char *entry = NULL;
 
             if (lpc != last) {
                 entry = calloc(1, 1 + lpc - last);
                 rc = sscanf(line + last, "%[a-zA-Z0-9_.-]", entry);
             }
 
             if (entry == NULL) {
                 /* Skip */
             } else if (rc != 1) {
                 crm_warn("Could not parse (%d %d): %s", last, lpc, line + last);
             } else if (safe_str_neq(entry, "on") && safe_str_neq(entry, "off")) {
                 crm_trace("Adding '%s'", entry);
                 *output = g_list_append(*output, entry);
                 entry = NULL;
             }
 
             free(entry);
             last = lpc + 1;
         }
     }
 }
 
 static GListPtr
 parse_host_list(const char *hosts)
 {
     int lpc = 0;
     int max = 0;
     int last = 0;
     GListPtr output = NULL;
 
     if (hosts == NULL) {
         return output;
     }
 
     max = strlen(hosts);
     for (lpc = 0; lpc <= max; lpc++) {
         if (hosts[lpc] == '\n' || hosts[lpc] == 0) {
             char *line = NULL;
             int len = lpc - last;
 
             if(len > 1) {
                 line = malloc(1 + len);
             }
 
             if(line) {
                 snprintf(line, 1 + len, "%s", hosts + last);
                 line[len] = 0; /* Because it might be '\n' */
                 parse_host_line(line, len, &output);
                 free(line);
             }
 
             last = lpc + 1;
         }
     }
 
     crm_trace("Parsed %d entries from '%s'", g_list_length(output), hosts);
     return output;
 }
 
 GHashTable *metadata_cache = NULL;
 
 static xmlNode *
 get_agent_metadata(const char *agent)
 {
     xmlNode *xml = NULL;
     char *buffer = NULL;
 
     if(metadata_cache == NULL) {
         metadata_cache = g_hash_table_new_full(
             crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
     }
 
     buffer = g_hash_table_lookup(metadata_cache, agent);
     if(safe_str_eq(agent, STONITH_WATCHDOG_AGENT)) {
         return NULL;
 
     } else if(buffer == NULL) {
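         /* Not cached yet: fetch the agent's metadata via the stonith API and
          * cache the raw XML string for later lookups.
          */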
         stonith_t *st = stonith_api_new();
         int rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10);
 
         stonith_api_delete(st);
         if (rc || !buffer) {
             crm_err("Could not retrieve metadata for fencing agent %s", agent);
             return NULL;
         }
         g_hash_table_replace(metadata_cache, strdup(agent), buffer);
     }
 
     xml = string2xml(buffer);
 
     return xml;
 }
 
 static gboolean
 is_nodeid_required(xmlNode * xml)
 {
     xmlXPathObjectPtr xpath = NULL;
 
     if (stand_alone) {
         return FALSE;
     }
 
     if (!xml) {
         return FALSE;
     }
 
     xpath = xpath_search(xml, "//parameter[@name='nodeid']");
     if (numXpathResults(xpath)  <= 0) {
         freeXpathObject(xpath);
         return FALSE;
     }
 
     freeXpathObject(xpath);
     return TRUE;
 }
 
 static char *
 add_action(char *actions, const char *action)
 {
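     /* Append the action to a space-separated list kept in a fixed 256-byte
      * buffer that is allocated on first use.
      */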
     static size_t len = 256;
     int offset = 0;
 
     if (actions == NULL) {
         actions = calloc(1, len);
     } else {
         offset = strlen(actions);
     }
 
     if (offset > 0) {
         offset += snprintf(actions+offset, len-offset, " ");
     }
     offset += snprintf(actions+offset, len-offset, "%s", action);
 
     return actions;
 }
 
 static void
 read_action_metadata(stonith_device_t *device)
 {
     xmlXPathObjectPtr xpath = NULL;
     int max = 0;
     int lpc = 0;
 
     if (device->agent_metadata == NULL) {
         return;
     }
 
     xpath = xpath_search(device->agent_metadata, "//action");
     max = numXpathResults(xpath);
 
     if (max <= 0) {
         freeXpathObject(xpath);
         return;
     }
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *on_target = NULL;
         const char *action = NULL;
         xmlNode *match = getXpathResult(xpath, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if (match == NULL) { continue; }
 
         on_target = crm_element_value(match, "on_target");
         action = crm_element_value(match, "name");
 
         if(safe_str_eq(action, "list")) {
             set_bit(device->flags, st_device_supports_list);
         } else if(safe_str_eq(action, "status")) {
             set_bit(device->flags, st_device_supports_status);
         } else if(safe_str_eq(action, "reboot")) {
             set_bit(device->flags, st_device_supports_reboot);
         } else if (safe_str_eq(action, "on")) {
             /* "automatic" means the cluster will unfence node when it joins */
             const char *automatic = crm_element_value(match, "automatic");
 
             /* "required" is a deprecated synonym for "automatic" */
             const char *required = crm_element_value(match, "required");
 
             if (crm_is_true(automatic) || crm_is_true(required)) {
                 device->automatic_unfencing = TRUE;
             }
         }
 
         if (action && crm_is_true(on_target)) {
             device->on_target_actions = add_action(device->on_target_actions, action);
         }
     }
 
     freeXpathObject(xpath);
 }
 
 static stonith_device_t *
 build_device_from_xml(xmlNode * msg)
 {
     const char *value = NULL;
     xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
     stonith_device_t *device = NULL;
 
     device = calloc(1, sizeof(stonith_device_t));
     device->id = crm_element_value_copy(dev, XML_ATTR_ID);
     device->agent = crm_element_value_copy(dev, "agent");
     device->namespace = crm_element_value_copy(dev, "namespace");
     device->params = xml2list(dev);
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST);
     if (value) {
         device->targets = parse_host_list(value);
     }
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP);
     device->aliases = build_port_aliases(value, &(device->targets));
 
     device->agent_metadata = get_agent_metadata(device->agent);
     read_action_metadata(device);
 
     value = g_hash_table_lookup(device->params, "nodeid");
     if (!value) {
         device->include_nodeid = is_nodeid_required(device->agent_metadata);
     }
 
     value = crm_element_value(dev, "rsc_provides");
     if (safe_str_eq(value, "unfencing")) {
         device->automatic_unfencing = TRUE;
     }
 
     if (is_action_required("on", device)) {
         crm_info("The fencing device '%s' requires unfencing", device->id);
     }
 
     if (device->on_target_actions) {
         crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node",
                  device->id, device->on_target_actions);
     }
 
     device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
     /* TODO: Hook up priority */
 
     return device;
 }
 
 static const char *
 target_list_type(stonith_device_t * dev)
 {
     const char *check_type = NULL;
 
     check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK);
 
     if (check_type == NULL) {
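         /* No check type was configured, so infer one: an explicit host list
          * or host map implies a static list; otherwise prefer the agent's
          * "list" or "status" capability; failing that, no check is possible.
          */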
 
         if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) {
             check_type = "static-list";
         } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) {
             check_type = "static-list";
         } else if(is_set(dev->flags, st_device_supports_list)){
             check_type = "dynamic-list";
         } else if(is_set(dev->flags, st_device_supports_status)){
             check_type = "status";
         } else {
             check_type = "none";
         }
     }
 
     return check_type;
 }
 
 void
 schedule_internal_command(const char *origin,
                           stonith_device_t * device,
                           const char *action,
                           const char *victim,
                           int timeout,
                           void *internal_user_data,
                           void (*done_cb) (GPid pid, int rc, const char *output,
                                            gpointer user_data))
 {
     async_command_t *cmd = NULL;
 
     cmd = calloc(1, sizeof(async_command_t));
 
     cmd->id = -1;
     cmd->default_timeout = timeout ? timeout : 60;
     cmd->timeout = cmd->default_timeout;
     cmd->action = strdup(action);
     cmd->victim = victim ? strdup(victim) : NULL;
     cmd->device = strdup(device->id);
     cmd->origin = strdup(origin);
     cmd->client = strdup(crm_system_name);
     cmd->client_name = strdup(crm_system_name);
 
     cmd->internal_user_data = internal_user_data;
     cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
 
     schedule_stonith_command(cmd, device);
 }
 
 gboolean
 string_in_list(GListPtr list, const char *item)
 {
     int lpc = 0;
     int max = g_list_length(list);
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *value = g_list_nth_data(list, lpc);
 
         if (safe_str_eq(item, value)) {
             return TRUE;
         } else {
             crm_trace("%d: '%s' != '%s'", lpc, item, value);
         }
     }
     return FALSE;
 }
 
 static void
 status_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t *cmd = user_data;
     struct device_search_s *search = cmd->internal_user_data;
     stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
     gboolean can = FALSE;
 
     free_async_command(cmd);
 
     if (!dev) {
         search_devices_record_result(search, NULL, FALSE);
         return;
     }
 
     dev->active_pid = 0;
     mainloop_set_trigger(dev->work);
 
     if (rc == 1 /* unknown */ ) {
         crm_trace("Host %s is not known by %s", search->host, dev->id);
 
     } else if (rc == 0 /* active */  || rc == 2 /* inactive */ ) {
         crm_trace("Host %s is known by %s", search->host, dev->id);
         can = TRUE;
 
     } else {
         crm_notice("Unknown result when testing if %s can fence %s: rc=%d", dev->id, search->host,
                    rc);
     }
     search_devices_record_result(search, dev->id, can);
 }
 
 static void
 dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t *cmd = user_data;
     struct device_search_s *search = cmd->internal_user_data;
     stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
     gboolean can_fence = FALSE;
 
     free_async_command(cmd);
 
     /* Host/alias must be in the list output to be eligible to be fenced
      *
      * Will cause problems if down'd nodes aren't listed or (for virtual nodes)
      *  if the guest is still listed despite being moved to another machine
      */
     if (!dev) {
         search_devices_record_result(search, NULL, FALSE);
         return;
     }
 
     dev->active_pid = 0;
     mainloop_set_trigger(dev->work);
 
     /* If we successfully got the targets earlier, don't disable. */
     if (rc != 0 && !dev->targets) {
         crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output);
         /* Fall back to status */
         g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status"));
 
         g_list_free_full(dev->targets, free);
         dev->targets = NULL;
     } else if (!rc) {
         crm_info("Refreshing port list for %s", dev->id);
         g_list_free_full(dev->targets, free);
         dev->targets = parse_host_list(output);
         dev->targets_age = time(NULL);
     }
 
     if (dev->targets) {
         const char *alias = g_hash_table_lookup(dev->aliases, search->host);
 
         if (!alias) {
             alias = search->host;
         }
         if (string_in_list(dev->targets, alias)) {
             can_fence = TRUE;
         }
     }
     search_devices_record_result(search, dev->id, can_fence);
 }
 
 /*!
  * \internal
  * \brief Check whether an identical device already exists in device_list
  */
 static stonith_device_t *
 device_has_duplicate(stonith_device_t * device)
 {
     char *key = NULL;
     char *value = NULL;
     GHashTableIter gIter;
     stonith_device_t *dup = g_hash_table_lookup(device_list, device->id);
 
     if (!dup) {
         crm_trace("No match for %s", device->id);
         return NULL;
 
     } else if (safe_str_neq(dup->agent, device->agent)) {
         crm_trace("Different agent: %s != %s", dup->agent, device->agent);
         return NULL;
     }
 
     /* Use calculate_operation_digest() here? */
     g_hash_table_iter_init(&gIter, device->params);
     while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
 
         if(strstr(key, "CRM_meta") == key) {
             continue;
         } else if(strcmp(key, "crm_feature_set") == 0) {
             continue;
         } else {
             char *other_value = g_hash_table_lookup(dup->params, key);
 
             if (!other_value || safe_str_neq(other_value, value)) {
                 crm_trace("Different value for %s: %s != %s", key, other_value, value);
                 return NULL;
             }
         }
     }
 
     crm_trace("Match");
     return dup;
 }
 
 int
 stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
 {
     stonith_device_t *dup = NULL;
     stonith_device_t *device = build_device_from_xml(msg);
 
     dup = device_has_duplicate(device);
     if (dup) {
         crm_debug("Device '%s' already existed in device list (%d active devices)", device->id,
                    g_hash_table_size(device_list));
         free_device(device);
         device = dup;
 
     } else {
         stonith_device_t *old = g_hash_table_lookup(device_list, device->id);
 
         if (from_cib && old && old->api_registered) {
             /* If the cib is writing over an entry that is shared with a stonith client,
              * copy any pending ops that currently exist on the old entry to the new one.
              * Otherwise the pending ops will be reported as failures
              */
             crm_info("Overwriting an existing entry for %s from the cib", device->id);
             device->pending_ops = old->pending_ops;
             device->api_registered = TRUE;
             old->pending_ops = NULL;
             if (device->pending_ops) {
                 mainloop_set_trigger(device->work);
             }
         }
         g_hash_table_replace(device_list, device->id, device);
 
         crm_notice("Added '%s' to the device list (%d active devices)", device->id,
                    g_hash_table_size(device_list));
     }
     if (desc) {
         *desc = device->id;
     }
 
     if (from_cib) {
         device->cib_registered = TRUE;
     } else {
         device->api_registered = TRUE;
     }
 
     return pcmk_ok;
 }
 
 int
 stonith_device_remove(const char *id, gboolean from_cib)
 {
     stonith_device_t *device = g_hash_table_lookup(device_list, id);
 
     if (!device) {
         crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list));
         return pcmk_ok;
     }
 
     if (from_cib) {
         device->cib_registered = FALSE;
     } else {
         device->verified = FALSE;
         device->api_registered = FALSE;
     }
 
     if (!device->cib_registered && !device->api_registered) {
         g_hash_table_remove(device_list, id);
         crm_info("Removed '%s' from the device list (%d active devices)",
                  id, g_hash_table_size(device_list));
     }
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Return the number of stonith levels registered for a node
  *
  * \param[in] tp  Node's topology table entry
  *
  * \return Number of non-NULL levels in topology entry
  * \note This function is used only for log messages.
  */
 static int
 count_active_levels(stonith_topology_t * tp)
 {
     int lpc = 0;
     int count = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             count++;
         }
     }
     return count;
 }
 
 void
 free_topology_entry(gpointer data)
 {
     stonith_topology_t *tp = data;
 
     int lpc = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             g_list_free_full(tp->levels[lpc], free);
         }
     }
-    free(tp->node);
+    free(tp->target);
     free(tp);
 }
 
 /*!
  * \internal
- * \brief Register a STONITH level for a node
+ * \brief Register a STONITH level for a target
  *
- * Given an XML request specifying the node name, level index, and device IDs
- * for the level, this will create an entry for the node in the global topology
+ * Given an XML request specifying the target name, level index, and device IDs
+ * for the level, this will create an entry for the target in the global topology
  * table if one does not already exist, then append the specified device IDs to
  * the entry's device list for the specified level.
  *
  * \param[in]  msg   XML request for STONITH level registration
- * \param[out] desc  If not NULL, will be set to string representation ("NODE[LEVEL]")
+ * \param[out] desc  If not NULL, will be set to string representation ("TARGET[LEVEL]")
  *
  * \return pcmk_ok on success, -EINVAL if XML does not specify valid level index
  */
 int
 stonith_level_register(xmlNode * msg, char **desc)
 {
     int id = 0;
-    int rc = pcmk_ok;
     xmlNode *child = NULL;
 
     xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR);
-    const char *node = crm_element_value(level, F_STONITH_TARGET);
-    stonith_topology_t *tp = g_hash_table_lookup(topology, node);
+    const char *target = crm_element_value(level, F_STONITH_TARGET);
+    stonith_topology_t *tp = g_hash_table_lookup(topology, target);
 
-    CRM_LOG_ASSERT(node != NULL);
+    CRM_LOG_ASSERT(target != NULL);
 
     crm_element_value_int(level, XML_ATTR_ID, &id);
     if (desc) {
-        *desc = crm_strdup_printf("%s[%d]", node, id);
+        *desc = crm_strdup_printf("%s[%d]", target, id);
     }
     if (id <= 0 || id >= ST_LEVEL_MAX) {
         return -EINVAL;
     }
 
+    /* Target-by-node-attribute requires the CIB, so disallow if standalone */
+    if (stand_alone && target && strchr(target, '=')) {
+        return -EINVAL;
+    }
+
     if (tp == NULL) {
         tp = calloc(1, sizeof(stonith_topology_t));
-        tp->node = strdup(node);
-        g_hash_table_replace(topology, tp->node, tp);
-        crm_trace("Added %s to the topology (%d active entries)", node,
-                  g_hash_table_size(topology));
+        tp->target = strdup(target);
+        g_hash_table_replace(topology, tp->target, tp);
+        crm_trace("Added %s to the topology (%d active entries)",
+                  target, g_hash_table_size(topology));
     }
 
     if (tp->levels[id] != NULL) {
-        crm_info("Adding to the existing %s[%d] topology entry (%d active entries)", node, id,
-                 count_active_levels(tp));
+        crm_info("Adding to the existing %s[%d] topology entry", target, id);
     }
 
     for (child = __xml_first_child(level); child != NULL; child = __xml_next(child)) {
         const char *device = ID(child);
 
-        crm_trace("Adding device '%s' for %s (%d)", device, node, id);
+        crm_trace("Adding device '%s' for %s[%d]", device, target, id);
         tp->levels[id] = g_list_append(tp->levels[id], strdup(device));
     }
 
-    crm_info("Node %s has %d active fencing levels", node, count_active_levels(tp));
-    return rc;
+    crm_info("Target %s has %d active fencing levels",
+             target, count_active_levels(tp));
+    return pcmk_ok;
 }
 
 int
 stonith_level_remove(xmlNode * msg, char **desc)
 {
     int id = 0;
     xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR);
-    const char *node = crm_element_value(level, F_STONITH_TARGET);
-    stonith_topology_t *tp = g_hash_table_lookup(topology, node);
+    const char *target = crm_element_value(level, F_STONITH_TARGET);
+    stonith_topology_t *tp = g_hash_table_lookup(topology, target);
 
-    CRM_LOG_ASSERT(node != NULL);
+    CRM_LOG_ASSERT(target != NULL);
 
     crm_element_value_int(level, XML_ATTR_ID, &id);
     if (desc) {
-        *desc = crm_strdup_printf("%s[%d]", node, id);
+        *desc = crm_strdup_printf("%s[%d]", target, id);
     }
 
     if (tp == NULL) {
-        crm_info("Node %s not found (%d active entries)", node, g_hash_table_size(topology));
+        crm_info("Topology for %s not found (%d active entries)",
+                 target, g_hash_table_size(topology));
         return pcmk_ok;
 
     } else if (id < 0 || id >= ST_LEVEL_MAX) {
         return -EINVAL;
     }
 
-    if (id == 0 && g_hash_table_remove(topology, node)) {
+    if (id == 0 && g_hash_table_remove(topology, target)) {
         crm_info("Removed all %s related entries from the topology (%d active entries)",
-                 node, g_hash_table_size(topology));
+                 target, g_hash_table_size(topology));
 
     } else if (id > 0 && tp->levels[id] != NULL) {
         g_list_free_full(tp->levels[id], free);
         tp->levels[id] = NULL;
 
-        crm_info("Removed entry '%d' from %s's topology (%d active entries remaining)",
-                 id, node, count_active_levels(tp));
+        crm_info("Removed level '%d' from topology for %s (%d active levels remaining)",
+                 id, target, count_active_levels(tp));
     }
     return pcmk_ok;
 }
 
 static int
 stonith_device_action(xmlNode * msg, char **output)
 {
     int rc = pcmk_ok;
     xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
     const char *id = crm_element_value(dev, F_STONITH_DEVICE);
 
     async_command_t *cmd = NULL;
     stonith_device_t *device = NULL;
 
     if (id) {
         crm_trace("Looking for '%s'", id);
         device = g_hash_table_lookup(device_list, id);
     }
 
     if (device && device->api_registered == FALSE) {
         rc = -ENODEV;
 
     } else if (device) {
         cmd = create_async_command(msg);
         if (cmd == NULL) {
             return -EPROTO;
         }
 
         schedule_stonith_command(cmd, device);
         rc = -EINPROGRESS;
 
     } else {
         crm_info("Device %s not found", id ? id : "<none>");
         rc = -ENODEV;
     }
     return rc;
 }
 
 static void
 search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
 {
     search->replies_received++;
 
     if (can_fence && device) {
         search->capable = g_list_append(search->capable, strdup(device));
     }
 
     if (search->replies_needed == search->replies_received) {
 
         crm_debug("Finished Search. %d devices can perform action (%s) on node %s",
                   g_list_length(search->capable),
                   search->action ? search->action : "<unknown>",
                   search->host ? search->host : "<anyone>");
 
         search->callback(search->capable, search->user_data);
         free(search->host);
         free(search->action);
         free(search);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether the local host is allowed to execute a fencing action
  *
  * \param[in] device         Fence device to check
  * \param[in] action         Fence action to check
  * \param[in] target         Hostname of fence target
  * \param[in] allow_suicide  Whether self-fencing is allowed for this operation
  *
  * \return TRUE if local host is allowed to execute action, FALSE otherwise
  */
 static gboolean
 localhost_is_eligible(const stonith_device_t *device, const char *action,
                       const char *target, gboolean allow_suicide)
 {
     gboolean localhost_is_target = safe_str_eq(target, stonith_our_uname);
 
     if (device && action && device->on_target_actions
         && strstr(device->on_target_actions, action)) {
         if (!localhost_is_target) {
             crm_trace("%s operation with %s can only be executed for localhost not %s",
                       action, device->id, target);
             return FALSE;
         }
 
     } else if (localhost_is_target && !allow_suicide) {
         crm_trace("%s operation does not support self-fencing", action);
         return FALSE;
     }
     return TRUE;
 }
 
 static void
 can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search)
 {
     gboolean can = FALSE;
     const char *check_type = NULL;
     const char *host = search->host;
     const char *alias = NULL;
 
     CRM_LOG_ASSERT(dev != NULL);
 
     if (dev == NULL) {
         goto search_report_results;
     } else if (host == NULL) {
         can = TRUE;
         goto search_report_results;
     }
 
     /* Short-circuit query if this host is not allowed to perform the action */
     if (safe_str_eq(search->action, "reboot")) {
         /* A "reboot" *might* get remapped to "off" then "on", so short-circuit
          * only if all three are disallowed. If only one or two are disallowed,
          * we'll report that with the results. We never allow suicide for
          * remapped "on" operations because the host is off at that point.
          */
         if (!localhost_is_eligible(dev, "reboot", host, search->allow_suicide)
             && !localhost_is_eligible(dev, "off", host, search->allow_suicide)
             && !localhost_is_eligible(dev, "on", host, FALSE)) {
             goto search_report_results;
         }
     } else if (!localhost_is_eligible(dev, search->action, host,
                                       search->allow_suicide)) {
         goto search_report_results;
     }
 
     alias = g_hash_table_lookup(dev->aliases, host);
     if (alias == NULL) {
         alias = host;
     }
 
     check_type = target_list_type(dev);
 
     if (safe_str_eq(check_type, "none")) {
         can = TRUE;
 
     } else if (safe_str_eq(check_type, "static-list")) {
 
         /* Presence in the hostmap is sufficient
          * Only use if all hosts on which the device can be active can always fence all listed hosts
          */
 
         if (string_in_list(dev->targets, host)) {
             can = TRUE;
         } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)
                    && g_hash_table_lookup(dev->aliases, host)) {
             can = TRUE;
         }
 
     } else if (safe_str_eq(check_type, "dynamic-list")) {
         time_t now = time(NULL);
 
         if (dev->targets == NULL || dev->targets_age + 60 < now) {
             crm_trace("Running %s command to see if %s can fence %s (%s)",
                       check_type, dev?dev->id:"N/A", search->host, search->action);
 
             schedule_internal_command(__FUNCTION__, dev, "list", NULL,
                                       search->per_device_timeout, search, dynamic_list_search_cb);
 
             /* we'll respond to this search request async in the cb */
             return;
         }
 
         if (string_in_list(dev->targets, alias)) {
             can = TRUE;
         }
 
     } else if (safe_str_eq(check_type, "status")) {
         crm_trace("Running %s command to see if %s can fence %s (%s)",
                   check_type, dev?dev->id:"N/A", search->host, search->action);
         schedule_internal_command(__FUNCTION__, dev, "status", search->host,
                                   search->per_device_timeout, search, status_search_cb);
         /* we'll respond to this search request async in the cb */
         return;
     } else {
         crm_err("Unknown check type: %s", check_type);
     }
 
     if (safe_str_eq(host, alias)) {
         crm_notice("%s can%s fence (%s) %s: %s", dev->id, can ? "" : " not", search->action, host, check_type);
     } else {
         crm_notice("%s can%s fence (%s) %s (aka. '%s'): %s", dev->id, can ? "" : " not", search->action, host, alias,
                    check_type);
     }
 
   search_report_results:
     search_devices_record_result(search, dev ? dev->id : NULL, can);
 }
 
 static void
 search_devices(gpointer key, gpointer value, gpointer user_data)
 {
     stonith_device_t *dev = value;
     struct device_search_s *search = user_data;
 
     can_fence_host_with_device(dev, search);
 }
 
 #define DEFAULT_QUERY_TIMEOUT 20
 static void
 get_capable_devices(const char *host, const char *action, int timeout, bool suicide, void *user_data,
                     void (*callback) (GList * devices, void *user_data))
 {
     struct device_search_s *search;
     int per_device_timeout = DEFAULT_QUERY_TIMEOUT;
     int devices_needing_async_query = 0;
     char *key = NULL;
     const char *check_type = NULL;
     GHashTableIter gIter;
     stonith_device_t *device = NULL;
 
     if (!g_hash_table_size(device_list)) {
         callback(NULL, user_data);
         return;
     }
 
     search = calloc(1, sizeof(struct device_search_s));
     if (!search) {
         callback(NULL, user_data);
         return;
     }
 
     g_hash_table_iter_init(&gIter, device_list);
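     /* Count the devices whose target list can only be determined by querying
      * the device itself ("status" or "dynamic-list" host checks).
      */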
     while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) {
         check_type = target_list_type(device);
         if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) {
             devices_needing_async_query++;
         }
     }
 
     /* If we have devices that require an async event in order to know what
      * nodes they can fence, we have to give the events a timeout. The total
      * query timeout is divided among those events. */
     if (devices_needing_async_query) {
         per_device_timeout = timeout / devices_needing_async_query;
         if (!per_device_timeout) {
             crm_err("STONITH timeout %ds is too low; using %ds, but consider raising to at least %ds",
                     timeout, DEFAULT_QUERY_TIMEOUT,
                     DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
             per_device_timeout = DEFAULT_QUERY_TIMEOUT;
         } else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) {
             crm_notice("STONITH timeout %ds is low for the current configuration;"
                        " consider raising to at least %ds",
                        timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
         }
     }
 
     search->host = host ? strdup(host) : NULL;
     search->action = action ? strdup(action) : NULL;
     search->per_device_timeout = per_device_timeout;
     /* We are guaranteed this many replies. Even if a device gets
      * unregistered somehow during the async search, we will get
      * the correct number of replies. */
     search->replies_needed = g_hash_table_size(device_list);
     search->allow_suicide = suicide;
     search->callback = callback;
     search->user_data = user_data;
     /* kick off the search */
 
     crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s",
               search->replies_needed,
               search->action ? search->action : "<unknown>",
               search->host ? search->host : "<anyone>");
     g_hash_table_foreach(device_list, search_devices, search);
 }
 
 struct st_query_data {
     xmlNode *reply;
     char *remote_peer;
     char *client_id;
     char *target;
     char *action;
     int call_options;
 };
 
 /*!
  * \internal
  * \brief Add action-specific attributes to query reply XML
  *
  * \param[in,out] xml     XML to add attributes to
  * \param[in]     action  Fence action
  * \param[in]     device  Fence device
  */
 static void
 add_action_specific_attributes(xmlNode *xml, const char *action,
                                stonith_device_t *device)
 {
     int action_specific_timeout;
     int delay_max;
 
     CRM_CHECK(xml && action && device, return);
 
     if (is_action_required(action, device)) {
         crm_trace("Action %s is required on %s", action, device->id);
         crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1);
     }
 
     action_specific_timeout = get_action_timeout(device, action, 0);
     if (action_specific_timeout) {
         crm_trace("Action %s has timeout %dms on %s",
                   action, action_specific_timeout, device->id);
         crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
     }
 
     delay_max = get_action_delay_max(device, action);
     if (delay_max > 0) {
         crm_trace("Action %s has maximum random delay %dms on %s",
                   action, delay_max, device->id);
         crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max / 1000);
     }
 }
 
 /*!
  * \internal
  * \brief Add "disallowed" attribute to query reply XML if appropriate
  *
  * \param[in,out] xml            XML to add attribute to
  * \param[in]     action         Fence action
  * \param[in]     device         Fence device
  * \param[in]     target         Fence target
  * \param[in]     allow_suicide  Whether self-fencing is allowed
  */
 static void
 add_disallowed(xmlNode *xml, const char *action, stonith_device_t *device,
                const char *target, gboolean allow_suicide)
 {
     if (!localhost_is_eligible(device, action, target, allow_suicide)) {
         crm_trace("Action %s on %s is disallowed for local host",
                   action, device->id);
         crm_xml_add(xml, F_STONITH_ACTION_DISALLOWED, XML_BOOLEAN_TRUE);
     }
 }
 
 /*!
  * \internal
  * \brief Add child element with action-specific values to query reply XML
  *
  * \param[in,out] xml            XML to add attribute to
  * \param[in]     action         Fence action
  * \param[in]     device         Fence device
  * \param[in]     target         Fence target
  * \param[in]     allow_suicide  Whether self-fencing is allowed
  */
 static void
 add_action_reply(xmlNode *xml, const char *action, stonith_device_t *device,
                const char *target, gboolean allow_suicide)
 {
     xmlNode *child = create_xml_node(xml, F_STONITH_ACTION);
 
     crm_xml_add(child, XML_ATTR_ID, action);
     add_action_specific_attributes(child, action, device);
     add_disallowed(child, action, device, target, allow_suicide);
 }
 
 static void
 stonith_query_capable_device_cb(GList * devices, void *user_data)
 {
     struct st_query_data *query = user_data;
     int available_devices = 0;
     xmlNode *dev = NULL;
     xmlNode *list = NULL;
     GListPtr lpc = NULL;
 
     /* Pack the results into XML */
     list = create_xml_node(NULL, __FUNCTION__);
     crm_xml_add(list, F_STONITH_TARGET, query->target);
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
         const char *action = query->action;
 
         if (!device) {
             /* It is possible the device got unregistered while
              * determining who can fence the target */
             continue;
         }
 
         available_devices++;
 
         dev = create_xml_node(list, F_STONITH_DEVICE);
         crm_xml_add(dev, XML_ATTR_ID, device->id);
         crm_xml_add(dev, "namespace", device->namespace);
         crm_xml_add(dev, "agent", device->agent);
         crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
 
         /* If the originating stonithd wants to reboot the node, and we have a
          * capable device that doesn't support "reboot", remap to "off" instead.
          */
         if (is_not_set(device->flags, st_device_supports_reboot)
             && safe_str_eq(query->action, "reboot")) {
             crm_trace("%s doesn't support reboot, using values for off instead",
                       device->id);
             action = "off";
         }
 
         /* Add action-specific values if available */
         add_action_specific_attributes(dev, action, device);
         if (safe_str_eq(query->action, "reboot")) {
             /* A "reboot" *might* get remapped to "off" then "on", so after
              * sending the "reboot"-specific values in the main element, we add
              * sub-elements for "off" and "on" values.
              *
              * We short-circuited earlier if "reboot", "off" and "on" are all
              * disallowed for the local host. However if only one or two are
              * disallowed, we send back the results and mark which ones are
              * disallowed. If "reboot" is disallowed, this might cause problems
              * with older stonithd versions, which won't check for it. Older
              * versions will ignore "off" and "on", so they are not a problem.
              */
             add_disallowed(dev, action, device, query->target,
                            is_set(query->call_options, st_opt_allow_suicide));
             add_action_reply(dev, "off", device, query->target,
                              is_set(query->call_options, st_opt_allow_suicide));
             add_action_reply(dev, "on", device, query->target, FALSE);
         }
 
         /* A query without a target wants device parameters */
         if (query->target == NULL) {
             xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);
 
             g_hash_table_foreach(device->params, hash2field, attrs);
         }
     }
 
     crm_xml_add_int(list, F_STONITH_AVAILABLE_DEVICES, available_devices);
     if (query->target) {
         crm_debug("Found %d matching devices for '%s'", available_devices, query->target);
     } else {
         crm_debug("%d devices installed", available_devices);
     }
 
     if (list != NULL) {
         crm_log_xml_trace(list, "Add query results");
         add_message_xml(query->reply, F_STONITH_CALLDATA, list);
     }
     stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id);
 
     free_xml(query->reply);
     free(query->remote_peer);
     free(query->client_id);
     free(query->target);
     free(query->action);
     free(query);
     free_xml(list);
     g_list_free_full(devices, free);
 }
 
 static void
 stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options)
 {
     struct st_query_data *query = NULL;
     const char *action = NULL;
     const char *target = NULL;
     int timeout = 0;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_DEBUG_3);
 
     crm_element_value_int(msg, F_STONITH_TIMEOUT, &timeout);
     if (dev) {
         const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
         target = crm_element_value(dev, F_STONITH_TARGET);
         action = crm_element_value(dev, F_STONITH_ACTION);
         if (device && safe_str_eq(device, "manual_ack")) {
             /* No query or reply necessary */
             return;
         }
     }
 
     crm_log_xml_debug(msg, "Query");
     query = calloc(1, sizeof(struct st_query_data));
 
     query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok);
     query->remote_peer = remote_peer ? strdup(remote_peer) : NULL;
     query->client_id = client_id ? strdup(client_id) : NULL;
     query->target = target ? strdup(target) : NULL;
     query->action = action ? strdup(action) : NULL;
     query->call_options = call_options;
 
     get_capable_devices(target, action, timeout,
                         is_set(call_options, st_opt_allow_suicide),
                         query, stonith_query_capable_device_cb);
 }
 
 #define ST_LOG_OUTPUT_MAX 512
 static void
 log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output)
 {
     if (rc == 0) {
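         /* The operation succeeded, so there is no "next device to try" to report */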
         next = NULL;
     }
 
     if (cmd->victim != NULL) {
         do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR,
                    "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s",
                    cmd->action, pid, cmd->id, cmd->client_name, cmd->victim, cmd->device, rc,
                    pcmk_strerror(rc), next ? ". Trying: " : "", next ? next : "");
     } else {
         do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
                             "Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s",
                             cmd->action, pid, cmd->device, rc, pcmk_strerror(rc),
                             next ? ". Trying: " : "", next ? next : "");
     }
 
     if (output) {
         /* Logging the whole string confuses syslog when the string is xml */
         char *prefix = crm_strdup_printf("%s:%d", cmd->device, pid);
 
         crm_log_output(rc == 0 ? LOG_DEBUG : LOG_WARNING, prefix, output);
         free(prefix);
     }
 }
 
 static void
 stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid)
 {
     xmlNode *reply = NULL;
     gboolean bcast = FALSE;
 
     reply = stonith_construct_async_reply(cmd, output, NULL, rc);
 
     if (safe_str_eq(cmd->action, "metadata")) {
         /* Too verbose to log */
         crm_trace("Metadata query for %s", cmd->device);
         output = NULL;
 
     } else if (crm_str_eq(cmd->action, "monitor", TRUE) ||
                crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) {
         crm_trace("Never broadcast %s replies", cmd->action);
 
     } else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim) && safe_str_neq(cmd->action, "on")) {
         crm_trace("Broadcast %s reply for %s", cmd->action, cmd->victim);
         crm_xml_add(reply, F_SUBTYPE, "broadcast");
         bcast = TRUE;
     }
 
     log_operation(cmd, rc, pid, NULL, output);
     crm_log_xml_trace(reply, "Reply");
 
     if (bcast) {
         crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
 
     } else if (cmd->origin) {
         crm_trace("Directed reply to %s", cmd->origin);
         send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE);
 
     } else {
         crm_trace("Directed local %ssync reply to %s",
                   (cmd->options & st_opt_sync_call) ? "" : "a-", cmd->client_name);
         do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE);
     }
 
     if (stand_alone) {
         /* Do notification with a clean data object */
         xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
         crm_xml_add_int(notify_data, F_STONITH_RC, rc);
         crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim);
         crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op);
         crm_xml_add(notify_data, F_STONITH_DELEGATE, "localhost");
         crm_xml_add(notify_data, F_STONITH_DEVICE, cmd->device);
         crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
         crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client);
 
         do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
     }
 
     free_xml(reply);
 }
 
 void
 unfence_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t * cmd = user_data;
     stonith_device_t *dev = g_hash_table_lookup(device_list, cmd->device);
 
     log_operation(cmd, rc, pid, NULL, output);
 
     if(dev) {
         dev->active_pid = 0;
         mainloop_set_trigger(dev->work);
     } else {
         crm_trace("Device %s does not exist", cmd->device);
     }
 
     if(rc != 0) {
         crm_exit(DAEMON_RESPAWN_STOP);
     }
 }
 
 static void
 cancel_stonith_command(async_command_t * cmd)
 {
     stonith_device_t *device;
 
     CRM_CHECK(cmd != NULL, return);
 
     if (!cmd->device) {
         return;
     }
 
     device = g_hash_table_lookup(device_list, cmd->device);
 
     if (device) {
         crm_trace("Cancel scheduled %s on %s", cmd->action, device->id);
         device->pending_ops = g_list_remove(device->pending_ops, cmd);
     }
 }
 
 static void
 st_child_done(GPid pid, int rc, const char *output, gpointer user_data)
 {
     stonith_device_t *device = NULL;
     stonith_device_t *next_device = NULL;
     async_command_t *cmd = user_data;
 
     GListPtr gIter = NULL;
     GListPtr gIterNext = NULL;
 
     CRM_CHECK(cmd != NULL, return);
 
     /* The device is ready to do something else now */
     device = g_hash_table_lookup(device_list, cmd->device);
     if (device) {
         device->active_pid = 0;
         if (rc == pcmk_ok &&
             (safe_str_eq(cmd->action, "list") ||
              safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) {
 
             device->verified = TRUE;
         }
 
         mainloop_set_trigger(device->work);
     }
 
     crm_debug("Operation '%s' on '%s' completed with rc=%d (%d remaining)",
               cmd->action, cmd->device, rc, g_list_length(cmd->device_next));
 
     if (rc == 0) {
         GListPtr iter;
         /* see if there are any required devices left to execute for this op */
         for (iter = cmd->device_next; iter != NULL; iter = iter->next) {
             next_device = g_hash_table_lookup(device_list, iter->data);
 
             if (next_device != NULL && is_action_required(cmd->action, next_device)) {
                 cmd->device_next = iter->next;
                 break;
             }
             next_device = NULL;
         }
 
     } else if (rc != 0 && cmd->device_next && (is_action_required(cmd->action, device) == FALSE)) {
         /* if this device didn't work out, see if there are any others we can try.
          * if the failed device was 'required', we can't pick another device. */
         next_device = g_hash_table_lookup(device_list, cmd->device_next->data);
         cmd->device_next = cmd->device_next->next;
     }
 
     /* this operation requires more fencing, hooray! */
     if (next_device) {
         log_operation(cmd, rc, pid, cmd->device, output);
 
         schedule_stonith_command(cmd, next_device);
         /* Prevent cmd from being freed */
         cmd = NULL;
         goto done;
     }
 
     if (rc > 0) {
         /* Try to provide _something_ useful */
         if(output == NULL) {
             rc = -ENODATA;
 
         } else if(strstr(output, "imed out")) {
             rc = -ETIMEDOUT;
 
         } else if(strstr(output, "Unrecognised action")) {
             rc = -EOPNOTSUPP;
 
         } else {
             rc = -pcmk_err_generic;
         }
     }
 
     stonith_send_async_reply(cmd, output, rc, pid);
 
     if (rc != 0) {
         goto done;
     }
 
     /* Check to see if any operations are scheduled to do the exact
      * same thing that just completed.  If so, rather than
      * performing the same fencing operation twice, return the result
      * of this operation for all pending commands it matches. */
     for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
         async_command_t *cmd_other = gIter->data;
 
         gIterNext = gIter->next;
 
         if (cmd == cmd_other) {
             continue;
         }
 
         /* A pending scheduled command matches the command that just finished if:
          * 1. The client connections are different.
          * 2. The node victim is the same.
          * 3. The fencing action is the same.
          * 4. The device scheduled to execute the action is the same.
          */
         if (safe_str_eq(cmd->client, cmd_other->client) ||
             safe_str_neq(cmd->victim, cmd_other->victim) ||
             safe_str_neq(cmd->action, cmd_other->action) ||
             safe_str_neq(cmd->device, cmd_other->device)) {
 
             continue;
         }
 
         /* Duplicate merging will do the right thing for either type of remapped
          * reboot. If the executing stonithd remapped an unsupported reboot to
          * off, then cmd->action will be reboot and will be merged with any
          * other reboot requests. If the originating stonithd remapped a
          * topology reboot to off then on, we will get here once with
          * cmd->action "off" and once with "on", and they will be merged
          * separately with similar requests.
          */
         crm_notice
             ("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s",
              cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name);
 
         cmd_list = g_list_remove_link(cmd_list, gIter);
 
         stonith_send_async_reply(cmd_other, output, rc, pid);
         cancel_stonith_command(cmd_other);
 
         free_async_command(cmd_other);
         g_list_free_1(gIter);
     }
 
   done:
     free_async_command(cmd);
 }
 
 static gint
 sort_device_priority(gconstpointer a, gconstpointer b)
 {
     const stonith_device_t *dev_a = a;
     const stonith_device_t *dev_b = b;
 
     if (dev_a->priority > dev_b->priority) {
         return -1;
     } else if (dev_a->priority < dev_b->priority) {
         return 1;
     }
     return 0;
 }
 
 static void
 stonith_fence_get_devices_cb(GList * devices, void *user_data)
 {
     async_command_t *cmd = user_data;
     stonith_device_t *device = NULL;
 
     crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim);
 
     if (g_list_length(devices) > 0) {
         /* Order based on priority */
         devices = g_list_sort(devices, sort_device_priority);
         device = g_hash_table_lookup(device_list, devices->data);
 
         if (device) {
             cmd->device_list = devices;
             cmd->device_next = devices->next;
             devices = NULL;     /* list owned by cmd now */
         }
     }
 
     /* we have a device, schedule it for fencing. */
     if (device) {
         schedule_stonith_command(cmd, device);
         /* in progress */
         return;
     }
 
     /* no device found! */
     stonith_send_async_reply(cmd, NULL, -ENODEV, 0);
 
     free_async_command(cmd);
     g_list_free_full(devices, free);
 }
 
 static int
 stonith_fence(xmlNode * msg)
 {
     const char *device_id = NULL;
     stonith_device_t *device = NULL;
     async_command_t *cmd = create_async_command(msg);
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
 
     if (cmd == NULL) {
         return -EPROTO;
     }
 
     device_id = crm_element_value(dev, F_STONITH_DEVICE);
     if (device_id) {
         device = g_hash_table_lookup(device_list, device_id);
         if (device == NULL) {
             crm_err("Requested device '%s' is not available", device_id);
             return -ENODEV;
         }
         schedule_stonith_command(cmd, device);
 
     } else {
         const char *host = crm_element_value(dev, F_STONITH_TARGET);
 
         if (cmd->options & st_opt_cs_nodeid) {
             int nodeid = crm_atoi(host, NULL);
             crm_node_t *node = crm_get_peer(nodeid, NULL);
 
             if (node) {
                 host = node->uname;
             }
         }
 
         /* If we get to here, then self-fencing is implicitly allowed */
         get_capable_devices(host, cmd->action, cmd->default_timeout,
                             TRUE, cmd, stonith_fence_get_devices_cb);
     }
 
     return -EINPROGRESS;
 }
 
 xmlNode *
 stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc)
 {
     int lpc = 0;
     xmlNode *reply = NULL;
 
     const char *name = NULL;
     const char *value = NULL;
 
     const char *names[] = {
         F_STONITH_OPERATION,
         F_STONITH_CALLID,
         F_STONITH_CLIENTID,
         F_STONITH_CLIENTNAME,
         F_STONITH_REMOTE_OP_ID,
         F_STONITH_CALLOPTS
     };
 
     crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
 
     crm_xml_add(reply, "st_origin", __FUNCTION__);
     crm_xml_add(reply, F_TYPE, T_STONITH_NG);
     crm_xml_add(reply, "st_output", output);
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
     CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply);
     for (lpc = 0; lpc < DIMOF(names); lpc++) {
         name = names[lpc];
         value = crm_element_value(request, name);
         crm_xml_add(reply, name, value);
     }
 
     if (data != NULL) {
         crm_trace("Attaching reply output");
         add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
 
 static xmlNode *
 stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc)
 {
     xmlNode *reply = NULL;
 
     crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
 
     crm_xml_add(reply, "st_origin", __FUNCTION__);
     crm_xml_add(reply, F_TYPE, T_STONITH_NG);
 
     crm_xml_add(reply, F_STONITH_OPERATION, cmd->op);
     crm_xml_add(reply, F_STONITH_DEVICE, cmd->device);
     crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
     crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client);
     crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name);
     crm_xml_add(reply, F_STONITH_TARGET, cmd->victim);
     crm_xml_add(reply, F_STONITH_ACTION, cmd->op);
     crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin);
     crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id);
     crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options);
 
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
     crm_xml_add(reply, "st_output", output);
 
     if (data != NULL) {
         crm_info("Attaching reply output");
         add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
 
 bool fencing_peer_active(crm_node_t *peer)
 {
     if (peer == NULL) {
         return FALSE;
     } else if (peer->uname == NULL) {
         return FALSE;
     } else if (is_set(peer->processes, crm_get_cluster_proc())) {
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Determine if we need to use an alternate node to fence the target,
  *        and if so, return that node's uname
  *
  * \retval NULL, no alternate host
  * \retval uname, uname of alternate host to use
  */
 static const char *
 check_alternate_host(const char *target)
 {
     const char *alternate_host = NULL;
 
     if (find_topology_for_host(target) && safe_str_eq(target, stonith_our_uname)) {
         GHashTableIter gIter;
         crm_node_t *entry = NULL;
 
         g_hash_table_iter_init(&gIter, crm_peer_cache);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
             crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target);
             if (fencing_peer_active(entry)
                 && safe_str_neq(entry->uname, target)) {
                 alternate_host = entry->uname;
                 break;
             }
         }
         if (alternate_host == NULL) {
             crm_err("No alternate host available to handle complex self fencing request");
             g_hash_table_iter_init(&gIter, crm_peer_cache);
             while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
                 crm_notice("Peer[%d] %s", entry->id, entry->uname);
             }
         }
     }
 
     return alternate_host;
 }
 
 static void
 stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                    const char *client_id)
 {
     if (remote_peer) {
         send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE);
     } else {
         do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL);
     }
 }
 
 static int
 handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                const char *remote_peer)
 {
     int call_options = 0;
     int rc = -EOPNOTSUPP;
 
     xmlNode *data = NULL;
     xmlNode *reply = NULL;
 
     char *output = NULL;
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
     const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(client == NULL || client->request_id == id);
     }
 
     if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
         xmlNode *reply = create_xml_node(NULL, "reply");
 
         CRM_ASSERT(client);
         crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER);
         crm_xml_add(reply, F_STONITH_CLIENTID, client->id);
         crm_ipcs_send(client, id, reply, flags);
         client->request_id = 0;
         free_xml(reply);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_EXEC, TRUE)) {
         rc = stonith_device_action(request, &output);
 
     } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) {
         const char *call_id = crm_element_value(request, F_STONITH_CALLID);
         const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
         int op_timeout = 0;
 
         crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout);
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         if (remote_peer) {
             create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */
         }
         stonith_query(request, remote_peer, client_id, call_options);
         return 0;
 
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         const char *flag_name = NULL;
 
         CRM_ASSERT(client);
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id);
             client->options |= get_stonith_flag(flag_name);
         }
 
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id);
             client->options &= ~get_stonith_flag(flag_name);
         }
 
         if (flags & crm_ipc_client_response) {
             crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
         }
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) {
         xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
 
         crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s",
                    stonith_our_uname,
                    client ? client->name : remote_peer,
                    crm_element_value(dev, F_STONITH_ACTION),
                    crm_element_value(dev, F_STONITH_TARGET));
 
         if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) {
             rc = -EINPROGRESS;
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
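+        /* Requests relayed from a peer (or received while stand-alone) are
+         * executed against local devices; manual confirmations are handled
+         * via stonith_manual_ack(); anything else is either relayed to an
+         * alternate host (for topology-based self-fencing) or initiated as a
+         * remote operation so that peers can be queried for capable devices.
+         */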
 
         if (remote_peer || stand_alone) {
             rc = stonith_fence(request);
 
         } else if (call_options & st_opt_manual_ack) {
             remote_fencing_op_t *rop = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
 
             crm_notice("Received manual confirmation that %s is fenced", target);
             rop = initiate_remote_stonith_op(client, request, TRUE);
             rc = stonith_manual_ack(request, rop);
 
         } else {
             const char *alternate_host = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
             const char *action = crm_element_value(dev, F_STONITH_ACTION);
             const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
             if (client) {
                 int tolerance = 0;
 
                 crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'",
                            client->name, client->id, action, target, device ? device : "(any)");
 
                 crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance);
 
                 if (stonith_check_fence_tolerance(tolerance, target, action)) {
                     rc = 0;
                     goto done;
                 }
 
             } else {
                 crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
                            remote_peer, action, target, device ? device : "(any)");
             }
 
             alternate_host = check_alternate_host(target);
 
             if (alternate_host && client) {
                 const char *client_id = NULL;
 
                 crm_notice("Forwarding complex self fencing request to peer %s", alternate_host);
 
                 if (client) {
                     client_id = client->id;
                 } else {
                     client_id = crm_element_value(request, F_STONITH_CLIENTID);
                 }
                 /* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */
                 create_remote_stonith_op(client_id, request, FALSE);
 
                 crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY);
                 crm_xml_add(request, F_STONITH_CLIENTID, client->id);
                 send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request,
                                      FALSE);
                 rc = -EINPROGRESS;
 
             } else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) {
                 rc = -EINPROGRESS;
             }
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) {
         rc = stonith_fence_history(request, &data);
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) {
         const char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_device_register(request, &id, FALSE);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) {
         xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR);
         const char *id = crm_element_value(dev, XML_ATTR_ID);
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_device_remove(id, FALSE);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) {
         char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_level_register(request, &id);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
         free(id);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) {
         char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_level_remove(request, &id);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_CONFIRM, TRUE)) {
         async_command_t *cmd = create_async_command(request);
         xmlNode *reply = stonith_construct_async_reply(cmd, NULL, NULL, 0);
 
         crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         crm_notice("Broadcasting manual fencing confirmation for node %s", cmd->victim);
         send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
 
         free_async_command(cmd);
         free_xml(reply);
 
     } else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
         int id = 0;
         const char *name = NULL;
 
         crm_element_value_int(request, XML_ATTR_ID, &id);
         name = crm_element_value(request, XML_ATTR_UNAME);
         reap_crm_member(id, name);
 
         return pcmk_ok;
 
     } else {
         crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 
   done:
 
     /* Always reply unless the request is in process still.
      * If in progress, a reply will happen async after the request
      * processing is finished */
     if (rc != -EINPROGRESS) {
         crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0,
                   id, is_set(call_options, st_opt_sync_call), call_options,
                   crm_element_value(request, F_STONITH_CALLOPTS));
 
         if (is_set(call_options, st_opt_sync_call)) {
             CRM_ASSERT(client == NULL || client->request_id == id);
         }
         reply = stonith_construct_reply(request, output, data, rc);
         stonith_send_reply(reply, call_options, remote_peer, client_id);
     }
 
     free(output);
     free_xml(data);
     free_xml(reply);
 
     return rc;
 }
 
 static void
 handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer)
 {
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
 
     if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         process_remote_stonith_query(request);
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         process_remote_stonith_exec(request);
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
         /* Reply to a complex fencing op */
         process_remote_stonith_exec(request);
     } else {
         crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 }
 
 void
 stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                 const char *remote_peer)
 {
     int call_options = 0;
     int rc = 0;
     gboolean is_reply = FALSE;
 
     /* Copy op for reporting. The original might get freed by handle_reply()
      * before we use it in crm_debug():
      *     handle_reply()
      *     |- process_remote_stonith_exec()
      *     |-- remote_op_done()
      *     |--- handle_local_reply_and_notify()
      *     |---- crm_xml_add(...F_STONITH_OPERATION...)
      *     |--- free_xml(op->request)
      */
     char *op = crm_element_value_copy(request, F_STONITH_OPERATION);
 
     if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_DEBUG_3)) {
         is_reply = TRUE;
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "",
               id, client ? client->name : remote_peer, call_options);
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(client == NULL || client->request_id == id);
     }
 
     if (is_reply) {
         handle_reply(client, request, remote_peer);
     } else {
         rc = handle_request(client, id, flags, request, remote_peer);
     }
 
     crm_debug("Processed %s%s from %s: %s (%d)", op,
               is_reply ? " reply" : "", client ? client->name : remote_peer,
               rc > 0 ? "" : pcmk_strerror(rc), rc);
 
     free(op);
 }
diff --git a/fencing/internal.h b/fencing/internal.h
index 0f418ec47f..3fc75ecb49 100644
--- a/fencing/internal.h
+++ b/fencing/internal.h
@@ -1,229 +1,233 @@
 #include <crm/common/mainloop.h>
 
 /*!
  * \internal
  * \brief Check to see if target was fenced in the last few seconds.
  * \param tolerance, The number of seconds to look back in time
  * \param target, The node to search for
  * \param action, The action we want to match.
  *
  * \retval FALSE, no match
  * \retval TRUE, a matching fencing operation took place within the last 'tolerance' seconds.
  */
 gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action);
 
 enum st_device_flags
 {
     st_device_supports_list   = 0x0001,
     st_device_supports_status = 0x0002,
     st_device_supports_reboot = 0x0004,
 };
 
 typedef struct stonith_device_s {
     char *id;
     char *agent;
     char *namespace;
 
     /*! list of actions that must execute on the target node. Used for unfencing */
     char *on_target_actions;
     GListPtr targets;
     time_t targets_age;
     gboolean has_attr_map;
     /* Whether the nodeid parameter for the victim should be included in agent arguments */
     gboolean include_nodeid;
     /* whether the cluster should automatically unfence nodes with the device */
     gboolean automatic_unfencing;
     guint priority;
     guint active_pid;
 
     enum st_device_flags flags;
 
     GHashTable *params;
     GHashTable *aliases;
     GList *pending_ops;
     crm_trigger_t *work;
     xmlNode *agent_metadata;
 
     /*! A verified device is one that has contacted the
      * agent successfully to perform a monitor operation */
     gboolean verified;
 
     gboolean cib_registered;
     gboolean api_registered;
 } stonith_device_t;
 
 /* These values are used to index certain arrays by "phase". Usually an
  * operation has only one "phase", so phase is always zero. However, some
  * reboots are remapped to "off" then "on", in which case "reboot" will be
  * phase 0, "off" will be phase 1 and "on" will be phase 2.
  */
 enum st_remap_phase {
     st_phase_requested = 0,
     st_phase_off = 1,
     st_phase_on = 2,
     st_phase_max = 3
 };
 
 typedef struct remote_fencing_op_s {
     /* The unique id associated with this operation */
     char *id;
     /*! The node this operation will fence */
     char *target;
     /*! The fencing action to perform on the target. (reboot, on, off) */
     char *action;
 
     /*! When was the fencing action recorded (seconds since epoch) */
     time_t created;
 
     /*! Marks if the final notifications have been sent to local stonith clients. */
     gboolean notify_sent;
     /*! The number of query replies received */
     guint replies;
     /*! The number of query replies expected */
     guint replies_expected;
     /*! Does this node own control of this operation */
     gboolean owner;
     /*! After the query is complete, this is the high-level timer that expires the entire operation */
     guint op_timer_total;
     /*! This timer expires the current fencing request. Many fencing
      * requests may exist in a single operation */
     guint op_timer_one;
     /*! This timer expires the query request sent out to determine
      * which nodes contain which devices, and which nodes those devices can fence */
     guint query_timer;
     /*! This is the default timeout to use for each fencing device if no
      * custom timeout is received in the query. */
     gint base_timeout;
     /*! This is the calculated total timeout an operation can take before
      * expiring. This is calculated by adding together all the timeout
      * values associated with the devices this fencing operation may call */
     gint total_timeout;
 
     /*! Delegate is the node being asked to perform a fencing action
      * on behalf of the node that owns the remote operation. Some operations
      * will involve multiple delegates. This value represents the final delegate
      * that is used. */
     char *delegate;
     /*! The point at which the remote operation completed */
     time_t completed;
     /*! The stonith_call_options associated with this remote operation */
     long long call_options;
 
     /*! The current state of the remote operation. This indicates
      * what stage the op is in: query, exec, done, duplicate, or failed. */
     enum op_state state;
     /*! The node that owns the remote operation */
     char *originator;
     /*! The local client id that initiated the fencing request */
     char *client_id;
     /*! The client's call_id that initiated the fencing request */
     int client_callid;
     /*! The name of client that initiated the fencing request */
     char *client_name;
     /*! List of the received query results for all the nodes in the cpg group */
     GListPtr query_results;
     /*! The original request that initiated the remote stonith operation */
     xmlNode *request;
 
     /*! The current topology level being executed */
     guint level;
     /*! The current operation phase being executed */
     enum st_remap_phase phase;
 
     /*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */
     GListPtr automatic_list;
     /*! List of all devices at the currently executing topology level */
     GListPtr devices_list;
     /*! Current entry in the topology device list */
     GListPtr devices;
 
     /*! List of duplicate operations attached to this operation. Once this operation
      * completes, the duplicate operations will be closed out as well. */
     GListPtr duplicates;
 
 } remote_fencing_op_t;
 
 /*
  * Complex fencing requirements are specified via fencing topologies.
  * A topology consists of levels; each level is a list of fencing devices.
  * Topologies are stored in a hash table by node name. When a node needs to be
  * fenced, if it has an entry in the topology table, the levels are tried
  * sequentially, and the devices in each level are tried sequentially.
  * Fencing is considered successful as soon as any level succeeds;
  * a level is considered successful if all its devices succeed.
  * Essentially, all devices at a given level are "and-ed" and the
  * levels are "or-ed".
  *
  * This structure is used for the topology table entries.
  * Topology levels start from 1, so levels[0] is unused and always NULL.
  */
 typedef struct stonith_topology_s {
-    char *node;
+    /*! Node name regex or attribute name=value for which topology applies */
+    char *target;
+    /*! Names of fencing devices at each topology level */
     GListPtr levels[ST_LEVEL_MAX];
 
 } stonith_topology_t;
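+
+/* Worked example (hypothetical device names): a target with
+ *   levels[1] = { "disk-a", "disk-b" }   -- both must succeed
+ *   levels[2] = { "power" }              -- tried only if level 1 fails
+ * is successfully fenced when both disk devices succeed or, failing that,
+ * when the power device succeeds.
+ */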
 
 extern long long get_stonith_flag(const char *name);
 
 extern void stonith_command(crm_client_t * client, uint32_t id, uint32_t flags,
                             xmlNode * op_request, const char *remote_peer);
 
 extern int stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib);
 
 extern int stonith_device_remove(const char *id, gboolean from_cib);
 
 extern int stonith_level_register(xmlNode * msg, char **desc);
 
 extern int stonith_level_remove(xmlNode * msg, char **desc);
 
 extern stonith_topology_t *find_topology_for_host(const char *host);
 
 extern void do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply,
                            gboolean from_peer);
 
 extern xmlNode *stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data,
                                         int rc);
 
 void
  do_stonith_async_timeout_update(const char *client, const char *call_id, int timeout);
 
 extern void do_stonith_notify(int options, const char *type, int result, xmlNode * data);
 
 extern remote_fencing_op_t *initiate_remote_stonith_op(crm_client_t * client, xmlNode * request,
                                                        gboolean manual_ack);
 
 extern int process_remote_stonith_exec(xmlNode * msg);
 
 extern int process_remote_stonith_query(xmlNode * msg);
 
 extern void *create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer);
 
 extern int stonith_fence_history(xmlNode * msg, xmlNode ** output);
 
 extern void free_device(gpointer data);
 
 extern void free_topology_entry(gpointer data);
 
 bool fencing_peer_active(crm_node_t *peer);
 
 int stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op);
 
 void unfence_cb(GPid pid, int rc, const char *output, gpointer user_data);
 
 gboolean string_in_list(GListPtr list, const char *item);
 
+gboolean node_has_attr(const char *node, char *nvpair);
+
 void
 schedule_internal_command(const char *origin,
                           stonith_device_t * device,
                           const char *action,
                           const char *victim,
                           int timeout,
                           void *internal_user_data,
                           void (*done_cb) (GPid pid, int rc, const char *output,
                                            gpointer user_data));
 
 extern char *stonith_our_uname;
 extern gboolean stand_alone;
 extern GHashTable *device_list;
 extern GHashTable *topology;
 extern long stonith_watchdog_timeout_ms;
diff --git a/fencing/main.c b/fencing/main.c
index c48e12d0bb..43d2cb3bf3 100644
--- a/fencing/main.c
+++ b/fencing/main.c
@@ -1,1507 +1,1555 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <allocate.h>
 
 #include <internal.h>
 
 #include <standalone_config.h>
 
 char *stonith_our_uname = NULL;
 char *stonith_our_uuid = NULL;
 long stonith_watchdog_timeout_ms = 0;
 
 GMainLoop *mainloop = NULL;
 
 gboolean stand_alone = FALSE;
 gboolean no_cib_connect = FALSE;
 gboolean stonith_shutdown_flag = FALSE;
 
 qb_ipcs_service_t *ipcs = NULL;
 xmlNode *local_cib = NULL;
 
 static cib_t *cib_api = NULL;
 static void *cib_library = NULL;
 
 static void stonith_shutdown(int nsig);
 static void stonith_cleanup(void);
 
 static int32_t
 st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     if (stonith_shutdown_flag) {
         crm_info("Ignoring new client [%d] during shutdown", crm_ipcs_client_pid(c));
         return -EPERM;
     }
 
     if (crm_client_new(c, uid, gid) == NULL) {
         return -EIO;
     }
     return 0;
 }
 
 static void
 st_ipc_created(qb_ipcs_connection_t * c)
 {
     crm_trace("Connection created for %p", c);
 }
 
 /* Exit code means? */
 static int32_t
 st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     int call_options = 0;
     xmlNode *request = NULL;
     crm_client_t *c = crm_client_get(qbc);
     const char *op = NULL;
 
     if (c == NULL) {
         crm_info("Invalid client: %p", qbc);
         return 0;
     }
 
     request = crm_ipcs_recv(c, data, size, &id, &flags);
     if (request == NULL) {
         crm_ipcs_send_ack(c, id, flags, "nack", __FUNCTION__, __LINE__);
         return 0;
     }
 
 
     op = crm_element_value(request, F_CRM_TASK);
     if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
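+        /* Node-cache removal is not handled here directly: the request is
+         * stamped with this client's identity and broadcast to the cluster,
+         * where each stonithd (including this one) reaps the node via
+         * handle_request()'s CRM_OP_RM_NODE_CACHE branch.
+         */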
         crm_xml_add(request, F_TYPE, T_STONITH_NG);
         crm_xml_add(request, F_STONITH_OPERATION, op);
         crm_xml_add(request, F_STONITH_CLIENTID, c->id);
         crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
         crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
         send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
         free_xml(request);
         return 0;
     }
 
     if (c->name == NULL) {
         const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
 
         if (value == NULL) {
             value = "unknown";
         }
         c->name = crm_strdup_printf("%s.%u", value, c->pid);
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_trace("Flags %u/%u for command %u from %s", flags, call_options, id, crm_client_name(c));
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(flags & crm_ipc_client_response);
         CRM_LOG_ASSERT(c->request_id == 0);     /* This means the client has two synchronous events in-flight */
         c->request_id = id;     /* Reply only to the last one */
     }
 
     crm_xml_add(request, F_STONITH_CLIENTID, c->id);
     crm_xml_add(request, F_STONITH_CLIENTNAME, crm_client_name(c));
     crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
     crm_log_xml_trace(request, "Client[inbound]");
     stonith_command(c, id, flags, request, NULL);
 
     free_xml(request);
     return 0;
 }
 
 /* Error code means? */
 static int32_t
 st_ipc_closed(qb_ipcs_connection_t * c)
 {
     crm_client_t *client = crm_client_get(c);
 
     if (client == NULL) {
         return 0;
     }
 
     crm_trace("Connection %p closed", c);
     crm_client_destroy(client);
 
     /* 0 means: yes, go ahead and destroy the connection */
     return 0;
 }
 
 static void
 st_ipc_destroy(qb_ipcs_connection_t * c)
 {
     crm_trace("Connection %p destroyed", c);
     st_ipc_closed(c);
 }
 
 static void
 stonith_peer_callback(xmlNode * msg, void *private_data)
 {
     const char *remote_peer = crm_element_value(msg, F_ORIG);
     const char *op = crm_element_value(msg, F_STONITH_OPERATION);
 
     if (crm_str_eq(op, "poke", TRUE)) {
         return;
     }
 
     crm_log_xml_trace(msg, "Peer[inbound]");
     stonith_command(NULL, 0, 0, msg, remote_peer);
 }
 
 #if SUPPORT_HEARTBEAT
 static void
 stonith_peer_hb_callback(HA_Message * msg, void *private_data)
 {
     xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__);
 
     stonith_peer_callback(xml, private_data);
     free_xml(xml);
 }
 
 static void
 stonith_peer_hb_destroy(gpointer user_data)
 {
     if (stonith_shutdown_flag) {
         crm_info("Heartbeat disconnection complete... exiting");
     } else {
         crm_err("Heartbeat connection lost!  Exiting.");
     }
     stonith_shutdown(0);
 }
 #endif
 
 #if SUPPORT_COROSYNC
 static void
 stonith_peer_ais_callback(cpg_handle_t handle,
                           const struct cpg_name *groupName,
                           uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     xmlNode *xml = NULL;
     const char *from = NULL;
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     if (kind == crm_class_cluster) {
         xml = string2xml(data);
         if (xml == NULL) {
             crm_err("Invalid XML: '%.120s'", data);
             free(data);
             return;
         }
         crm_xml_add(xml, F_ORIG, from);
         /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
         stonith_peer_callback(xml, NULL);
     }
 
     free_xml(xml);
     free(data);
     return;
 }
 
 static void
 stonith_peer_cs_destroy(gpointer user_data)
 {
     crm_err("Corosync connection terminated");
     stonith_shutdown(0);
 }
 #endif
 
 void
 do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer)
 {
     /* send callback to originating child */
     crm_client_t *client_obj = NULL;
     int local_rc = pcmk_ok;
 
     crm_trace("Sending response");
     client_obj = crm_client_get_by_id(client_id);
 
     crm_trace("Sending callback to request originator");
     if (client_obj == NULL) {
         local_rc = -1;
         crm_trace("No client to sent the response to.  F_STONITH_CLIENTID not set.");
 
     } else {
         int rid = 0;
 
         if (sync_reply) {
             CRM_LOG_ASSERT(client_obj->request_id);
 
             rid = client_obj->request_id;
             client_obj->request_id = 0;
 
             crm_trace("Sending response %d to %s %s",
                       rid, client_obj->name, from_peer ? "(originator of delegated request)" : "");
 
         } else {
             crm_trace("Sending an event to %s %s",
                       client_obj->name, from_peer ? "(originator of delegated request)" : "");
         }
 
         local_rc = crm_ipcs_send(client_obj, rid, notify_src, sync_reply?crm_ipc_flags_none:crm_ipc_server_event);
     }
 
     if (local_rc < pcmk_ok && client_obj != NULL) {
         crm_warn("%sSync reply to %s failed: %s",
                  sync_reply ? "" : "A-",
                  client_obj ? client_obj->name : "<unknown>", pcmk_strerror(local_rc));
     }
 }
 
 long long
 get_stonith_flag(const char *name)
 {
     if (safe_str_eq(name, T_STONITH_NOTIFY_FENCE)) {
         return 0x01;
 
     } else if (safe_str_eq(name, STONITH_OP_DEVICE_ADD)) {
         return 0x04;
 
     } else if (safe_str_eq(name, STONITH_OP_DEVICE_DEL)) {
         return 0x10;
     }
     return 0;
 }
 
 static void
 stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
 {
 
     xmlNode *update_msg = user_data;
     crm_client_t *client = value;
     const char *type = NULL;
 
     CRM_CHECK(client != NULL, return);
     CRM_CHECK(update_msg != NULL, return);
 
     type = crm_element_value(update_msg, F_SUBTYPE);
     CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
 
     if (client->ipcs == NULL) {
         crm_trace("Skipping client with NULL channel");
         return;
     }
 
     if (client->options & get_stonith_flag(type)) {
         int rc = crm_ipcs_send(client, 0, update_msg, crm_ipc_server_event | crm_ipc_server_error);
 
         if (rc <= 0) {
             crm_warn("%s notification of client %s.%.6s failed: %s (%d)",
                      type, crm_client_name(client), client->id, pcmk_strerror(rc), rc);
         } else {
             crm_trace("Sent %s notification to client %s.%.6s", type, crm_client_name(client),
                       client->id);
         }
     }
 }
 
 void
 do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
 {
     crm_client_t *client = NULL;
     xmlNode *notify_data = NULL;
 
     if (!timeout || !call_id || !client_id) {
         return;
     }
 
     client = crm_client_get_by_id(client_id);
     if (!client) {
         return;
     }
 
     notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
     crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
 
     crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
 
     if (client) {
         crm_ipcs_send(client, 0, notify_data, crm_ipc_server_event);
     }
 
     free_xml(notify_data);
 }
 
 void
 do_stonith_notify(int options, const char *type, int result, xmlNode * data)
 {
     /* TODO: Standardize the contents of data */
     xmlNode *update_msg = create_xml_node(NULL, "notify");
 
     CRM_CHECK(type != NULL,;);
 
     crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(update_msg, F_SUBTYPE, type);
     crm_xml_add(update_msg, F_STONITH_OPERATION, type);
     crm_xml_add_int(update_msg, F_STONITH_RC, result);
 
     if (data != NULL) {
         add_message_xml(update_msg, F_STONITH_CALLDATA, data);
     }
 
     crm_trace("Notifying clients");
     g_hash_table_foreach(client_connections, stonith_notify_client, update_msg);
     free_xml(update_msg);
     crm_trace("Notify complete");
 }
 
 static stonith_key_value_t *
 parse_device_list(const char *devices)
 {
     int lpc = 0;
     int max = 0;
     int last = 0;
     stonith_key_value_t *output = NULL;
 
     if (devices == NULL) {
         return output;
     }
 
     max = strlen(devices);
     for (lpc = 0; lpc <= max; lpc++) {
         if (devices[lpc] == ',' || devices[lpc] == 0) {
             char *line = NULL;
 
             line = calloc(1, 2 + lpc - last);
             snprintf(line, 1 + lpc - last, "%s", devices + last);
             output = stonith_key_value_add(output, NULL, line);
             free(line);
 
             last = lpc + 1;
         }
     }
 
     return output;
 }
 
 static void
 topology_remove_helper(const char *node, int level)
 {
     int rc;
     char *desc = NULL;
     xmlNode *data = create_xml_node(NULL, F_STONITH_LEVEL);
     xmlNode *notify_data = create_xml_node(NULL, STONITH_OP_LEVEL_DEL);
 
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add_int(data, XML_ATTR_ID, level);
     crm_xml_add(data, F_STONITH_TARGET, node);
 
     rc = stonith_level_remove(data, &desc);
 
     crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
     crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
     do_stonith_notify(0, STONITH_OP_LEVEL_DEL, rc, notify_data);
 
     free_xml(notify_data);
     free_xml(data);
     free(desc);
 }
 
 static void
 topology_register_helper(const char *node, int level, stonith_key_value_t * device_list)
 {
     int rc;
     char *desc = NULL;
     xmlNode *notify_data = create_xml_node(NULL, STONITH_OP_LEVEL_ADD);
     xmlNode *data = create_level_registration_xml(node, level, device_list);
 
     rc = stonith_level_register(data, &desc);
 
     crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
     crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
     do_stonith_notify(0, STONITH_OP_LEVEL_ADD, rc, notify_data);
 
     free_xml(notify_data);
     free_xml(data);
     free(desc);
 }
 
 static void
 remove_cib_device(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *rsc_id = NULL;
         const char *standard = NULL;
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if(match != NULL) {
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
         }
 
         if (safe_str_neq(standard, "stonith")) {
             continue;
         }
 
         rsc_id = crm_element_value(match, XML_ATTR_ID);
 
         stonith_device_remove(rsc_id, TRUE);
     }
 }
 
 static void
 handle_topology_change(xmlNode *match, bool remove) 
 {
     CRM_LOG_ASSERT(match != NULL);
     if(match) {
         int index = 0;
         const char *target;
         const char *dev_list;
         stonith_key_value_t *devices = NULL;
 
         crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
         target = crm_element_value(match, XML_ATTR_STONITH_TARGET);
         if(target == NULL) {
             target = crm_element_value(match, XML_ATTR_STONITH_TARGET_PATTERN);
         }
         dev_list = crm_element_value(match, XML_ATTR_STONITH_DEVICES);
         devices = parse_device_list(dev_list);
 
         crm_trace("Updating %s[%d] (%s) to %s", target, index, ID(match), dev_list);
 
         if(remove) {
             topology_remove_helper(target, index);
         }
         topology_register_helper(target, index, devices);
         stonith_key_value_freeall(devices, 1, 1);
     }
 }
 
 static void
 remove_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if (match && crm_element_value(match, XML_DIFF_MARKER)) {
             /* Deletion */
             int index = 0;
             const char *target = crm_element_value(match, XML_ATTR_STONITH_TARGET);
 
             if(target == NULL) {
                 target = crm_element_value(match, XML_ATTR_STONITH_TARGET_PATTERN);
             }
 
             crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
             if (target == NULL) {
                 crm_err("Invalid fencing target in element %s", ID(match));
 
             } else if (index <= 0) {
                 crm_err("Invalid level for %s in element %s", target, ID(match));
 
             } else {
                 topology_remove_helper(target, index);
             }
             /* } else { Deal with modifications during the 'addition' stage */
         }
     }
 }
 
 static void
 register_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         handle_topology_change(match, TRUE);
     }
 }
 
 /* Fencing
 <diff crm_feature_set="3.0.6">
   <diff-removed>
     <fencing-topology>
       <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
       <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
       <fencing-level devices="disk,network" id="f-p2.1"/>
     </fencing-topology>
   </diff-removed>
   <diff-added>
     <fencing-topology>
       <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
       <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
       <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
     </fencing-topology>
   </diff-added>
 </diff>
 */
 
 static void
 fencing_topology_init()
 {
     xmlXPathObjectPtr xpathObj = NULL;
     const char *xpath = "//" XML_TAG_FENCING_LEVEL;
 
     crm_trace("Full topology refresh");
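+    /* Discard the existing topology table (if any) and rebuild it from every
+     * <fencing-level> entry in our copy of the CIB.
+     */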
 
     if(topology) {
         g_hash_table_destroy(topology);
         topology = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_topology_entry);
     }
 
     /* Grab everything */
     xpathObj = xpath_search(local_cib, xpath);
     register_fencing_topology(xpathObj);
 
     freeXpathObject(xpathObj);
 }
 
 #define rsc_name(x) x->clone_name?x->clone_name:x->id
 
 /*!
  * \internal
  * \brief Check whether our uname is in a resource's allowed node list
  *
  * \param[in] rsc  Resource to check
  *
  * \return Pointer to node object if found, NULL otherwise
  */
 static node_t *
 our_node_allowed_for(resource_t *rsc)
 {
     GHashTableIter iter;
     node_t *node = NULL;
 
     if (rsc && stonith_our_uname) {
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
                 break;
             }
             node = NULL;
         }
     }
     return node;
 }
 
 /*!
  * \internal
  * \brief If a resource or any of its children are STONITH devices, update their
  *        definitions given a cluster working set.
  *
  * \param[in] rsc       Resource to check
  * \param[in] data_set  Cluster working set with device information
  */
 static void cib_device_update(resource_t *rsc, pe_working_set_t *data_set)
 {
     node_t *node = NULL;
     const char *value = NULL;
     const char *rclass = NULL;
     node_t *parent = NULL;
     gboolean remove = TRUE;
 
     /* If this is a complex resource, check children rather than this resource itself.
      * TODO: Mark each installed device and remove if untouched when this process finishes.
      */
     if(rsc->children) {
         GListPtr gIter = NULL;
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             cib_device_update(gIter->data, data_set);
             if(rsc->variant == pe_clone || rsc->variant == pe_master) {
                 crm_trace("Only processing one copy of the clone %s", rsc->id);
                 break;
             }
         }
         return;
     }
 
     /* We only care about STONITH resources. */
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if(safe_str_neq(rclass, "stonith")) {
         return;
     }
 
     /* If this STONITH resource is disabled, just remove it. */
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     if (safe_str_eq(value, RSC_STOPPED)) {
         crm_info("Device %s has been disabled", rsc->id);
         goto update_done;
     }
 
     /* Check whether our node is allowed for this resource (and its parent if in a group) */
     node = our_node_allowed_for(rsc);
     if (rsc->parent && (rsc->parent->variant == pe_group)) {
         parent = our_node_allowed_for(rsc->parent);
     }
 
     if(node == NULL) {
         /* Our node is disallowed, so remove the device */
         GHashTableIter iter;
 
         crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             crm_trace("Available: %s = %d", node->details->uname, node->weight);
         }
 
         goto update_done;
 
     } else if(node->weight < 0 || (parent && parent->weight < 0)) {
         /* Our node (or its group) is disallowed by score, so remove the device */
         char *score = score2char((node->weight < 0) ? node->weight : parent->weight);
 
         crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score);
         free(score);
 
         goto update_done;
 
     } else {
         /* Our node is allowed, so update the device information */
         xmlNode *data;
         GHashTableIter gIter;
         stonith_key_value_t *params = NULL;
 
         const char *name = NULL;
         const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
         const char *provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
         const char *rsc_provides = NULL;
 
         crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
         get_rsc_attributes(rsc->parameters, rsc, node, data_set);
         get_meta_attributes(rsc->meta, rsc, node, data_set);
 
         rsc_provides = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROVIDES);
 
         g_hash_table_iter_init(&gIter, rsc->parameters);
         while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
             if (!name || !value) {
                 continue;
             }
             params = stonith_key_value_add(params, name, value);
             crm_trace(" %s=%s", name, value);
         }
 
         remove = FALSE;
         data = create_device_registration_xml(rsc_name(rsc), provider, agent, params, rsc_provides);
         stonith_device_register(data, NULL, TRUE);
 
         stonith_key_value_freeall(params, 1, 1);
         free_xml(data);
     }
 
 update_done:
 
     if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) {
         stonith_device_remove(rsc_name(rsc), TRUE);
     }
 }
 
 extern xmlNode *do_calculations(pe_working_set_t * data_set, xmlNode * xml_input, crm_time_t * now);
 extern node_t *create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set);
 
 /*!
  * \internal
  * \brief Update all STONITH device definitions based on current CIB
  */
 static void
 cib_devices_update(void)
 {
     GListPtr gIter = NULL;
     pe_working_set_t data_set;
 
     crm_info("Updating devices to version %s.%s.%s",
              crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
              crm_element_value(local_cib, XML_ATTR_GENERATION),
              crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
 
     set_working_set_defaults(&data_set);
     data_set.input = local_cib;
     data_set.now = crm_time_new(NULL);
     data_set.flags |= pe_flag_quick_location;
     data_set.localhost = stonith_our_uname;
 
     cluster_status(&data_set);
     do_calculations(&data_set, NULL, NULL);
 
     for (gIter = data_set.resources; gIter != NULL; gIter = gIter->next) {
         cib_device_update(gIter->data, &data_set);
     }
     data_set.input = NULL; /* Wasn't a copy */
     cleanup_alloc_calculations(&data_set);
 }
 
 static void
 update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
 {
     xmlNode *change = NULL;
     char *reason = NULL;
     bool needs_update = FALSE;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
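+    /* Walk the v2 patchset: a deleted primitive is unregistered immediately,
+     * while any other change under the resources section, or to a location
+     * constraint, triggers a full device refresh below.
+     */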
 
     for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
         const char *op = crm_element_value(change, XML_DIFF_OP);
         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
         const char *shortpath = NULL;
 
         if(op == NULL || strcmp(op, "move") == 0) {
             continue;
 
         } else if(safe_str_eq(op, "delete") && strstr(xpath, XML_CIB_TAG_RESOURCE)) {
             const char *rsc_id = NULL;
             char *search = NULL;
             char *mutable = strdup(xpath);
 
             rsc_id = strstr(mutable, "primitive[@id=\'") + strlen("primitive[@id=\'");
             search = strchr(rsc_id, '\'');
             search[0] = 0;
 
             stonith_device_remove(rsc_id, TRUE);
             free(mutable);
 
         } else if(strstr(xpath, "/"XML_CIB_TAG_RESOURCES)) {
             shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
             reason = crm_strdup_printf("%s %s", op, shortpath+1);
             needs_update = TRUE;
             break;
 
         } else if(strstr(xpath, XML_CONS_TAG_RSC_LOCATION)) {
             shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
             reason = crm_strdup_printf("%s %s", op, shortpath+1);
             needs_update = TRUE;
             break;
         }
     }
 
     if(needs_update) {
         crm_info("Updating device list from the cib: %s", reason);
         cib_devices_update();
     }
     free(reason);
 }
 
 
 static void
 update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
 {
     const char *reason = "none";
     gboolean needs_update = FALSE;
     xmlXPathObjectPtr xpath_obj = NULL;
 
     /* process new constraints */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         /* Safest and simplest to always recompute */
         needs_update = TRUE;
         reason = "new location constraint";
 
         for (lpc = 0; lpc < max; lpc++) {
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             crm_log_xml_trace(match, "new constraint");
         }
     }
     freeXpathObject(xpath_obj);
 
     /* process deletions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         remove_cib_device(xpath_obj);
     }
     freeXpathObject(xpath_obj);
 
     /* process additions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         for (lpc = 0; lpc < max; lpc++) {
             const char *rsc_id = NULL;
             const char *standard = NULL;
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             rsc_id = crm_element_value(match, XML_ATTR_ID);
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
 
             if (safe_str_neq(standard, "stonith")) {
                 continue;
             }
 
             crm_trace("Fencing resource %s was added or modified", rsc_id);
             reason = "new resource";
             needs_update = TRUE;
         }
     }
     freeXpathObject(xpath_obj);
 
     if(needs_update) {
         crm_info("Updating device list from the cib: %s", reason);
         cib_devices_update();
     }
 }
 
 static void
 update_cib_stonith_devices(const char *event, xmlNode * msg)
 {
     int format = 1;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
     switch(format) {
         case 1:
             update_cib_stonith_devices_v1(event, msg);
             break;
         case 2:
             update_cib_stonith_devices_v2(event, msg);
             break;
         default:
             crm_warn("Unknown patch format: %d", format);
     }
 }
 
+/* Needs to hold node name + attribute name + attribute value + 75 */
+#define XPATH_MAX 512
+
+/*!
+ * \internal
+ * \brief Check whether a node has a specific attribute name/value
+ *
+ * \param[in] node    Name of node to check
+ * \param[in] nvpair  String of the form "name=value" specifying attribute
+ *
+ * \return TRUE if the locally cached CIB has the specified node attribute
+ */
+gboolean
+node_has_attr(const char *node, char *nvpair)
+{
+    const char *name, *value;
+    char *split = strchr(nvpair, '=');
+    char xpath[XPATH_MAX];
+    xmlNode *match;
+    int n;
+
+    CRM_CHECK((split != NULL) && (local_cib != NULL), return FALSE);
+
+    /* Split the nvpair into name and value */
+    name = nvpair;
+    *split = '\0';
+    value = split + 1;
+
+    /* Search for the node's attributes in the CIB. While the schema allows
+     * multiple sets of instance attributes, and allows instance attributes to
+     * use id-ref to reference values elsewhere, that is intended for resources,
+     * so we ignore that here.
+     */
+    n = snprintf(xpath, XPATH_MAX, "//" XML_CIB_TAG_NODES
+                 "/" XML_CIB_TAG_NODE "[@uname='%s']/" XML_TAG_ATTR_SETS
+                 "/" XML_CIB_TAG_NVPAIR "[@name='%s' and @value='%s']",
+                 node, name, value);
+    match = get_xpath_object(xpath, local_cib, LOG_TRACE);
+
+    /* Undo split */
+    *split = '=';
+
+    CRM_CHECK(n < XPATH_MAX, return FALSE);
+    return (match != NULL);
+}
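+
+/* Illustrative example (hypothetical node and attribute names): if node "node1"
+ * carries the permanent node attribute rack=1, then node_has_attr("node1", "rack=1")
+ * builds an XPath roughly equivalent to
+ *   //nodes/node[@uname='node1']/instance_attributes
+ *       /nvpair[@name='rack' and @value='1']
+ * and returns TRUE because the locally cached CIB contains a matching nvpair.
+ */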
+
 static void
 update_fencing_topology(const char *event, xmlNode * msg)
 {
     int format = 1;
     const char *xpath;
     xmlXPathObjectPtr xpathObj = NULL;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
 
     if(format == 1) {
         /* Process deletions (only) */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         remove_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
         /* Process additions and changes */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         register_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
     } else if(format == 2) {
         xmlNode *change = NULL;
         int add[] = { 0, 0, 0 };
         int del[] = { 0, 0, 0 };
 
         xml_patch_versions(patchset, add, del);
 
         for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) {
             const char *op = crm_element_value(change, XML_DIFF_OP);
             const char *xpath = crm_element_value(change, XML_DIFF_PATH);
 
             if(op == NULL) {
                 continue;
 
             } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
                 /* Change to a specific entry */
 
                 crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
                 if(strcmp(op, "move") == 0) {
                     continue;
 
                 } else if(strcmp(op, "create") == 0) {
                     handle_topology_change(change->children, FALSE);
 
                 } else if(strcmp(op, "modify") == 0) {
                     xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
 
                     if(match) {
                         handle_topology_change(match->children, TRUE);
                     }
 
                 } else if(strcmp(op, "delete") == 0) {
                     /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
                     crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
                 /* Change to the topology in general */
                 crm_info("Re-initializing fencing topology after top-level %s operation  %d.%d.%d for %s",
                          op, add[0], add[1], add[2], xpath);
                 fencing_topology_init();
                 return;
 
             } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
                 /* Changes to the whole config section, possibly including the topology as a whole */
                 if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
                     crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
                               op, add[0], add[1], add[2], xpath);
 
                 } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
                     crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else {
                 crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
                           op, add[0], add[1], add[2], xpath);
             }
         }
 
     } else {
         crm_warn("Unknown patch format: %d", format);
     }
 }
 static bool have_cib_devices = FALSE;
 
 static void
 update_cib_cache_cb(const char *event, xmlNode * msg)
 {
     int rc = pcmk_ok;
     xmlNode *stonith_enabled_xml = NULL;
     xmlNode *stonith_watchdog_xml = NULL;
     const char *stonith_enabled_s = NULL;
     static gboolean stonith_enabled_saved = TRUE;
 
     if(!have_cib_devices) {
         crm_trace("Skipping updates until we get a full dump");
         return;
 
     } else if(msg == NULL) {
         crm_trace("Missing %s update", event);
         return;
     }
 
-    /* Maintain a local copy of the CIB so that we have full access to the device definitions and location constraints */
+    /* Maintain a local copy of the CIB so that we have full access
+     * to device definitions, location constraints, and node attributes
+     */
     if (local_cib != NULL) {
         int rc = pcmk_ok;
         xmlNode *patchset = NULL;
 
         crm_element_value_int(msg, F_CIB_RC, &rc);
         if (rc != pcmk_ok) {
             return;
         }
 
         patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
         xml_log_patchset(LOG_TRACE, "Config update", patchset);
         rc = xml_apply_patchset(local_cib, patchset, TRUE);
         switch (rc) {
             case pcmk_ok:
             case -pcmk_err_old_data:
                 break;
             case -pcmk_err_diff_resync:
             case -pcmk_err_diff_failed:
                 crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
                 break;
             default:
                 crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
         }
     }
 
     if (local_cib == NULL) {
         crm_trace("Re-requesting the full cib");
         rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
         if(rc != pcmk_ok) {
             crm_err("Couldnt retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
             return;
         }
         CRM_ASSERT(local_cib != NULL);
         stonith_enabled_saved = FALSE; /* Trigger a full refresh below */
     }
 
     stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", local_cib, LOG_TRACE);
     if (stonith_enabled_xml) {
         stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE);
     }
 
     if(daemon_option_enabled(crm_system_name, "watchdog")) {
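+        /* Keep stonith_watchdog_timeout_ms in sync with the
+         * stonith-watchdog-timeout cluster property in the cached CIB
+         * whenever this daemon runs with the watchdog option enabled.
+         */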
         const char *value = NULL;
         long timeout_ms = 0;
 
         if(value == NULL) {
             stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", local_cib, LOG_TRACE);
             if (stonith_watchdog_xml) {
                 value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
             }
         }
 
         if(value) {
             timeout_ms = crm_get_msec(value);
         }
 
         if(timeout_ms != stonith_watchdog_timeout_ms) {
             crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, stonith_watchdog_timeout_ms/1000);
             stonith_watchdog_timeout_ms = timeout_ms;
         }
     }
 
     if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) {
         crm_trace("Ignoring cib updates while stonith is disabled");
         stonith_enabled_saved = FALSE;
         return;
 
     } else if (stonith_enabled_saved == FALSE) {
         crm_info("Updating stonith device and topology lists now that stonith is enabled");
         stonith_enabled_saved = TRUE;
         fencing_topology_init();
         cib_devices_update();
 
     } else {
         update_fencing_topology(event, msg);
         update_cib_stonith_devices(event, msg);
     }
 }
 
 static void
 init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     crm_info("Updating device list from the cib: init");
     have_cib_devices = TRUE;
     local_cib = copy_xml(output);
 
     fencing_topology_init();
     cib_devices_update();
 }
 
 static void
 stonith_shutdown(int nsig)
 {
     stonith_shutdown_flag = TRUE;
     crm_info("Terminating with  %d clients", crm_hash_table_size(client_connections));
     if (mainloop != NULL && g_main_is_running(mainloop)) {
         g_main_quit(mainloop);
     } else {
         stonith_cleanup();
         crm_exit(pcmk_ok);
     }
 }
 
 static void
 cib_connection_destroy(gpointer user_data)
 {
     if (stonith_shutdown_flag) {
         crm_info("Connection to the CIB closed.");
         return;
     } else {
         crm_notice("Connection to the CIB terminated. Shutting down.");
     }
     if (cib_api) {
         cib_api->cmds->signoff(cib_api);
     }
     stonith_shutdown(0);
 }
 
 static void
 stonith_cleanup(void)
 {
     if (cib_api) {
         cib_api->cmds->signoff(cib_api);
     }
 
     if (ipcs) {
         qb_ipcs_destroy(ipcs);
     }
     crm_peer_destroy();
     crm_client_cleanup();
     free(stonith_our_uname);
     free_xml(local_cib);
 }
 
 /* *INDENT-OFF* */
 static struct crm_option long_options[] = {
     {"stand-alone",         0, 0, 's'},
     {"stand-alone-w-cpg",   0, 0, 'c'},
     {"logfile",             1, 0, 'l'},
     {"verbose",     0, 0, 'V'},
     {"version",     0, 0, '$'},
     {"help",        0, 0, '?'},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 static void
 setup_cib(void)
 {
     int rc, retries = 0;
     static cib_t *(*cib_new_fn) (void) = NULL;
 
     if (cib_new_fn == NULL) {
         cib_new_fn = find_library_function(&cib_library, CIB_LIBRARY, "cib_new", TRUE);
     }
 
     if (cib_new_fn != NULL) {
         cib_api = (*cib_new_fn) ();
     }
 
     if (cib_api == NULL) {
         crm_err("No connection to the CIB");
         return;
     }
 
     do {
         sleep(retries);
         rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_CRMD, cib_command);
     } while (rc == -ENOTCONN && ++retries < 5);
 
     if (rc != pcmk_ok) {
         crm_err("Could not connect to the CIB service: %s (%d)", pcmk_strerror(rc), rc);
 
     } else if (pcmk_ok !=
                cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
         crm_err("Could not set CIB notification callback");
 
     } else {
         rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
         cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
                                          init_cib_cache_cb);
         cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
         crm_notice("Watching for stonith topology changes");
     }
 }
 
 struct qb_ipcs_service_handlers ipc_callbacks = {
     .connection_accept = st_ipc_accept,
     .connection_created = st_ipc_created,
     .msg_process = st_ipc_dispatch,
     .connection_closed = st_ipc_closed,
     .connection_destroyed = st_ipc_destroy
 };
 
 /*!
  * \internal
  * \brief Callback for peer status changes
  *
  * \param[in] type  What changed
  * \param[in] node  What peer had the change
  * \param[in] data  Previous value of what changed
  */
 static void
 st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     if (type != crm_status_processes) {
         /*
          * This is a hack until we can send to a nodeid and/or we fix node name lookups
          * These messages are ignored in stonith_peer_callback()
          */
         xmlNode *query = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
         crm_xml_add(query, F_TYPE, T_STONITH_NG);
         crm_xml_add(query, F_STONITH_OPERATION, "poke");
 
         crm_debug("Broadcasting our uname because of node %u", node->id);
         send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
         free_xml(query);
     }
 }
 
 int
 main(int argc, char **argv)
 {
     int flag;
     int rc = 0;
     int lpc = 0;
     int argerr = 0;
     int option_index = 0;
     crm_cluster_t cluster;
     const char *actions[] = { "reboot", "off", "list", "monitor", "status" };
 
     crm_log_preinit("stonith-ng", argc, argv);
     crm_set_options(NULL, "mode [options]", long_options,
                     "Provides a summary of cluster's current state."
                     "\n\nOutputs varying levels of detail in a number of different formats.\n");
 
     while (1) {
         flag = crm_get_option(argc, argv, &option_index);
         if (flag == -1) {
             break;
         }
 
         switch (flag) {
             case 'V':
                 crm_bump_log_level(argc, argv);
                 break;
             case 'l':
                 crm_add_logfile(optarg);
                 break;
             case 's':
                 stand_alone = TRUE;
                 break;
             case 'c':
                 stand_alone = FALSE;
                 no_cib_connect = TRUE;
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) {
         printf("<?xml version=\"1.0\"?><!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n");
         printf("<resource-agent name=\"stonithd\">\n");
         printf(" <version>1.0</version>\n");
         printf
             (" <longdesc lang=\"en\">This is a fake resource that details the instance attributes handled by stonithd.</longdesc>\n");
         printf(" <shortdesc lang=\"en\">Options available for all stonith resources</shortdesc>\n");
         printf(" <parameters>\n");
 
         printf("  <parameter name=\"priority\" unique=\"0\">\n");
         printf
             ("    <shortdesc lang=\"en\">The priority of the stonith resource. Devices are tried in order of highest priority to lowest.</shortdesc>\n");
         printf("    <content type=\"integer\" default=\"0\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n", STONITH_ATTR_HOSTARG);
         printf
             ("    <shortdesc lang=\"en\">Advanced use only: An alternate parameter to supply instead of 'port'</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Some devices do not support the standard 'port' parameter or may provide additional ones.\n"
              "Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n"
              "A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n"
              "     </longdesc>\n");
         printf("    <content type=\"string\" default=\"port\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n", STONITH_ATTR_HOSTMAP);
         printf
             ("    <shortdesc lang=\"en\">A mapping of host names to ports numbers for devices that do not support host names.</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2</longdesc>\n");
         printf("    <content type=\"string\" default=\"\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n", STONITH_ATTR_HOSTLIST);
         printf
             ("    <shortdesc lang=\"en\">A list of machines controlled by this device (Optional unless %s=static-list).</shortdesc>\n",
              STONITH_ATTR_HOSTCHECK);
         printf("    <content type=\"string\" default=\"\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n", STONITH_ATTR_HOSTCHECK);
         printf
             ("    <shortdesc lang=\"en\">How to determine which machines are controlled by the device.</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">Allowed values: dynamic-list (query the device), static-list (check the %s attribute), none (assume every device can fence every machine)</longdesc>\n",
              STONITH_ATTR_HOSTLIST);
         printf("    <content type=\"string\" default=\"dynamic-list\"/>\n");
         printf("  </parameter>\n");
 
         printf("  <parameter name=\"%s\" unique=\"0\">\n", STONITH_ATTR_DELAY_MAX);
         printf
             ("    <shortdesc lang=\"en\">Enable random delay for stonith actions and specify the maximum of random delay</shortdesc>\n");
         printf
             ("    <longdesc lang=\"en\">This prevents double fencing when using slow devices such as sbd.\n"
              "Use this to enable random delay for stonith actions and specify the maximum of random delay.</longdesc>\n");
         printf("    <content type=\"time\" default=\"0s\"/>\n");
         printf("  </parameter>\n");
 
         for (lpc = 0; lpc < DIMOF(actions); lpc++) {
             printf("  <parameter name=\"pcmk_%s_action\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: An alternate command to run instead of '%s'</shortdesc>\n",
                  actions[lpc]);
             printf
                 ("    <longdesc lang=\"en\">Some devices do not support the standard commands or may provide additional ones.\n"
                  "Use this to specify an alternate, device-specific, command that implements the '%s' action.</longdesc>\n",
                  actions[lpc]);
             printf("    <content type=\"string\" default=\"%s\"/>\n", actions[lpc]);
             printf("  </parameter>\n");
 
             printf("  <parameter name=\"pcmk_%s_timeout\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: Specify an alternate timeout to use for %s actions instead of stonith-timeout</shortdesc>\n",
                  actions[lpc]);
             printf
                 ("    <longdesc lang=\"en\">Some devices need much more/less time to complete than normal.\n"
                  "Use this to specify an alternate, device-specific, timeout for '%s' actions.</longdesc>\n",
                  actions[lpc]);
             printf("    <content type=\"time\" default=\"60s\"/>\n");
             printf("  </parameter>\n");
 
             printf("  <parameter name=\"pcmk_%s_retries\" unique=\"0\">\n", actions[lpc]);
             printf
                 ("    <shortdesc lang=\"en\">Advanced use only: The maximum number of times to retry the '%s' command within the timeout period</shortdesc>\n",
                  actions[lpc]);
             printf("    <longdesc lang=\"en\">Some devices do not support multiple connections."
                    " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
                    " Use this option to alter the number of times Pacemaker retries '%s' actions before giving up."
                    "</longdesc>\n", actions[lpc]);
             printf("    <content type=\"integer\" default=\"2\"/>\n");
             printf("  </parameter>\n");
         }
 
         printf(" </parameters>\n");
         printf("</resource-agent>\n");
         return 0;
     }
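+    /* Usage sketch (assuming the installed fencing daemon binary is named
+     * "stonithd"): running "stonithd metadata" prints the XML above, which
+     * tooling can parse to discover the pcmk_* stonith parameters.
+     */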
 
     if (optind != argc) {
         ++argerr;
     }
 
     if (argerr) {
         crm_help('?', EX_USAGE);
     }
 
     crm_log_init("stonith-ng", LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
     mainloop_add_signal(SIGTERM, stonith_shutdown);
 
     crm_peer_init();
 
     if (stand_alone == FALSE) {
 #if SUPPORT_HEARTBEAT
         cluster.hb_conn = NULL;
         cluster.hb_dispatch = stonith_peer_hb_callback;
         cluster.destroy = stonith_peer_hb_destroy;
 #endif
 
         if (is_openais_cluster()) {
 #if SUPPORT_COROSYNC
             cluster.destroy = stonith_peer_cs_destroy;
             cluster.cpg.cpg_deliver_fn = stonith_peer_ais_callback;
             cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership;
 #endif
         }
 
         if (crm_cluster_connect(&cluster) == FALSE) {
             crm_crit("Cannot sign in to the cluster... terminating");
             crm_exit(DAEMON_RESPAWN_STOP);
         }
         stonith_our_uname = cluster.uname;
         stonith_our_uuid = cluster.uuid;
 
 #if SUPPORT_HEARTBEAT
         if (is_heartbeat_cluster()) {
             /* crm_cluster_connect() registered us for crm_system_name, which
              * usually is the only F_TYPE used by the respective subsystem.
              * Stonith needs to register two additional F_TYPE callbacks,
              * because it can :-/ */
             if (HA_OK !=
                 cluster.hb_conn->llc_ops->set_msg_callback(cluster.hb_conn, T_STONITH_NOTIFY,
                                                             cluster.hb_dispatch, cluster.hb_conn)) {
                 crm_crit("Cannot set msg callback %s: %s", T_STONITH_NOTIFY, cluster.hb_conn->llc_ops->errmsg(cluster.hb_conn));
                 crm_exit(DAEMON_RESPAWN_STOP);
             }
             if (HA_OK !=
                 cluster.hb_conn->llc_ops->set_msg_callback(cluster.hb_conn, T_STONITH_TIMEOUT_VALUE,
                                                             cluster.hb_dispatch, cluster.hb_conn)) {
                 crm_crit("Cannot set msg callback %s: %s", T_STONITH_TIMEOUT_VALUE, cluster.hb_conn->llc_ops->errmsg(cluster.hb_conn));
                 crm_exit(DAEMON_RESPAWN_STOP);
             }
         }
 #endif
 
         if (no_cib_connect == FALSE) {
             setup_cib();
         }
 
     } else {
         stonith_our_uname = strdup("localhost");
     }
 
     crm_set_status_callback(&st_peer_update_callback);
 
     device_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_device);
 
     topology = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_topology_entry);
 
     if(daemon_option_enabled(crm_system_name, "watchdog")) {
         xmlNode *xml;
         stonith_key_value_t *params = NULL;
 
         params = stonith_key_value_add(params, STONITH_ATTR_HOSTLIST, stonith_our_uname);
 
         xml = create_device_registration_xml("watchdog", "internal", STONITH_WATCHDOG_AGENT, params, NULL);
         stonith_device_register(xml, NULL, FALSE);
 
         stonith_key_value_freeall(params, 1, 1);
         free_xml(xml);
     }
 
     stonith_ipc_server_init(&ipcs, &ipc_callbacks);
 
 #if SUPPORT_STONITH_CONFIG
     if (((stand_alone == TRUE)) && !(standalone_cfg_read_file(STONITH_NG_CONF_FILE))) {
         standalone_cfg_commit();
     }
 #endif
 
     /* Create the mainloop and run it... */
     mainloop = g_main_new(FALSE);
     crm_info("Starting %s mainloop", crm_system_name);
 
     g_main_run(mainloop);
     stonith_cleanup();
 
 #if SUPPORT_HEARTBEAT
     if (cluster.hb_conn) {
         cluster.hb_conn->llc_ops->delete(cluster.hb_conn);
     }
 #endif
 
     crm_info("Done");
 
     return crm_exit(rc);
 }
diff --git a/fencing/remote.c b/fencing/remote.c
index d741672e98..5b032c420d 100644
--- a/fencing/remote.c
+++ b/fencing/remote.c
@@ -1,2036 +1,2069 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 #include <regex.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/util.h>
 #include <internal.h>
 
 #define TIMEOUT_MULTIPLY_FACTOR 1.2
 
 /* When one stonithd queries its peers for devices able to handle a fencing
  * request, each peer will reply with a list of such devices available to it.
  * Each reply will be parsed into a st_query_result_t, with each device's
  * information kept in a device_properties_t.
  */
 
 typedef struct device_properties_s {
     /* Whether access to this device has been verified */
     gboolean verified;
 
     /* The remaining members are indexed by the operation's "phase" */
 
     /* Whether this device has been executed in each phase */
     gboolean executed[st_phase_max];
     /* Whether this device is disallowed from executing in each phase */
     gboolean disallowed[st_phase_max];
     /* Action-specific timeout for each phase */
     int custom_action_timeout[st_phase_max];
     /* Action-specific maximum random delay for each phase */
     int delay_max[st_phase_max];
 } device_properties_t;
 
 typedef struct st_query_result_s {
     /* Name of peer that sent this result */
     char *host;
     /* Only try peers for non-topology based operations once */
     gboolean tried;
     /* Number of entries in the devices table */
     int ndevices;
     /* Devices available to this host that are capable of fencing the target */
     GHashTable *devices;
 } st_query_result_t;
 
 GHashTable *remote_op_list = NULL;
 void call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer);
 static void remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup);
 extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                                   int call_options);
 
 static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
 static int get_op_total_timeout(const remote_fencing_op_t *op,
                                 const st_query_result_t *chosen_peer);
 
 static gint
 sort_strings(gconstpointer a, gconstpointer b)
 {
     return strcmp(a, b);
 }
 
 static void
 free_remote_query(gpointer data)
 {
     if (data) {
         st_query_result_t *query = data;
 
         crm_trace("Free'ing query result from %s", query->host);
         g_hash_table_destroy(query->devices);
         free(query->host);
         free(query);
     }
 }
 
 struct peer_count_data {
     const remote_fencing_op_t *op;
     gboolean verified_only;
     int count;
 };
 
 /*!
  * \internal
  * \brief Increment a counter if a device has not been executed yet
  *
  * \param[in] key        Device ID (ignored)
  * \param[in] value      Device properties
  * \param[in] user_data  Peer count data
  */
 static void
 count_peer_device(gpointer key, gpointer value, gpointer user_data)
 {
     device_properties_t *props = (device_properties_t*)value;
     struct peer_count_data *data = user_data;
 
     if (!props->executed[data->op->phase]
         && (!data->verified_only || props->verified)) {
         ++(data->count);
     }
 }
 
 /*!
  * \internal
  * \brief Check the number of available devices in a peer's query results
  *
  * \param[in] op             Operation that results are for
  * \param[in] peer           Peer to count
  * \param[in] verified_only  Whether to count only verified devices
  *
  * \return Number of devices available to peer that were not already executed
  */
 static int
 count_peer_devices(const remote_fencing_op_t *op, const st_query_result_t *peer,
                    gboolean verified_only)
 {
     struct peer_count_data data;
 
     data.op = op;
     data.verified_only = verified_only;
     data.count = 0;
     if (peer) {
         g_hash_table_foreach(peer->devices, count_peer_device, &data);
     }
     return data.count;
 }
 
 /*!
  * \internal
  * \brief Search for a device in a query result
  *
  * \param[in] op      Operation that result is for
  * \param[in] peer    Query result for a peer
  * \param[in] device  Device ID to search for
  *
  * \return Device properties if found, NULL otherwise
  */
 static device_properties_t *
 find_peer_device(const remote_fencing_op_t *op, const st_query_result_t *peer,
                  const char *device)
 {
     device_properties_t *props = g_hash_table_lookup(peer->devices, device);
 
     return (props && !props->executed[op->phase]
            && !props->disallowed[op->phase])? props : NULL;
 }
 
 /*!
  * \internal
  * \brief Find a device in a peer's device list and mark it as executed
  *
  * \param[in]     op                     Operation that peer result is for
  * \param[in,out] peer                   Peer with results to search
  * \param[in]     device                 ID of device to mark as done
  * \param[in]     verified_devices_only  Only consider verified devices
  *
  * \return TRUE if device was found and marked, FALSE otherwise
  */
 static gboolean
 grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer,
                  const char *device, gboolean verified_devices_only)
 {
     device_properties_t *props = find_peer_device(op, peer, device);
 
     if ((props == NULL) || (verified_devices_only && !props->verified)) {
         return FALSE;
     }
 
     crm_trace("Removing %s from %s (%d remaining)",
               device, peer->host, count_peer_devices(op, peer, FALSE));
     props->executed[op->phase] = TRUE;
     return TRUE;
 }
 
 static void
 clear_remote_op_timers(remote_fencing_op_t * op)
 {
     if (op->query_timer) {
         g_source_remove(op->query_timer);
         op->query_timer = 0;
     }
     if (op->op_timer_total) {
         g_source_remove(op->op_timer_total);
         op->op_timer_total = 0;
     }
     if (op->op_timer_one) {
         g_source_remove(op->op_timer_one);
         op->op_timer_one = 0;
     }
 }
 
 static void
 free_remote_op(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     crm_trace("Free'ing op %s for %s", op->id, op->target);
     crm_log_xml_debug(op->request, "Destroying");
 
     clear_remote_op_timers(op);
 
     free(op->id);
     free(op->action);
     free(op->target);
     free(op->client_id);
     free(op->client_name);
     free(op->originator);
 
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     g_list_free_full(op->automatic_list, free);
     free(op);
 }
 
 /*
  * \internal
  * \brief Return an operation's originally requested action (before any remap)
  *
  * \param[in] op  Operation to check
  *
  * \return Operation's original action
  */
 static const char *
 op_requested_action(const remote_fencing_op_t *op)
 {
     return ((op->phase > st_phase_requested)? "reboot" : op->action);
 }
 
 /*
  * \internal
  * \brief Remap a "reboot" operation to the "off" phase
  *
  * \param[in,out] op      Operation to remap
  */
 static void
 op_phase_off(remote_fencing_op_t *op)
 {
     crm_info("Remapping multiple-device reboot of %s (%s) to off",
              op->target, op->id);
     op->phase = st_phase_off;
 
     /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
      * memory allocation at each phase.
      */
     strcpy(op->action, "off");
 }
 
 /*!
  * \internal
  * \brief Advance a remapped reboot operation to the "on" phase
  *
  * \param[in,out] op  Operation to remap
  */
 static void
 op_phase_on(remote_fencing_op_t *op)
 {
     GListPtr iter = NULL;
 
     crm_info("Remapped off of %s complete, remapping to on for %s.%.8s",
              op->target, op->client_name, op->id);
     op->phase = st_phase_on;
     strcpy(op->action, "on");
 
     /* Skip devices with automatic unfencing, because the cluster will handle it
      * when the node rejoins.
      */
     for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
         GListPtr match = g_list_find_custom(op->devices_list, iter->data,
                                             sort_strings);
 
         if (match) {
             op->devices_list = g_list_remove(op->devices_list, match->data);
         }
     }
     g_list_free_full(op->automatic_list, free);
     op->automatic_list = NULL;
 
     /* Rewind device list pointer */
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Reset a remapped reboot operation
  *
  * \param[in,out] op  Operation to reset
  */
 static void
 undo_op_remap(remote_fencing_op_t *op)
 {
     if (op->phase > 0) {
         crm_info("Undoing remap of reboot of %s for %s.%.8s",
                  op->target, op->client_name, op->id);
         op->phase = st_phase_requested;
         strcpy(op->action, "reboot");
     }
 }
 
 static xmlNode *
 create_op_done_notify(remote_fencing_op_t * op, int rc)
 {
     xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
     crm_xml_add_int(notify_data, "state", op->state);
     crm_xml_add_int(notify_data, F_STONITH_RC, rc);
     crm_xml_add(notify_data, F_STONITH_TARGET, op->target);
     crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
     crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
     crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name);
 
     return notify_data;
 }
 
 static void
 bcast_result_to_peers(remote_fencing_op_t * op, int rc)
 {
     static int count = 0;
     xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
     xmlNode *notify_data = create_op_done_notify(op, rc);
 
     count++;
     crm_trace("Broadcasting result to peers");
     crm_xml_add(bcast, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(bcast, F_SUBTYPE, "broadcast");
     crm_xml_add(bcast, F_STONITH_OPERATION, T_STONITH_NOTIFY);
     crm_xml_add_int(bcast, "count", count);
     add_message_xml(bcast, F_STONITH_CALLDATA, notify_data);
     send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
     free_xml(notify_data);
     free_xml(bcast);
 
     return;
 }
 
 static void
 handle_local_reply_and_notify(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     xmlNode *notify_data = NULL;
     xmlNode *reply = NULL;
 
     if (op->notify_sent == TRUE) {
         /* nothing to do */
         return;
     }
 
     /* Do notification with a clean data object */
     notify_data = create_op_done_notify(op, rc);
     crm_xml_add_int(data, "state", op->state);
     crm_xml_add(data, F_STONITH_TARGET, op->target);
     crm_xml_add(data, F_STONITH_OPERATION, op->action);
 
     reply = stonith_construct_reply(op->request, NULL, data, rc);
     crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);
 
     /* Send fencing OP reply to local client that initiated fencing */
     do_local_reply(reply, op->client_id, op->call_options & st_opt_sync_call, FALSE);
 
     /* bcast to all local clients that the fencing operation happened */
     do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
 
     /* mark this op as having notify's already sent */
     op->notify_sent = TRUE;
     free_xml(reply);
     free_xml(notify_data);
 }
 
 static void
 handle_duplicates(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     GListPtr iter = NULL;
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *other = iter->data;
 
         if (other->state == st_duplicate) {
             /* i.e., it hasn't timed out already */
             other->state = op->state;
             crm_debug("Peforming duplicate notification for %s@%s.%.8s = %s", other->client_name,
                       other->originator, other->id, pcmk_strerror(rc));
             remote_op_done(other, data, rc, TRUE);
 
         } else {
             crm_err("Skipping duplicate notification for %s@%s - %d", other->client_name,
                     other->originator, other->state);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Finalize a remote operation.
  *
  * \description This function has two code paths.
  *
  * Path 1. This node is the owner of the operation and needs
  *         to notify the cpg group via a broadcast as to the operation's
  *         results.
  *
  * Path 2. The cpg broadcast is received. All nodes notify their local
  *         stonith clients of the operation's results.
  *
  * So, the owner of the operation first notifies the cluster of the result,
  * and once that cpg notification is received back, it notifies all the local clients.
  *
  * Nodes that are passive watchers of the operation will receive the
  * broadcast and only need to notify their local clients that the operation finished.
  *
  * \param op, The fencing operation to finalize
  * \param data, The xml msg reply (if present) of the last delegated fencing
  *              operation.
  * \param dup, Is this operation a duplicate? If so, treat it a little differently,
  *             making sure the broadcast is not sent out.
  */
 static void
 remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup)
 {
     int level = LOG_ERR;
     const char *subt = NULL;
     xmlNode *local_data = NULL;
 
     op->completed = time(NULL);
     clear_remote_op_timers(op);
     undo_op_remap(op);
 
     if (op->notify_sent == TRUE) {
         crm_err("Already sent notifications for '%s of %s by %s' (for=%s@%s.%.8s, state=%d): %s",
                 op->action, op->target, op->delegate ? op->delegate : "<no-one>",
                 op->client_name, op->originator, op->id, op->state, pcmk_strerror(rc));
         goto remote_op_done_cleanup;
     }
 
     if (!op->delegate && data && rc != -ENODEV && rc != -EHOSTUNREACH) {
         xmlNode *ndata = get_xpath_object("//@" F_STONITH_DELEGATE, data, LOG_TRACE);
         if(ndata) {
             op->delegate = crm_element_value_copy(ndata, F_STONITH_DELEGATE);
         } else { 
             op->delegate = crm_element_value_copy(data, F_ORIG);
         }
     }
 
     if (data == NULL) {
         data = create_xml_node(NULL, "remote-op");
         local_data = data;
     }
 
     /* Tell everyone the operation is done; we will continue
      * with the local notifications once we receive
      * the broadcast back. */
     subt = crm_element_value(data, F_SUBTYPE);
     if (dup == FALSE && safe_str_neq(subt, "broadcast")) {
         /* Defer notification until the bcast message arrives */
         bcast_result_to_peers(op, rc);
         goto remote_op_done_cleanup;
     }
 
     if (rc == pcmk_ok || dup) {
         level = LOG_NOTICE;
     } else if (safe_str_neq(op->originator, stonith_our_uname)) {
         level = LOG_NOTICE;
     }
 
     do_crm_log(level,
                "Operation %s of %s by %s for %s@%s.%.8s: %s",
                op->action, op->target, op->delegate ? op->delegate : "<no-one>",
                op->client_name, op->originator, op->id, pcmk_strerror(rc));
 
     handle_local_reply_and_notify(op, data, rc);
 
     if (dup == FALSE) {
         handle_duplicates(op, data, rc);
     }
 
     /* Free non-essential parts of the record
      * Keep the record around so we can query the history
      */
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
         op->query_results = NULL;
     }
 
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
 
   remote_op_done_cleanup:
     free_xml(local_data);
 }
 
 static gboolean
 remote_op_watchdog_done(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Remote %s operation on %s for %s.%8s assumed complete",
                op->action, op->target, op->client_name, op->id);
     op->state = st_done;
     remote_op_done(op, NULL, pcmk_ok, FALSE);
     return FALSE;
 }
 
 static gboolean
 remote_op_timeout_one(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Remote %s operation on %s for %s.%8s timed out",
                op->action, op->target, op->client_name, op->id);
     call_remote_stonith(op, NULL);
     return FALSE;
 }
 
 static gboolean
 remote_op_timeout(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_total = 0;
 
     if (op->state == st_done) {
         crm_debug("Action %s (%s) for %s (%s) already completed",
                   op->action, op->id, op->target, op->client_name);
         return FALSE;
     }
 
     crm_debug("Action %s (%s) for %s (%s) timed out",
               op->action, op->id, op->target, op->client_name);
 
     if (op->phase == st_phase_on) {
         /* A remapped reboot operation timed out in the "on" phase, but the
          * "off" phase completed successfully, so quit trying any further
          * devices, and return success.
          */
         remote_op_done(op, NULL, pcmk_ok, FALSE);
         return FALSE;
     }
 
     op->state = st_failed;
 
     remote_op_done(op, NULL, -ETIME, FALSE);
 
     return FALSE;
 }
 
 static gboolean
 remote_op_query_timeout(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     op->query_timer = 0;
     if (op->state == st_done) {
         crm_debug("Operation %s for %s already completed", op->id, op->target);
     } else if (op->state == st_exec) {
         crm_debug("Operation %s for %s already in progress", op->id, op->target);
     } else if (op->query_results) {
         crm_debug("Query %s for %s complete: %d", op->id, op->target, op->state);
         call_remote_stonith(op, NULL);
     } else {
         crm_debug("Query %s for %s timed out: %d", op->id, op->target, op->state);
         if (op->op_timer_total) {
             g_source_remove(op->op_timer_total);
             op->op_timer_total = 0;
         }
         remote_op_timeout(op);
     }
 
     return FALSE;
 }
 
 static gboolean
 topology_is_empty(stonith_topology_t *tp)
 {
     int i;
 
     if (tp == NULL) {
         return TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         if (tp->levels[i] != NULL) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 /*
  * \internal
  * \brief Add a device to an operation's automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to add
  */
 static void
 add_required_device(remote_fencing_op_t *op, const char *device)
 {
     GListPtr match  = g_list_find_custom(op->automatic_list, device,
                                          sort_strings);
 
     if (!match) {
         op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
     }
 }
 
 /*
  * \internal
  * \brief Remove a device from the automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to remove
  */
 static void
 remove_required_device(remote_fencing_op_t *op, const char *device)
 {
     GListPtr match = g_list_find_custom(op->automatic_list, device,
                                         sort_strings);
 
     if (match) {
         op->automatic_list = g_list_remove(op->automatic_list, match->data);
     }
 }
 
 /* deep copy the device list */
 static void
 set_op_device_list(remote_fencing_op_t * op, GListPtr devices)
 {
     GListPtr lpc = NULL;
 
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         op->devices_list = g_list_append(op->devices_list, strdup(lpc->data));
     }
     op->devices = op->devices_list;
 }
 
+/*!
+ * \internal
+ * \brief Check whether a node matches a topology target
+ *
+ * \param[in] tp    Topology table entry to check
+ * \param[in] node  Name of node to check
+ *
+ * \return TRUE if node matches topology target
+ */
+static gboolean
+topology_matches(const stonith_topology_t *tp, const char *node)
+{
+    CRM_CHECK(node && tp && tp->target, return FALSE);
+    if (strchr(tp->target, '=')) {
+        /* This level targets by attribute, so tp->target is a NAME=VALUE pair
+         * of a permanent attribute applied to targeted nodes. The test below
+         * relies on the locally cached copy of the CIB, so if fencing needs to
+         * be done before the initial CIB is received or after a malformed CIB
+         * is received, then the topology cannot be used.
+         */
+        if (node_has_attr(node, tp->target)) {
+            crm_notice("Matched %s with %s by attribute", node, tp->target);
+            return TRUE;
+        }
+    } else {
+        /* This level targets by name, so tp->target is a regular expression
+         * matching names of nodes to be targeted.
+         */
+        regex_t r_patt;
+
+        if (regcomp(&r_patt, tp->target, REG_EXTENDED)) {
+            crm_info("Bad regex '%s' for fencing level", tp->target);
+        } else {
+            int status = regexec(&r_patt, node, 0, NULL, 0);
+
+            regfree(&r_patt);
+            if (status == 0) {
+                crm_notice("Matched %s with %s by name", node, tp->target);
+                return TRUE;
+            }
+        }
+    }
+    crm_trace("No match for %s with %s", node, tp->target);
+    return FALSE;
+}
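+
+/* Illustrative examples (hypothetical targets): a topology entry whose target is
+ * "rack=1" matches any node that has the permanent node attribute rack=1 in the
+ * cached CIB, while an entry whose target is the regular expression
+ * "^node-[0-9]+$" matches nodes named node-1, node-2, and so on by name.
+ */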
+
 stonith_topology_t *
 find_topology_for_host(const char *host) 
 {
     stonith_topology_t *tp = g_hash_table_lookup(topology, host);
 
     if(tp == NULL) {
-        int status = 1;
-        regex_t r_patt;
         GHashTableIter tIter;
 
         crm_trace("Testing %d topologies for a match", g_hash_table_size(topology));
         g_hash_table_iter_init(&tIter, topology);
         while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) {
-
-            if (regcomp(&r_patt, tp->node, REG_EXTENDED)) {
-                crm_info("Bad regex '%s' for fencing level", tp->node);
-            } else {
-                status = regexec(&r_patt, host, 0, NULL, 0);
-                regfree(&r_patt);
-            }
-
-            if (status == 0) {
-                crm_notice("Matched %s with %s", host, tp->node);
+            if (topology_matches(tp, host)) {
                 break;
             }
-            crm_trace("No match for %s with %s", host, tp->node);
             tp = NULL;
         }
     }
-
     return tp;
 }
 
 /*!
  * \internal
  * \brief Set fencing operation's device list to target's next topology level
  *
  * \param[in,out] op  Remote fencing operation to modify
  *
  * \return pcmk_ok if successful, if the target was not specified (i.e. for
  *         queries), or if the target has no topology; -EINVAL if there are no
  *         more topology levels to try
  */
 static int
 stonith_topology_next(remote_fencing_op_t * op)
 {
     stonith_topology_t *tp = NULL;
 
     if (op->target) {
         /* Queries don't have a target set */
         tp = find_topology_for_host(op->target);
     }
     if (topology_is_empty(tp)) {
         return pcmk_ok;
     }
 
     set_bit(op->call_options, st_opt_topology);
 
     /* This is a new level, so undo any remapping left over from previous */
     undo_op_remap(op);
 
     do {
         op->level++;
 
     } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
 
     if (op->level < ST_LEVEL_MAX) {
         crm_trace("Attempting fencing level %d for %s (%d devices) - %s@%s.%.8s",
                   op->level, op->target, g_list_length(tp->levels[op->level]),
                   op->client_name, op->originator, op->id);
         set_op_device_list(op, tp->levels[op->level]);
 
         if (g_list_next(op->devices_list) && safe_str_eq(op->action, "reboot")) {
             /* A reboot has been requested for a topology level with multiple
              * devices. Instead of rebooting the devices sequentially, we will
              * turn them all off, then turn them all on again. (Think about
              * switched power outlets for redundant power supplies.)
              */
             op_phase_off(op);
         }
         return pcmk_ok;
     }
 
     crm_notice("All fencing options to fence %s for %s@%s.%.8s failed",
                op->target, op->client_name, op->originator, op->id);
     return -EINVAL;
 }
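+
+/* Illustrative example (hypothetical devices): if the topology level chosen by
+ * stonith_topology_next() holds two power switches, say pdu1 and pdu2, a
+ * requested "reboot" is remapped so that both are switched off first and then
+ * both are switched back on, instead of power-cycling each device in sequence.
+ */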
 
 /*!
  * \brief Check to see if this operation is a duplicate of another in-flight
  * operation. If so, merge this operation into the in-flight operation and mark
  * it as a duplicate.
  */
 static void
 merge_duplicates(remote_fencing_op_t * op)
 {
     GHashTableIter iter;
     remote_fencing_op_t *other = NULL;
 
     time_t now = time(NULL);
 
     g_hash_table_iter_init(&iter, remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
         crm_node_t *peer = NULL;
         const char *other_action = op_requested_action(other);
 
         if (other->state > st_exec) {
             /* Must be in-progress */
             continue;
         } else if (safe_str_neq(op->target, other->target)) {
             /* Must be for the same node */
             continue;
         } else if (safe_str_neq(op->action, other_action)) {
             crm_trace("Must be for the same action: %s vs. %s",
                       op->action, other_action);
             continue;
         } else if (safe_str_eq(op->client_name, other->client_name)) {
             crm_trace("Must be for different clients: %s", op->client_name);
             continue;
         } else if (safe_str_eq(other->target, other->originator)) {
             crm_trace("Can't be a suicide operation: %s", other->target);
             continue;
         }
 
         peer = crm_get_peer(0, other->originator);
         if(fencing_peer_active(peer) == FALSE) {
             crm_notice("Failing stonith action %s for node %s originating from %s@%s.%.8s: Originator is dead",
                        other->action, other->target, other->client_name, other->originator, other->id);
             other->state = st_failed;
             continue;
 
         } else if(other->total_timeout > 0 && now > (other->total_timeout + other->created)) {
             crm_info("Stonith action %s for node %s originating from %s@%s.%.8s is too old: %d vs. %d + %d",
                      other->action, other->target, other->client_name, other->originator, other->id,
                      now, other->created, other->total_timeout);
             continue;
         }
 
         /* There is another in-flight request to fence the same host
          * Piggyback on that instead.  If it fails, so do we.
          */
         other->duplicates = g_list_append(other->duplicates, op);
         if (other->total_timeout == 0) {
             crm_trace("Making a best-guess as to the timeout used");
             other->total_timeout = op->total_timeout =
                 TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
         }
         crm_notice
             ("Merging stonith action %s for node %s originating from client %s.%.8s with identical request from %s@%s.%.8s (%ds)",
              op->action, op->target, op->client_name, op->id, other->client_name, other->originator,
              other->id, other->total_timeout);
         report_timeout_period(op, other->total_timeout);
         op->state = st_duplicate;
     }
 }
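+
+/* Illustrative scenario: if two different clients (say, on two different nodes)
+ * request the same action against the same target at nearly the same time, the
+ * second operation is marked st_duplicate above and simply piggybacks on the
+ * first; it reports the same result and timeout instead of fencing twice.
+ */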
 
 static uint32_t fencing_active_peers(void)
 {
     uint32_t count = 0;
     crm_node_t *entry;
     GHashTableIter gIter;
 
     g_hash_table_iter_init(&gIter, crm_peer_cache);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         if(fencing_peer_active(entry)) {
             count++;
         }
     }
     return count;
 }
 
 int
 stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op)
 {
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
 
     op->state = st_done;
     op->completed = time(NULL);
     op->delegate = strdup("a human");
 
     crm_notice("Injecting manual confirmation that %s is safely off/down",
                crm_element_value(dev, F_STONITH_TARGET));
 
     remote_op_done(op, msg, pcmk_ok, FALSE);
 
     /* Replies are sent via done_cb->stonith_send_async_reply()->do_local_reply() */
     return -EINPROGRESS;
 }
 
 /*!
  * \internal
  * \brief Create a new remote stonith op
  * \param client, the local stonith client id that initiated the operation
  * \param request, The request from the client that started the operation
  * \param peer, Is this operation owned by another stonith peer? Operations
  *        owned by other peers are stored on all the stonith nodes, but only the
  *        owner executes the operation. All the nodes get the results of the operation
  *        once the owner finishes executing it.
  */
 void *
 create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
     int call_options = 0;
 
     if (remote_op_list == NULL) {
         remote_op_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_remote_op);
     }
 
     /* If this operation is owned by another node, check to make
      * sure we haven't already created this operation. */
     if (peer && dev) {
         const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
 
         CRM_CHECK(op_id != NULL, return NULL);
 
         op = g_hash_table_lookup(remote_op_list, op_id);
         if (op) {
             crm_debug("%s already exists", op_id);
             return op;
         }
     }
 
     op = calloc(1, sizeof(remote_fencing_op_t));
 
     crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
 
     if (peer && dev) {
         op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
     } else {
         op->id = crm_generate_uuid();
     }
 
     g_hash_table_replace(remote_op_list, op->id, op);
     CRM_LOG_ASSERT(g_hash_table_lookup(remote_op_list, op->id) != NULL);
     crm_trace("Created %s", op->id);
 
     op->state = st_query;
     op->replies_expected = fencing_active_peers();
     op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
     op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
     op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
     op->created = time(NULL);
 
     if (op->originator == NULL) {
         /* Local or relayed request */
         op->originator = strdup(stonith_our_uname);
     }
 
     CRM_LOG_ASSERT(client != NULL);
     if (client) {
         op->client_id = strdup(client);
     }
 
     op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);
 
     op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
     op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     op->call_options = call_options;
 
     crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid));
 
     crm_trace("%s new stonith op: %s - %s of %s for %s",
               (peer
                && dev) ? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name);
 
     if (op->call_options & st_opt_cs_nodeid) {
         int nodeid = crm_atoi(op->target, NULL);
         crm_node_t *node = crm_get_peer(nodeid, NULL);
 
         /* Ensure the conversion only happens once */
         op->call_options &= ~st_opt_cs_nodeid;
 
         if (node && node->uname) {
             free(op->target);
             op->target = strdup(node->uname);
         } else {
             crm_warn("Could not expand nodeid '%s' into a host name (%p)", op->target, node);
         }
     }
 
     /* check to see if this is a duplicate operation of another in-flight operation */
     merge_duplicates(op);
 
     return op;
 }
 
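 /*!
  * \internal
  * \brief Create and start a remote fencing operation owned by this node
  *
  * Creates the operation and marks it as owned locally. Depending on the
  * request, it then records a manual confirmation, fails immediately if no
  * usable topology level exists, returns early for duplicates, or broadcasts
  * a STONITH_OP_QUERY to all peers and starts the query timer.
  */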
 remote_fencing_op_t *
 initiate_remote_stonith_op(crm_client_t * client, xmlNode * request, gboolean manual_ack)
 {
     int query_timeout = 0;
     xmlNode *query = NULL;
     const char *client_id = NULL;
     remote_fencing_op_t *op = NULL;
 
     if (client) {
         client_id = client->id;
     } else {
         client_id = crm_element_value(request, F_STONITH_CLIENTID);
     }
 
     CRM_LOG_ASSERT(client_id != NULL);
     op = create_remote_stonith_op(client_id, request, FALSE);
     op->owner = TRUE;
     if (manual_ack) {
         crm_notice("Initiating manual confirmation for %s: %s",
                    op->target, op->id);
         return op;
     }
 
     CRM_CHECK(op->action, return NULL);
 
     if (stonith_topology_next(op) != pcmk_ok) {
         op->state = st_failed;
     }
 
     switch (op->state) {
         case st_failed:
             crm_warn("Initiation of remote operation %s for %s: failed (%s)", op->action,
                      op->target, op->id);
             remote_op_done(op, NULL, -EINVAL, FALSE);
             return op;
 
         case st_duplicate:
             crm_info("Initiating remote operation %s for %s: %s (duplicate)", op->action,
                      op->target, op->id);
             return op;
 
         default:
             crm_notice("Initiating remote operation %s for %s: %s (%d)", op->action, op->target,
                        op->id, op->state);
     }
 
     query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
                               NULL, op->call_options);
 
     crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(query, F_STONITH_TARGET, op->target);
     crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op));
     crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
     crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout);
 
     send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
     free_xml(query);
 
     query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
     op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
 
     return op;
 }
 
 enum find_best_peer_options {
     /*! Skip checking the target peer for capable fencing devices */
     FIND_PEER_SKIP_TARGET = 0x0001,
     /*! Only check the target peer for capable fencing devices */
     FIND_PEER_TARGET_ONLY = 0x0002,
     /*! Skip peers and devices that are not verified */
     FIND_PEER_VERIFIED_ONLY = 0x0004,
 };
 
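 /*!
  * \internal
  * \brief Find a peer from the query results that can run the next device
  *
  * Walks the query results, honoring the FIND_PEER_* options to skip or
  * require the target node and to restrict the search to verified devices.
  *
  * \return Matching peer result, or NULL if none qualifies
  */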
 static st_query_result_t *
 find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
 {
     GListPtr iter = NULL;
     gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
 
     if (!device && is_set(op->call_options, st_opt_topology)) {
         return NULL;
     }
 
     for (iter = op->query_results; iter != NULL; iter = iter->next) {
         st_query_result_t *peer = iter->data;
 
         crm_trace("Testing result from %s for %s with %d devices: %d %x",
                   peer->host, op->target, peer->ndevices, peer->tried, options);
         if ((options & FIND_PEER_SKIP_TARGET) && safe_str_eq(peer->host, op->target)) {
             continue;
         }
         if ((options & FIND_PEER_TARGET_ONLY) && safe_str_neq(peer->host, op->target)) {
             continue;
         }
 
         if (is_set(op->call_options, st_opt_topology)) {
 
             if (grab_peer_device(op, peer, device, verified_devices_only)) {
                 return peer;
             }
 
         } else if ((peer->tried == FALSE)
                    && count_peer_devices(op, peer, verified_devices_only)) {
 
             /* No topology: Use the current best peer */
             crm_trace("Simple fencing");
             return peer;
         }
     }
 
     return NULL;
 }
 
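 /*!
  * \internal
  * \brief Choose the best peer to execute the current fencing action
  *
  * Preference order: a peer other than the target with verified access, then
  * any peer other than the target, then the target itself (self-fencing,
  * which is never allowed for the "on" phase of a remapped reboot),
  * escalating through topology levels as needed.
  */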
 static st_query_result_t *
 stonith_choose_peer(remote_fencing_op_t * op)
 {
     const char *device = NULL;
     st_query_result_t *peer = NULL;
     uint32_t active = fencing_active_peers();
 
     do {
         if (op->devices) {
             device = op->devices->data;
             crm_trace("Checking for someone to fence (%s) %s with %s",
                       op->action, op->target, device);
         } else {
             crm_trace("Checking for someone to fence (%s) %s",
                       op->action, op->target);
         }
 
         /* Best choice is a peer other than the target with verified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
         if (peer) {
             crm_trace("Found verified peer %s for %s", peer->host, device?device:"<any>");
             return peer;
         }
 
         if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) {
             crm_trace("Waiting before looking for unverified devices to fence %s", op->target);
             return NULL;
         }
 
         /* If no other peer has verified access, next best is unverified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
         if (peer) {
             crm_trace("Found best unverified peer %s", peer->host);
             return peer;
         }
 
         /* If no other peer can do it, last option is self-fencing
          * (which is never allowed for the "on" phase of a remapped reboot)
          */
         if (op->phase != st_phase_on) {
             peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
             if (peer) {
                 crm_trace("%s will fence itself", peer->host);
                 return peer;
             }
         }
 
         /* Try the next fencing level if there is one (unless we're in the "on"
          * phase of a remapped "reboot", because we ignore errors in that case)
          */
     } while ((op->phase != st_phase_on)
              && is_set(op->call_options, st_opt_topology)
              && stonith_topology_next(op) == pcmk_ok);
 
     crm_notice("Couldn't find anyone to fence (%s) %s with %s",
                op->action, op->target, (device? device : "any device"));
     return NULL;
 }
 
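 /*!
  * \internal
  * \brief Get the execution timeout for one device on one peer
  *
  * Uses the peer-reported action-specific timeout for the current phase if
  * one was given, otherwise the operation's base timeout, plus any reported
  * maximum random delay for the device.
  */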
 static int
 get_device_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer,
                    const char *device)
 {
     device_properties_t *props;
 
     if (!peer || !device) {
         return op->base_timeout;
     }
 
     props = g_hash_table_lookup(peer->devices, device);
     if (!props) {
         return op->base_timeout;
     }
 
     return (props->custom_action_timeout[op->phase]?
            props->custom_action_timeout[op->phase] : op->base_timeout)
            + props->delay_max[op->phase];
 }
 
 struct timeout_data {
     const remote_fencing_op_t *op;
     const st_query_result_t *peer;
     int total_timeout;
 };
 
 /*!
  * \internal
  * \brief Add timeout to a total if device has not been executed yet
  *
  * \param[in] key        GHashTable key (device ID)
  * \param[in] value      GHashTable value (device properties)
  * \param[in] user_data  Timeout data
  */
 static void
 add_device_timeout(gpointer key, gpointer value, gpointer user_data)
 {
     const char *device_id = key;
     device_properties_t *props = value;
     struct timeout_data *timeout = user_data;
 
     if (!props->executed[timeout->op->phase]
         && !props->disallowed[timeout->op->phase]) {
         timeout->total_timeout += get_device_timeout(timeout->op,
                                                      timeout->peer, device_id);
     }
 }
 
 static int
 get_peer_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer)
 {
     struct timeout_data timeout;
 
     timeout.op = op;
     timeout.peer = peer;
     timeout.total_timeout = 0;
 
     g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
 
     return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
 }
 
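 /*!
  * \internal
  * \brief Calculate a total timeout for the whole operation
  *
  * With topology, sums the device timeouts across all configured levels using
  * whichever peer reported each device; otherwise uses the chosen peer's
  * combined device timeouts, falling back to the base timeout.
  */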
 static int
 get_op_total_timeout(const remote_fencing_op_t *op,
                      const st_query_result_t *chosen_peer)
 {
     int total_timeout = 0;
     stonith_topology_t *tp = find_topology_for_host(op->target);
 
     if (is_set(op->call_options, st_opt_topology) && tp) {
         int i;
         GListPtr device_list = NULL;
         GListPtr iter = NULL;
 
         /* Yep, this looks scary, nested loops all over the place.
          * Here is what is going on.
          * Loop1: Iterate through fencing levels.
          * Loop2: If a fencing level has devices, loop through each device
          * Loop3: For each device in a fencing level, see what peer owns it
          *        and what that peer has reported the timeout is for the device.
          */
         for (i = 0; i < ST_LEVEL_MAX; i++) {
             if (!tp->levels[i]) {
                 continue;
             }
             for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
                 for (iter = op->query_results; iter != NULL; iter = iter->next) {
                     const st_query_result_t *peer = iter->data;
 
                     if (find_peer_device(op, peer, device_list->data)) {
                         total_timeout += get_device_timeout(op, peer,
                                                             device_list->data);
                         break;
                     }
                 }               /* End Loop3: match device with peer that owns device, find device's timeout period */
             }                   /* End Loop2: iterate through devices at a specific level */
         }                       /*End Loop1: iterate through fencing levels */
 
     } else if (chosen_peer) {
         total_timeout = get_peer_timeout(op, chosen_peer);
     } else {
         total_timeout = op->base_timeout;
     }
 
     return total_timeout ? total_timeout : op->base_timeout;
 }
 
 static void
 report_timeout_period(remote_fencing_op_t * op, int op_timeout)
 {
     GListPtr iter = NULL;
     xmlNode *update = NULL;
     const char *client_node = NULL;
     const char *client_id = NULL;
     const char *call_id = NULL;
 
     if (op->call_options & st_opt_sync_call) {
         /* There is no reason to report the timeout for a synchronous call. It
          * is impossible to use the reported timeout to do anything when the client
          * is blocking for the response.  This update is only important for
          * async calls that require a callback to report the results. */
         return;
     } else if (!op->request) {
         return;
     }
 
     crm_trace("Reporting timeout for %s.%.8s", op->client_name, op->id);
     client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
     call_id = crm_element_value(op->request, F_STONITH_CALLID);
     client_id = crm_element_value(op->request, F_STONITH_CLIENTID);
     if (!client_node || !call_id || !client_id) {
         return;
     }
 
     if (safe_str_eq(client_node, stonith_our_uname)) {
         /* The client is connected to this node, send the update directly to them */
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return;
     }
 
     /* The client is connected to another node, relay this update to them */
     update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
     crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(update, F_STONITH_CLIENTID, client_id);
     crm_xml_add(update, F_STONITH_CALLID, call_id);
     crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout);
 
     send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE);
 
     free_xml(update);
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *dup = iter->data;
 
         crm_trace("Reporting timeout for duplicate %s.%.8s", dup->client_name, dup->id);
         report_timeout_period(iter->data, op_timeout);
     }
 }
 
 /*!
  * \internal
  * \brief Advance an operation to the next device in its topology
  *
  * \param[in,out] op      Operation to advance
  * \param[in]     device  ID of device just completed
  * \param[in]     msg     XML reply that contained device result (if available)
  * \param[in]     rc      Return code of device's execution
  */
 static void
 advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg,
                     int rc)
 {
     /* Advance to the next device at this topology level, if any */
     if (op->devices) {
         op->devices = op->devices->next;
     }
 
     /* Handle automatic unfencing if an "on" action was requested */
     if ((op->phase == st_phase_requested) && safe_str_eq(op->action, "on")) {
         /* If the device we just executed was required, it's not anymore */
         remove_required_device(op, device);
 
         /* If there are no more devices at this topology level, run through any
          * remaining devices with automatic unfencing
          */
         if (op->devices == NULL) {
             op->devices = op->automatic_list;
         }
     }
 
     if ((op->devices == NULL) && (op->phase == st_phase_off)) {
         /* We're done with this level and with required devices, but we had
          * remapped "reboot" to "off", so start over with "on". If any devices
          * need to be turned back on, op->devices will be non-NULL after this.
          */
         op_phase_on(op);
     }
 
     if (op->devices) {
         /* Necessary devices remain, so execute the next one */
         crm_trace("Next for %s on behalf of %s@%s (rc was %d)",
                   op->target, op->originator, op->client_name, rc);
         call_remote_stonith(op, NULL);
     } else {
         /* We're done with all devices and phases, so finalize operation */
         crm_trace("Marking complex fencing op for %s as complete", op->target);
         op->state = st_done;
         remote_op_done(op, msg, rc, FALSE);
     }
 }
 
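 /*!
  * \internal
  * \brief Ask a peer to execute the next fencing action for an operation
  *
  * Chooses a peer if none was supplied, arms the total-operation timer on
  * first use, sends a STONITH_OP_FENCE request to the chosen peer, and starts
  * a per-action timer (or the watchdog timer for self-fencing). If no peer is
  * available, the failure is handled according to the operation's phase and
  * state.
  */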
 void
 call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
 {
     const char *device = NULL;
     int timeout = op->base_timeout;
 
     crm_trace("State for %s.%.8s: %s %d", op->target, op->client_name, op->id, op->state);
     if (peer == NULL && !is_set(op->call_options, st_opt_topology)) {
         peer = stonith_choose_peer(op);
     }
 
     if (!op->op_timer_total) {
         int total_timeout = get_op_total_timeout(op, peer);
 
         op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout;
         op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
         report_timeout_period(op, op->total_timeout);
         crm_info("Total remote op timeout set to %d for fencing of node %s for %s.%.8s",
                  total_timeout, op->target, op->client_name, op->id);
     }
 
     if (is_set(op->call_options, st_opt_topology) && op->devices) {
         /* Ignore any peer preference, they might not have the device we need */
         /* When using topology, stonith_choose_peer() removes the device from
          * further consideration, so be sure to calculate timeout beforehand */
         peer = stonith_choose_peer(op);
 
         device = op->devices->data;
         timeout = get_device_timeout(op, peer, device);
     }
 
     if (peer) {
         int timeout_one = 0;
         xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
 
         crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id);
         crm_xml_add(remote_op, F_STONITH_TARGET, op->target);
         crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
         crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator);
         crm_xml_add(remote_op, F_STONITH_CLIENTID, op->client_id);
         crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name);
         crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout);
         crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options);
 
         if (device) {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR *
                           get_device_timeout(op, peer, device);
             crm_info("Requesting that %s perform op %s %s with %s for %s (%ds)", peer->host,
                      op->action, op->target, device, op->client_name, timeout_one);
             crm_xml_add(remote_op, F_STONITH_DEVICE, device);
             crm_xml_add(remote_op, F_STONITH_MODE, "slave");
 
         } else {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
             crm_info("Requesting that %s perform op %s %s for %s (%ds, %ds)",
                      peer->host, op->action, op->target, op->client_name, timeout_one, stonith_watchdog_timeout_ms);
             crm_xml_add(remote_op, F_STONITH_MODE, "smart");
 
         }
 
         op->state = st_exec;
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
         }
 
         if(stonith_watchdog_timeout_ms > 0 && device && safe_str_eq(device, "watchdog")) {
             crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)",
                        stonith_watchdog_timeout_ms/1000, op->target,
                        op->action, op->client_name, op->id, device);
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
 
             /* TODO check devices to verify watchdog will be in use */
         } else if(stonith_watchdog_timeout_ms > 0
                   && safe_str_eq(peer->host, op->target)
                   && safe_str_neq(op->action, "on")) {
             crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)",
                        stonith_watchdog_timeout_ms/1000, op->target,
                        op->action, op->client_name, op->id, device);
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
 
         } else {
             op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
         }
 
 
         send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE);
         peer->tried = TRUE;
         free_xml(remote_op);
         return;
 
     } else if (op->phase == st_phase_on) {
         /* A remapped "on" cannot be executed, but the node was already
          * turned off successfully, so ignore the error and continue.
          */
         crm_warn("Ignoring %s 'on' failure (no capable peers) for %s after successful 'off'",
                  device, op->target);
         advance_op_topology(op, device, NULL, pcmk_ok);
         return;
 
     } else if (op->owner == FALSE) {
         crm_err("Fencing (%s) of %s for %s is not ours to control",
                 op->action, op->target, op->client_name);
 
     } else if (op->query_timer == 0) {
         /* We've exhausted all available peers */
         crm_info("No remaining peers capable of fencing (%s) %s for %s (%d)",
                  op->target, op->action, op->client_name, op->state);
         CRM_LOG_ASSERT(op->state < st_done);
         remote_op_timeout(op);
 
     } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
         int rc = -EHOSTUNREACH;
 
         /* if the operation never left the query state,
          * but we have all the expected replies, then no devices
          * are available to execute the fencing operation. */
 
         if(stonith_watchdog_timeout_ms && (device == NULL || safe_str_eq(device, "watchdog"))) {
             crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)",
                      stonith_watchdog_timeout_ms/1000, op->target,
                      op->action, op->client_name, op->id, device);
 
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
             return;
         }
 
         if (op->state == st_query) {
             crm_info("None of the %d peers have devices capable of fencing (%s) %s for %s (%d)",
                      op->replies, op->action, op->target, op->client_name,
                      op->state);
 
             rc = -ENODEV;
         } else {
             crm_info("None of the %d peers are capable of fencing (%s) %s for %s (%d)",
                      op->replies, op->action, op->target, op->client_name,
                      op->state);
         }
 
         op->state = st_failed;
         remote_op_done(op, NULL, rc, FALSE);
 
     } else if (device) {
         crm_info("Waiting for additional peers capable of fencing (%s) %s with %s for %s.%.8s",
                  op->action, op->target, device, op->client_name, op->id);
     } else {
         crm_info("Waiting for additional peers capable of fencing (%s) %s for %s%.8s",
                  op->action, op->target, op->client_name, op->id);
     }
 }
 
 /*!
  * \internal
  * \brief Comparison function for sorting query results
  *
  * \param[in] a  GList item to compare
  * \param[in] b  GList item to compare
  *
  * \return Per the glib documentation, "a negative integer if the first value
  *         comes before the second, 0 if they are equal, or a positive integer
  *         if the first value comes after the second."
  */
 static gint
 sort_peers(gconstpointer a, gconstpointer b)
 {
     const st_query_result_t *peer_a = a;
     const st_query_result_t *peer_b = b;
 
     return (peer_b->ndevices - peer_a->ndevices);
 }
 
 /*!
  * \internal
  * \brief Determine if all the devices in the topology are found or not
  */
 static gboolean
 all_topology_devices_found(remote_fencing_op_t * op)
 {
     GListPtr device = NULL;
     GListPtr iter = NULL;
     device_properties_t *match = NULL;
     stonith_topology_t *tp = NULL;
     gboolean skip_target = FALSE;
     int i;
 
     tp = find_topology_for_host(op->target);
     if (!tp) {
         return FALSE;
     }
     if (safe_str_eq(op->action, "off") || safe_str_eq(op->action, "reboot")) {
         /* Don't count the devices on the target node if we are killing
          * the target node. */
         skip_target = TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         for (device = tp->levels[i]; device; device = device->next) {
             match = NULL;
             for (iter = op->query_results; iter && !match; iter = iter->next) {
                 st_query_result_t *peer = iter->data;
 
                 if (skip_target && safe_str_eq(peer->host, op->target)) {
                     continue;
                 }
                 match = find_peer_device(op, peer, device->data);
             }
             if (!match) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Parse action-specific device properties from XML
  *
  * \param[in]     xml     XML element containing the properties
  * \param[in]     peer    Name of peer that sent XML (for logs)
  * \param[in]     device  Device ID (for logs)
  * \param[in]     action  Action the properties relate to (for logs)
  * \param[in,out] op      Operation to which the device properties apply
  * \param[in]     phase   Phase the properties relate to
  * \param[in,out] props   Device properties to update
  */
 static void
 parse_action_specific(xmlNode *xml, const char *peer, const char *device,
                       const char *action, remote_fencing_op_t *op,
                       enum st_remap_phase phase, device_properties_t *props)
 {
     props->custom_action_timeout[phase] = 0;
     crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
                           &props->custom_action_timeout[phase]);
     if (props->custom_action_timeout[phase]) {
         crm_trace("Peer %s with device %s returned %s action timeout %d",
                   peer, device, action, props->custom_action_timeout[phase]);
     }
 
     props->delay_max[phase] = 0;
     crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]);
     if (props->delay_max[phase]) {
         crm_trace("Peer %s with device %s returned maximum of random delay %d for %s",
                   peer, device, props->delay_max[phase], action);
     }
 
     /* Handle devices with automatic unfencing */
     if (safe_str_eq(action, "on")) {
         int required = 0;
 
         crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
         if (required) {
             crm_trace("Peer %s requires device %s to execute for action %s",
                       peer, device, action);
             add_required_device(op, device);
         }
     }
 
     /* If a reboot is remapped to off+on, it's possible that a node is allowed
      * to perform one action but not another.
      */
     if (crm_is_true(crm_element_value(xml, F_STONITH_ACTION_DISALLOWED))) {
         props->disallowed[phase] = TRUE;
         crm_trace("Peer %s is disallowed from executing %s for device %s",
                   peer, action, device);
     }
 }
 
 /*!
  * \internal
  * \brief Parse one device's properties from peer's XML query reply
  *
  * \param[in]     xml       XML node containing device properties
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in,out] result    Peer's results
  * \param[in]     device    ID of device being parsed
  */
 static void
 add_device_properties(xmlNode *xml, remote_fencing_op_t *op,
                       st_query_result_t *result, const char *device)
 {
     xmlNode *child;
     int verified = 0;
     device_properties_t *props = calloc(1, sizeof(device_properties_t));
 
     /* Add a new entry to this result's devices list */
     CRM_ASSERT(props != NULL);
     g_hash_table_insert(result->devices, strdup(device), props);
 
     /* Peers with verified (monitored) access will be preferred */
     crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified);
     if (verified) {
         crm_trace("Peer %s has confirmed a verified device %s",
                   result->host, device);
         props->verified = TRUE;
     }
 
     /* Parse action-specific device properties */
     parse_action_specific(xml, result->host, device, op_requested_action(op),
                           op, st_phase_requested, props);
     for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
         /* Replies for "reboot" operations will include the action-specific
          * values for "off" and "on" in child elements, just in case the reboot
          * winds up getting remapped.
          */
         if (safe_str_eq(ID(child), "off")) {
             parse_action_specific(child, result->host, device, "off",
                                   op, st_phase_off, props);
         } else if (safe_str_eq(ID(child), "on")) {
             parse_action_specific(child, result->host, device, "on",
                                   op, st_phase_on, props);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Parse a peer's XML query reply and add it to operation's results
  *
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in]     host      Name of peer that sent this reply
  * \param[in]     ndevices  Number of devices expected in reply
  * \param[in]     xml       XML node containing device list
  *
  * \return Newly allocated result structure with parsed reply
  */
 static st_query_result_t *
 add_result(remote_fencing_op_t *op, const char *host, int ndevices, xmlNode *xml)
 {
     st_query_result_t *result = calloc(1, sizeof(st_query_result_t));
     xmlNode *child;
 
     CRM_CHECK(result != NULL, return NULL);
     result->host = strdup(host);
     result->devices = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free);
 
     /* Each child element describes one capable device available to the peer */
     for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
         const char *device = ID(child);
 
         if (device) {
             add_device_properties(child, op, result, device);
         }
     }
 
     result->ndevices = g_hash_table_size(result->devices);
     CRM_CHECK(ndevices == result->ndevices,
               crm_err("Query claimed to have %d devices but %d found",
                       ndevices, result->ndevices));
 
     op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers);
     return result;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to our fencing query
  *
  * Parse a query result from XML and store it in the remote operation
  * table, and when enough replies have been received, issue a fencing request.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  *
  * \note See initiate_remote_stonith_op() for how the XML query was initially
  *       formed, and stonith_query() for how the peer formed its XML reply.
  */
 int
 process_remote_stonith_query(xmlNode * msg)
 {
     int ndevices = 0;
     gboolean host_is_target = FALSE;
     gboolean have_all_replies = FALSE;
     const char *id = NULL;
     const char *host = NULL;
     remote_fencing_op_t *op = NULL;
     st_query_result_t *result = NULL;
     uint32_t replies_expected;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
     crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices);
 
     op = g_hash_table_lookup(remote_op_list, id);
     if (op == NULL) {
         crm_debug("Unknown or expired remote op: %s", id);
         return -EOPNOTSUPP;
     }
 
     replies_expected = QB_MIN(op->replies_expected, fencing_active_peers());
     if ((++op->replies >= replies_expected) && (op->state == st_query)) {
         have_all_replies = TRUE;
     }
     host = crm_element_value(msg, F_ORIG);
     host_is_target = safe_str_eq(host, op->target);
 
     crm_info("Query result %d of %d from %s for %s/%s (%d devices) %s",
              op->replies, replies_expected, host,
              op->target, op->action, ndevices, id);
     if (ndevices > 0) {
         result = add_result(op, host, ndevices, dev);
     }
 
     if (is_set(op->call_options, st_opt_topology)) {
         /* If we start the fencing before all the topology results are in,
          * it is possible fencing levels will be skipped because of the missing
          * query results. */
         if (op->state == st_query && all_topology_devices_found(op)) {
             /* All the query results are in for the topology, start the fencing ops. */
             crm_trace("All topology devices found");
             call_remote_stonith(op, result);
 
         } else if (have_all_replies) {
             crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             call_remote_stonith(op, NULL);
         }
 
     } else if (op->state == st_query) {
         int nverified = count_peer_devices(op, result, TRUE);
 
         /* We have a result for a non-topology fencing op that looks promising,
          * go ahead and start fencing before query timeout */
         if (result && (host_is_target == FALSE) && nverified) {
             /* we have a verified device living on a peer that is not the target */
             crm_trace("Found %d verified devices", nverified);
             call_remote_stonith(op, result);
 
         } else if (have_all_replies) {
             crm_info("All query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             call_remote_stonith(op, NULL);
 
         } else {
             crm_trace("Waiting for more peer results before launching fencing operation");
         }
 
     } else if (result && (op->state == st_done)) {
         crm_info("Discarding query result from %s (%d devices): Operation is in state %d",
                  result->host, result->ndevices, op->state);
     }
 
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to a fencing request
  *
  * Parse a fencing reply from XML, and either finalize the operation
  * or attempt another device as appropriate.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  */
 int
 process_remote_stonith_exec(xmlNode * msg)
 {
     int rc = 0;
     const char *id = NULL;
     const char *device = NULL;
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" F_STONITH_RC, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     crm_element_value_int(dev, F_STONITH_RC, &rc);
 
     device = crm_element_value(dev, F_STONITH_DEVICE);
 
     if (remote_op_list) {
         op = g_hash_table_lookup(remote_op_list, id);
     }
 
     if (op == NULL && rc == pcmk_ok) {
         /* Record successful fencing operations */
         const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID);
 
         op = create_remote_stonith_op(client_id, dev, TRUE);
     }
 
     if (op == NULL) {
         /* Could be for an event that began before we started */
         /* TODO: Record the op for later querying */
         crm_info("Unknown or expired remote op: %s", id);
         return -EOPNOTSUPP;
     }
 
     if (op->devices && device && safe_str_neq(op->devices->data, device)) {
         crm_err("Received outdated reply for device %s (instead of %s) to %s node %s. "
                 "Operation already timed out at remote level.",
                 device, op->devices->data, op->action, op->target);
         return rc;
     }
 
     if (safe_str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast")) {
         crm_debug("Marking call to %s for %s on behalf of %s@%s.%.8s: %s (%d)",
                   op->action, op->target, op->client_name, op->id, op->originator,
                   pcmk_strerror(rc), rc);
         if (rc == pcmk_ok) {
             op->state = st_done;
         } else {
             op->state = st_failed;
         }
         remote_op_done(op, msg, rc, FALSE);
         return pcmk_ok;
     } else if (safe_str_neq(op->originator, stonith_our_uname)) {
         /* If this isn't a remote level broadcast, and we are not the
          * originator of the operation, we should not be receiving this msg. */
         crm_err("%s received non-broadcast fencing result for operation it does not own "
                 "(device %s targeting %s)",
                 stonith_our_uname, device, op->target);
         return rc;
     }
 
     if (is_set(op->call_options, st_opt_topology)) {
         const char *device = crm_element_value(msg, F_STONITH_DEVICE);
 
         crm_notice("Call to %s for %s on behalf of %s@%s: %s (%d)",
                    device, op->target, op->client_name, op->originator,
                    pcmk_strerror(rc), rc);
 
         /* We own the op, and it is complete. broadcast the result to all nodes
          * and notify our local clients. */
         if (op->state == st_done) {
             remote_op_done(op, msg, rc, FALSE);
             return rc;
         }
 
         if ((op->phase == st_phase_on) && (rc != pcmk_ok)) {
             /* A remapped "on" failed, but the node was already turned off
              * successfully, so ignore the error and continue.
              */
             crm_warn("Ignoring %s 'on' failure (exit code %d) for %s after successful 'off'",
                      device, rc, op->target);
             rc = pcmk_ok;
         }
 
         if (rc == pcmk_ok) {
             /* An operation completed successfully. Try another device if
              * necessary, otherwise mark the operation as done. */
             advance_op_topology(op, device, msg, rc);
             return rc;
         } else {
             /* This device failed, time to try another topology level. If no other
              * levels are available, mark this operation as failed and report results. */
             if (stonith_topology_next(op) != pcmk_ok) {
                 op->state = st_failed;
                 remote_op_done(op, msg, rc, FALSE);
                 return rc;
             }
         }
     } else if (rc == pcmk_ok && op->devices == NULL) {
         crm_trace("All done for %s", op->target);
 
         op->state = st_done;
         remote_op_done(op, msg, rc, FALSE);
         return rc;
     } else if (rc == -ETIME && op->devices == NULL) {
         /* If the operation timed out don't bother retrying other peers. */
         op->state = st_failed;
         remote_op_done(op, msg, rc, FALSE);
         return rc;
     } else {
         /* fall-through and attempt other fencing action using another peer */
     }
 
     /* Retry on failure */
     crm_trace("Next for %s on behalf of %s@%s (rc was %d)", op->target, op->originator,
               op->client_name, rc);
     call_remote_stonith(op, NULL);
     return rc;
 }
 
 int
 stonith_fence_history(xmlNode * msg, xmlNode ** output)
 {
     int rc = 0;
     const char *target = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_TRACE);
 
     if (dev) {
         int options = 0;
 
         target = crm_element_value(dev, F_STONITH_TARGET);
         crm_element_value_int(msg, F_STONITH_CALLOPTS, &options);
         if (target && (options & st_opt_cs_nodeid)) {
             int nodeid = crm_atoi(target, NULL);
             crm_node_t *node = crm_get_peer(nodeid, NULL);
 
             if (node) {
                 target = node->uname;
             }
         }
     }
 
     crm_trace("Looking for operations on %s in %p", target, remote_op_list);
 
     *output = create_xml_node(NULL, F_STONITH_HISTORY_LIST);
     if (remote_op_list) {
         GHashTableIter iter;
         remote_fencing_op_t *op = NULL;
 
         g_hash_table_iter_init(&iter, remote_op_list);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
             xmlNode *entry = NULL;
 
             if (target && strcmp(op->target, target) != 0) {
                 continue;
             }
 
             rc = 0;
             crm_trace("Attaching op %s", op->id);
             entry = create_xml_node(*output, STONITH_OP_EXEC);
             crm_xml_add(entry, F_STONITH_TARGET, op->target);
             crm_xml_add(entry, F_STONITH_ACTION, op->action);
             crm_xml_add(entry, F_STONITH_ORIGIN, op->originator);
             crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate);
             crm_xml_add(entry, F_STONITH_CLIENTNAME, op->client_name);
             crm_xml_add_int(entry, F_STONITH_DATE, op->completed);
             crm_xml_add_int(entry, F_STONITH_STATE, op->state);
         }
     }
 
     return rc;
 }
 
 gboolean
 stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
 {
     GHashTableIter iter;
     time_t now = time(NULL);
     remote_fencing_op_t *rop = NULL;
 
     crm_trace("tolerance=%d, remote_op_list=%p", tolerance, remote_op_list);
 
     if (tolerance <= 0 || !remote_op_list || target == NULL || action == NULL) {
         return FALSE;
     }
 
     g_hash_table_iter_init(&iter, remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
         if (strcmp(rop->target, target) != 0) {
             continue;
         } else if (rop->state != st_done) {
             continue;
         /* We don't have to worry about remapped reboots here
          * because if state is done, any remapping has been undone
          */
         } else if (strcmp(rop->action, action) != 0) {
             continue;
         } else if ((rop->completed + tolerance) < now) {
             continue;
         }
 
         crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
                    target, action, tolerance, rop->delegate, rop->originator);
         return TRUE;
     }
     return FALSE;
 }
diff --git a/fencing/test.c b/fencing/test.c
index a0893754d2..8e336a2e9b 100644
--- a/fencing/test.c
+++ b/fencing/test.c
@@ -1,664 +1,675 @@
 /* 
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  * 
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  * 
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 
 GMainLoop *mainloop = NULL;
 crm_trigger_t *trig = NULL;
 int mainloop_iter = 0;
 int callback_rc = 0;
 typedef void (*mainloop_test_iteration_cb) (int check_event);
 
 #define MAINLOOP_DEFAULT_TIMEOUT 2
 
 #define mainloop_test_done(pass) \
     if (pass) { \
         crm_info("SUCCESS - %s", __FUNCTION__); \
         mainloop_iter++;   \
         mainloop_set_trigger(trig);  \
     } else { \
         crm_err("FAILURE = %s async_callback %d", __FUNCTION__, callback_rc); \
         crm_exit(pcmk_err_generic); \
     } \
     callback_rc = 0; \
 
 
 /* *INDENT-OFF* */
 enum test_modes {
     /* class dev test using a very specific environment */
     test_standard = 0,
     /* watch notifications only */
     test_passive,
     /* sanity test stonith client api using fence_dummy */
     test_api_sanity,
     /* sanity test mainloop code with async responses */
     test_api_mainloop,
 };
 
 static struct crm_option long_options[] = {
     {"verbose",     0, 0, 'V'},
     {"version",     0, 0, '$'},
     {"help",        0, 0, '?'},
     {"passive",     0, 0, 'p'},
     {"api_test",    0, 0, 't'},
     {"mainloop_api_test",    0, 0, 'm'},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 stonith_t *st = NULL;
 struct pollfd pollfd;
 int st_opts = st_opt_sync_call;
 int expected_notifications = 0;
 int verbose = 0;
 
 static void
 dispatch_helper(int timeout)
 {
     int rc;
 
     crm_debug("Looking for notification");
     pollfd.events = POLLIN;
     while (true) {
         rc = poll(&pollfd, 1, timeout); /* wait up to 'timeout' ms (-1 means wait forever) */
         if (rc > 0) {
             if (!stonith_dispatch(st)) {
                 break;
             }
         } else {
             break;
         }
     }
 }
 
 static void
 st_callback(stonith_t * st, stonith_event_t * e)
 {
     if (st->state == stonith_disconnected) {
         crm_exit(ENOTCONN);
     }
 
     crm_notice("Operation %s requested by %s %s for peer %s.  %s reported: %s (ref=%s)",
                e->operation, e->origin, e->result == pcmk_ok ? "completed" : "failed",
                e->target, e->executioner ? e->executioner : "<none>",
                pcmk_strerror(e->result), e->id);
 
     if (expected_notifications) {
         expected_notifications--;
     }
 }
 
 static void
 st_global_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     crm_notice("Call id %d completed with rc %d", data->call_id, data->rc);
 }
 
 static void
 passive_test(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     dispatch_helper(600 * 1000);
 }
 
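 /* Run one synchronous API call: execute 'cmd', optionally wait for the given
  * number of notifications, and abort the test run unless the return code
  * matches 'expected_rc'.
  */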
 #define single_test(cmd, str, num_notifications, expected_rc) \
 { \
     int rc = 0; \
     rc = cmd; \
     expected_notifications = 0;  \
     if (num_notifications) { \
         expected_notifications = num_notifications; \
         dispatch_helper(500);  \
     } \
     if (rc != expected_rc) { \
         crm_err("FAILURE - expected rc %d != %d(%s) for cmd - %s\n", expected_rc, rc, pcmk_strerror(rc), str); \
         crm_exit(pcmk_err_generic); \
     } else if (expected_notifications) { \
         crm_err("FAILURE - expected %d notifications, got only %d for cmd - %s\n", \
             num_notifications, num_notifications - expected_notifications, str); \
         crm_exit(pcmk_err_generic); \
     } else { \
         if (verbose) {                   \
             crm_info("SUCCESS - %s: %d", str, rc);    \
         } else {   \
             crm_debug("SUCCESS - %s: %d", str, rc);    \
         }                          \
     } \
 }\
 
 static void
 run_fence_failure_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "fail");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
                 "Register device1 for failure test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
-                "Fence failure results off", 1, -201);
+                "Fence failure results off", 1, -pcmk_err_generic);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0),
-                "Fence failure results reboot", 1, -201);
+                "Fence failure results reboot", 1, -pcmk_err_generic);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for failure test", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_fence_failure_rollover_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "fail");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
                 "Register device1 for rollover test", 1, 0);
     stonith_key_value_freeall(params, 1, 1);
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "pass");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_dummy", params),
                 "Register device2 for rollover test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
                 "Fence rollover results off", 1, 0);
 
-    /* we expect -19 here because fence_dummy now requires 'on' to be executed on target. */
+    /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0),
-                "Fence rollover results on", 1, -19);
+                "Fence rollover results on", 1, -ENODEV);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for rollover tests", 1, 0);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id2"),
                 "Remove device2 for rollover tests", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_standard_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "pass");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_dummy", params),
                 "Register", 1, 0);
+    stonith_key_value_freeall(params, 1, 1);
+    params = NULL;
 
     single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0);
 
     single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node2", 1),
                 "Status false_1_node2", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1),
                 "Status false_1_node1", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0),
-                "Fence unknown-host (expected failure)", 0, -19);
+                "Fence unknown-host (expected failure)", 0, -ENODEV);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0),
                 "Fence false_1_node1", 1, 0);
 
-    /* we expect -19 here because fence_dummy now requires 'on' to be executed on target. */
+    /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0),
-                "Unfence false_1_node1", 1, -19);
+                "Unfence false_1_node1", 1, -ENODEV);
+
+    /* Confirm that targeting by attribute is rejected in standalone mode */
+    params = stonith_key_value_add(params, NULL, "test-id");
+    single_test(st->cmds->register_level(st, st_opts, "a=b", 1, params),
+                "Attempt to register a level by attribute", 0, -EINVAL);
+
+    /* Confirm that an invalid level index is rejected */
+    single_test(st->cmds->register_level(st, st_opts, "node1", 999, params),
+                "Attempt to register an invalid level index", 0, -EINVAL);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id"), "Remove test-id", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 sanity_tests(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     crm_info("Starting API Sanity Tests");
     run_standard_test();
     run_fence_failure_test();
     run_fence_failure_rollover_test();
     crm_info("Sanity Tests Passed");
 }
 
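 /* Exercise the full device lifecycle against fence_xvm, logging rather than
  * asserting on return codes; this backs the "standard" test mode, which
  * assumes a very specific environment (see enum test_modes).
  */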
 static void
 standard_dev_test(void)
 {
     int rc = 0;
     char *tmp = NULL;
     stonith_key_value_t *params = NULL;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4");
 
     rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params);
     crm_debug("Register: %d", rc);
 
     rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10);
     crm_debug("List: %d output: %s\n", rc, tmp ? tmp : "<none>");
 
     rc = st->cmds->monitor(st, st_opts, "test-id", 10);
     crm_debug("Monitor: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node2", 10);
     crm_debug("Status false_1_node2: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0);
     crm_debug("Fence unknown-host: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0);
     crm_debug("Fence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0);
     crm_debug("Fence alias: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10);
     crm_debug("Status alias: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->remove_device(st, st_opts, "test-id");
     crm_debug("Remove test-id: %d", rc);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void iterate_mainloop_tests(gboolean event_ready);
 
 static void
 mainloop_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     callback_rc = data->rc;
     iterate_mainloop_tests(TRUE);
 }
 
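 /* Register mainloop_callback() for the given call id, using the default test
  * timeout with timeout updates enabled.
  */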
 static int
 register_callback_helper(int callid)
 {
     return st->cmds->register_callback(st,
                                        callid,
                                        MAINLOOP_DEFAULT_TIMEOUT,
                                        st_opt_timeout_updates, NULL, "callback", mainloop_callback);
 }
 
 static void
 test_async_fence_pass(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc != 0) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 #define CUSTOM_TIMEOUT_ADDITION 10
 static void
 test_async_fence_custom_timeout(int check_event)
 {
     int rc = 0;
     static time_t begin = 0;
 
     if (check_event) {
         uint32_t diff = (time(NULL) - begin);
 
         if (callback_rc != -ETIME) {
             mainloop_test_done(FALSE);
         } else if (diff < CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT) {
             crm_err("Custom timeout test failed, callback expiration should be updated to %d, "
                     "actual timeout was %d",
                     CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT, diff);
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
     begin = time(NULL);
 
     rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_async_fence_timeout(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc != -ENODEV) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
         return;
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
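 /* Run an asynchronous monitor of the false_1 device and expect the callback
  * to report success (rc 0). */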
 static void
 test_async_monitor(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->monitor(st, 0, "false_1", MAINLOOP_DEFAULT_TIMEOUT);
     if (rc < 0) {
         crm_err("monitor failed with rc %d", rc);
         mainloop_test_done(FALSE);
         return;
     }
 
     register_callback_helper(rc);
     /* wait for event */
 }
 
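 /* Register the three fence_dummy devices used by the asynchronous tests:
  * false_1 (configured to fail), true_1 (configured to pass) and
  * false_custom_timeout (configured to fail after a delay, with an enlarged
  * pcmk_off_timeout). */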
 static void
 test_register_async_devices(int check_event)
 {
     char buf[16] = { 0, };
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2");
     params = stonith_key_value_add(params, "mode", "fail");
     st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_dummy", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2");
     params = stonith_key_value_add(params, "mode", "pass");
     st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_dummy", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2");
     params = stonith_key_value_add(params, "mode", "fail");
     params = stonith_key_value_add(params, "delay", "1000");
     snprintf(buf, sizeof(buf) - 1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION);
     params = stonith_key_value_add(params, "pcmk_off_timeout", buf);
     st->cmds->register_device(st, st_opts, "false_custom_timeout", "stonith-ng", "fence_dummy",
                               params);
     stonith_key_value_freeall(params, 1, 1);
 
     mainloop_test_done(TRUE);
 }
 
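 /* Connect to the fencer, retrying up to 10 times at one-second intervals. */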
 static void
 try_mainloop_connect(int check_event)
 {
     int tries = 10;
     int i = 0;
     int rc = 0;
 
     for (i = 0; i < tries; i++) {
         rc = st->cmds->connect(st, crm_system_name, NULL);
 
         if (!rc) {
             crm_info("stonith client connection established");
             mainloop_test_done(TRUE);
             return;
         } else {
             crm_info("stonith client connection failed");
         }
         sleep(1);
     }
 
     crm_err("API CONNECTION FAILURE");
     mainloop_test_done(FALSE);
 }
 
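 /* Run the mainloop tests in the order listed; once mainloop_iter has moved
  * past the last entry, every test has completed successfully. */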
 static void
 iterate_mainloop_tests(gboolean event_ready)
 {
     static mainloop_test_iteration_cb callbacks[] = {
         try_mainloop_connect,
         test_register_async_devices,
         test_async_monitor,
         test_async_fence_pass,
         test_async_fence_timeout,
         test_async_fence_custom_timeout,
     };
 
     if (mainloop_iter == (sizeof(callbacks) / sizeof(mainloop_test_iteration_cb))) {
         /* all tests ran, everything passed */
         crm_info("ALL MAINLOOP TESTS PASSED!");
         crm_exit(pcmk_ok);
     }
 
     callbacks[mainloop_iter] (event_ready);
 }
 
 static gboolean
 trigger_iterate_mainloop_tests(gpointer user_data)
 {
     iterate_mainloop_tests(FALSE);
     return TRUE;
 }
 
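 /* Cleanup handler (also registered for SIGTERM): disconnect from the fencer,
  * free the API object, and exit with an error if the disconnect failed. */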
 static void
 test_shutdown(int nsig)
 {
     int rc = 0;
 
     if (st) {
         rc = st->cmds->disconnect(st);
         crm_info("Disconnect: %d", rc);
 
         crm_debug("Destroy");
         stonith_api_delete(st);
     }
 
     if (rc) {
         crm_exit(pcmk_err_generic);
     }
 }
 
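 /* Drive the asynchronous tests from a GLib mainloop: a high-priority trigger
  * starts each test iteration, and SIGTERM performs a clean shutdown. */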
 static void
 mainloop_tests(void)
 {
     trig = mainloop_add_trigger(G_PRIORITY_HIGH, trigger_iterate_mainloop_tests, NULL);
     mainloop_set_trigger(trig);
     mainloop_add_signal(SIGTERM, test_shutdown);
 
     crm_info("Starting");
     mainloop = g_main_new(FALSE);
     g_main_run(mainloop);
 }
 
 int
 main(int argc, char **argv)
 {
     int argerr = 0;
     int flag;
     int option_index = 0;
 
     enum test_modes mode = test_standard;
 
     crm_set_options(NULL, "mode [options]", long_options,
                     "Tool for testing the fencing (stonith) API."
                     "\n\nRuns the standard, passive, API-sanity or mainloop test sequences.\n");
 
     while (1) {
         flag = crm_get_option(argc, argv, &option_index);
         if (flag == -1) {
             break;
         }
 
         switch (flag) {
             case 'V':
                 verbose = 1;
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             case 'p':
                 mode = test_passive;
                 break;
             case 't':
                 mode = test_api_sanity;
                 break;
             case 'm':
                 mode = test_api_mainloop;
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     crm_log_init("stonith-test", LOG_INFO, TRUE, verbose ? TRUE : FALSE, argc, argv, FALSE);
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         crm_help('?', EX_USAGE);
     }
 
     crm_debug("Create");
     st = stonith_api_new();
 
     switch (mode) {
         case test_standard:
             standard_dev_test();
             break;
         case test_passive:
             passive_test();
             break;
         case test_api_sanity:
             sanity_tests();
             break;
         case test_api_mainloop:
             mainloop_tests();
             break;
     }
 
     test_shutdown(0);
     return 0;
 }