diff --git a/BasicSanity.sh b/BasicSanity.sh
index 6ae3e22acd..2485953ea3 100755
--- a/BasicSanity.sh
+++ b/BasicSanity.sh
@@ -1,79 +1,86 @@
 #!/bin/bash
 
 test_home=`dirname $0`
 valgrind=""
 verbose=""
 tests=""
 
 if [ "$test_home" = "." ]; then
     test_home="$PWD"
 fi
 
 function info() {
     printf "$*\n"
 }
 
 function error() {
     printf "      * ERROR:   $*\n"
 }
 
 info "Test home is:\t$test_home"
 
 while true ; do
     case "$1" in
-	all) tests="pengine lrmd fencing cli"; shift;;
-	pengine|lrmd|fencing|cli) tests="$tests $1"; shift;;
-	-V|--verbose) verbose="-V"; shift;;
-	-v|--valgrind) valgrind="-v"; shift;;
-	--) shift ; break ;;
-	"") break;;
-	*) echo "unknown option: $1"; exit 1;;
+        all) tests="pengine lrmd fencing cli"; shift;;
+        pengine|lrmd|pacemaker_remote|fencing|cli) tests="$tests $1"; shift;;
+        -V|--verbose) verbose="-V"; shift;;
+        -v|--valgrind) valgrind="-v"; shift;;
+        --) shift ; break ;;
+        "") break;;
+        *) echo "unknown option: $1"; exit 1;;
     esac
 done
 
 if [ -z "$tests" ]; then
     tests="pengine lrmd cli"
 fi
 
 failed=""
 for t in $tests; do
     info "Executing the $t regression tests"
     info "============================================================"
     if [ -e $test_home/$t/regression.py ]; then
-	# Fencing, lrmd need root access
-	chmod a+x $test_home/$t/regression.py
-	echo "Enter the root password..."
-	su root -c "$test_home/$t/regression.py $verbose"
-	rc=$?
+        # Fencing, lrmd need root access
+        chmod a+x $test_home/$t/regression.py
+        echo "Enter the root password..."
+        su root -c "$test_home/$t/regression.py $verbose"
+        rc=$?
+
+    elif [ "$t" = "pacemaker_remote" ] && [ -e "$test_home/lrmd/regression.py" ]; then
+        # pacemaker_remote
+        chmod a+x $test_home/lrmd/regression.py
+        echo "Enter the root password..."
+        su root -c "$test_home/lrmd/regression.py -R $verbose"
+        rc=$?
 
     elif [ -e $test_home/$t ]; then
-	# pengine, cli
-	$test_home/$t/regression.sh $verbose $valgrind
-	rc=$?
+        # pengine, cli
+        $test_home/$t/regression.sh $verbose $valgrind
+        rc=$?
 
     elif [ $t = cli -a -e $test_home/tools ]; then
-	# Running cli tests from the source tree
-	$test_home/tools/regression.sh $verbose $valgrind
-	rc=$?
+        # Running cli tests from the source tree
+        $test_home/tools/regression.sh $verbose $valgrind
+        rc=$?
 
     else
-	error "Cannot find $t test in $test_home"
-	rc=1
+        error "Cannot find $t test in $test_home"
+        rc=1
     fi
 
     if [ $rc != 0 ]; then
-	info "$t regression tests failed: $rc"
-	failed="$failed $t"
+        info "$t regression tests failed: $rc"
+        failed="$failed $t"
     fi
 
     info "============================================================"
     info ""
     info ""
 done
 
 if [ -z "$failed" ]; then
     exit 0
 fi
 
 error "regression tests for $failed failed"
 exit 1
diff --git a/fencing/regression.py.in b/fencing/regression.py.in
index 851ae17a74..6b203a288d 100644
--- a/fencing/regression.py.in
+++ b/fencing/regression.py.in
@@ -1,1029 +1,1029 @@
 #!/usr/bin/python
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 
 import os
 import sys
 import subprocess
 import shlex
 import time
 
 def output_from_command(command):
 	test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 	test.wait()
 
 	return test.communicate()[0].split("\n")
 
 class Test:
 	def __init__(self, name, description, verbose = 0, with_cpg = 0):
 		self.name = name
 		self.description = description
 		self.cmds = []
 		self.verbose = verbose
 
 		self.result_txt = ""
 		self.cmd_tool_output = ""
 		self.result_exitcode = 0;
 
 		self.stonith_options = "-s"
 		self.enable_corosync = 0
 
 		if with_cpg:
 			self.stonith_options = "-c"
 			self.enable_corosync = 1
 
 		self.stonith_process = None
 		self.stonith_output = ""
 		self.stonith_patterns = []
 		self.negative_stonith_patterns = []
 
 		self.executed = 0
 
 		rsc_classes = output_from_command("crm_resource --list-standards")
 
 	def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None):
 		self.cmds.append(
 			{
 				"cmd" : cmd,
 				"kill" : kill,
 				"args" : args,
 				"expected_exitcode" : exitcode,
 				"stdout_match" : stdout_match,
 				"stdout_negative_match" : stdout_negative_match,
 				"no_wait" : no_wait,
 			}
 		)
 
 	def stop_pacemaker(self):
 		cmd = shlex.split("killall -9 -q pacemakerd")
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 		test.wait()
 
 	def start_environment(self):
 		### make sure we are in full control here ###
 		self.stop_pacemaker()
 
 		cmd = shlex.split("killall -9 -q stonithd")
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 		test.wait()
 
 		if self.verbose:
 			print "Starting stonithd with %s" % self.stonith_options
 
 		self.stonith_process = subprocess.Popen(
 			shlex.split("@CRM_DAEMON_DIR@/stonithd %s -V" % self.stonith_options),
 			stdout=subprocess.PIPE,
 			stderr=subprocess.PIPE)
 
 		time.sleep(1)
 
 	def clean_environment(self):
 		if self.stonith_process:
 			self.stonith_process.terminate()
 
 		self.stonith_output = self.stonith_process.communicate()[1]
 		self.stonith_process = None
 
 		if self.verbose:
 			print self.stonith_output
 
 	def add_stonith_log_pattern(self, pattern):
 		self.stonith_patterns.append(pattern)
 
 	def add_stonith_negative_log_pattern(self, pattern):
 		self.negative_stonith_patterns.append(pattern)
 
 	def add_cmd(self, cmd, args):
 		self.__new_cmd(cmd, args, 0, "")
 
 	def add_cmd_no_wait(self, cmd, args):
 		self.__new_cmd(cmd, args, 0, "", 1)
 
 	def add_cmd_check_stdout(self, cmd, args, match, no_match = ""):
 		self.__new_cmd(cmd, args, 0, match, 0, no_match)
 
 	def add_expected_fail_cmd(self, cmd, args, exitcode = 255):
 		self.__new_cmd(cmd, args, exitcode, "")
 
 	def get_exitcode(self):
 		return self.result_exitcode
 
 	def print_result(self, filler):
 		print "%s%s" % (filler, self.result_txt)
 
 	def run_cmd(self, args):
 		cmd = shlex.split(args['args'])
 		cmd.insert(0, args['cmd'])
 
 		if self.verbose:
 			print "\n\nRunning: "+" ".join(cmd)
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 
 		if args['kill']:
 			if self.verbose:
 				print "Also running: "+args['kill']
 			subprocess.Popen(shlex.split(args['kill']))
 
 		if args['no_wait'] == 0:
 			test.wait()
 		else:
 			return 0
 
 		output_res = test.communicate()
 		output = output_res[0] + output_res[1]
 
 		if self.verbose:
 			print output
 
 		if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0:
 			test.returncode = -2
 			print "STDOUT string '%s' was not found in cmd output: %s" % (args['stdout_match'], output)
 
 		if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0:
 			test.returncode = -2
 			print "STDOUT string '%s' was found in cmd output: %s" % (args['stdout_negative_match'], output)
 
 		return test.returncode;
 
 
 	def count_negative_matches(self, outline):
 		count = 0
 		for line in self.negative_stonith_patterns:
 			if outline.count(line):
 				count = 1
 				if self.verbose:
 					print "This pattern should not have matched = '%s" % (line)
 		return count
 
 	def match_stonith_patterns(self):
 		negative_matches = 0
 		cur = 0
 		pats = self.stonith_patterns
 		total_patterns = len(self.stonith_patterns)
 
 		if len(self.stonith_patterns) == 0:
 			return
 
 		for line in self.stonith_output.split("\n"):
 			negative_matches = negative_matches + self.count_negative_matches(line)
 			if len(pats) == 0:
 				continue
 			cur = -1
 			for p in pats:
 				cur = cur + 1
 				if line.count(pats[cur]):
 					del pats[cur]
 					break
 
 		if len(pats) > 0 or negative_matches:
 			if self.verbose:
 				for p in pats:
 					print "Pattern Not Matched = '%s'" % p
 
 			self.result_txt = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." % (self.name, len(pats), total_patterns, negative_matches)
 			self.result_exitcode = -1
 
 	def run(self):
 		res = 0
 		i = 1
 		self.start_environment()
 
 		if self.verbose:
 			print "\n--- START TEST - %s" % self.name
 
 		self.result_txt = "SUCCESS - '%s'" % (self.name)
 		self.result_exitcode = 0
 		for cmd in self.cmds:
 			res = self.run_cmd(cmd)
 			if res != cmd['expected_exitcode']:
 				print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode'])
 				self.result_txt = "FAILURE - '%s' failed at step %d. Command: %s %s" % (self.name, i, cmd['cmd'], cmd['args'])
 				self.result_exitcode = -1
 				break
 			else:
 				if self.verbose:
 					print "Step %d SUCCESS" % (i)
 			i = i + 1
 		self.clean_environment()
 
 		if self.result_exitcode == 0:
 			self.match_stonith_patterns()
 
 		print self.result_txt
 		if self.verbose:
 			print "--- END TEST - %s\n" % self.name
 
 		self.executed = 1
 		return res
 
 class Tests:
 	def __init__(self, verbose = 0):
 		self.tests = []
 		self.verbose = verbose
 		self.autogen_corosync_cfg = 0
 		if not os.path.exists("/etc/corosync/corosync.conf"):
 			self.autogen_corosync_cfg = 1
 
 	def new_test(self, name, description, with_cpg = 0):
 		test = Test(name, description, self.verbose, with_cpg)
 		self.tests.append(test)
 		return test
 
 	def print_list(self):
 		print "\n==== %d TESTS FOUND ====" % (len(self.tests))
 		print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")
 		print "%35s - %s" % ("--------------------", "--------------------")
 		for test in self.tests:
 			print "%35s - %s" % (test.name, test.description)
 		print "==== END OF LIST ====\n"
 
 
 	def start_corosync(self):
 		if self.verbose:
 			print "Starting corosync"
 
 		test = subprocess.Popen("corosync", stdout=subprocess.PIPE)
 		test.wait()
 		time.sleep(10)
 
 	def stop_corosync(self):
 		cmd = shlex.split("killall -9 -q corosync")
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 		test.wait()
 
 	def run_single(self, name):
 		for test in self.tests:
 			if test.name == name:
 				test.run()
 				break;
 
 	def run_tests_matching(self, pattern):
 		for test in self.tests:
 			if test.name.count(pattern) != 0:
 				test.run()
 
 	def run_cpg_only(self):
 		for test in self.tests:
 			if test.enable_corosync:
 				test.run()
 
 	def run_no_cpg(self):
 		for test in self.tests:
 			if not test.enable_corosync:
 				test.run()
 
 	def run_tests(self):
 		for test in self.tests:
 			test.run()
 
 	def exit(self):
 		for test in self.tests:
 			if test.executed == 0:
 				continue
 
 			if test.get_exitcode() != 0:
 				sys.exit(-1)
 
 		sys.exit(0)
 
 	def print_results(self):
 		failures = 0;
 		success = 0;
 		print "\n\n======= FINAL RESULTS =========="
 		print "\n--- FAILURE RESULTS:"
 		for test in self.tests:
 			if test.executed == 0:
 				continue
 
 			if test.get_exitcode() != 0:
 				failures = failures + 1
 				test.print_result("    ")
 			else:
 				success = success + 1
 
 		if failures == 0:
 			print "    None"
 
 		print "\n--- TOTALS\n    Pass:%d\n    Fail:%d\n" % (success, failures)
 	def build_api_sanity_tests(self):
 		verbose_arg = ""
 		if self.verbose:
 			verbose_arg = "-V"
 
 		test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.")
 		test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg))
 
 		test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", 1)
 		test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-m %s" % (verbose_arg))
 
 	def build_custom_timeout_tests(self):
 		# custom timeout without topology
 		test = self.new_test("cpg_custom_timeout_1",
 				"Verify per device timeouts work as expected without using topology.", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4\"")
 		test.add_cmd("stonith_admin", "-F node3 -t 2")
 		# timeout is 2+1+4 = 7
 		test.add_stonith_log_pattern("remote op timeout set to 7")
 
 		# custom timeout _WITH_ topology
 		test = self.new_test("cpg_custom_timeout_2",
 				"Verify per device timeouts work as expected _WITH_ topology.", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4000\"")
 		test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 		test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1")
 		test.add_cmd("stonith_admin", "-r node3 -i 3 -v false2")
 		test.add_cmd("stonith_admin", "-F node3 -t 2")
 		# timeout is 2+1+4000 = 4003
 		test.add_stonith_log_pattern("remote op timeout set to 4003")
 
 	def build_fence_merge_tests(self):
 
 		### Simple test that overlapping fencing operations get merged
 		test = self.new_test("cpg_custom_merge_single",
 				"Verify overlapping identical fencing operations are merged, no fencing levels used.", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" ")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd("stonith_admin", "-F node3 -t 10")
 		### one merger will happen
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		### the pattern below signifies that both the original and duplicate operation completed
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 
 		### Test that multiple mergers occur
 		test = self.new_test("cpg_custom_merge_multiple",
 				"Verify multiple overlapping identical fencing operations are merged", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" ")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd("stonith_admin", "-F node3 -t 10")
 		### 4 mergers should occur
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		### the pattern below signifies that both the original and duplicate operation completed
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 
 		### Test that multiple mergers occur with topologies used
 		test = self.new_test("cpg_custom_merge_with_topology",
 				"Verify multiple overlapping identical fencing operations are merged with fencing levels.", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" ")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 		test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2")
 		test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10")
 		test.add_cmd("stonith_admin", "-F node3 -t 10")
 		### 4 mergers should occur
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client")
 		### the pattern below signifies that both the original and duplicate operation completed
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 		test.add_stonith_log_pattern("Operation off of node3 by")
 
 
 		test = self.new_test("cpg_custom_no_merge",
 				"Verify differing fencing operations are not merged", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3 node2\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3 node2\" ")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3 node2\"")
 		test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 		test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2")
 		test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1")
 		test.add_cmd_no_wait("stonith_admin", "-F node2 -t 10")
 		test.add_cmd("stonith_admin", "-F node3 -t 10")
 		test.add_stonith_negative_log_pattern("Merging stonith action off for node node3 originating from client")
 
 	def build_standalone_tests(self):
 		test_types = [
 			{
 				"prefix" : "standalone" ,
 				"use_cpg" : 0,
 			},
 			{
 				"prefix" : "cpg" ,
 				"use_cpg" : 1,
 			},
 		]
 
 		# test what happens when all devices timeout
 		for test_type in test_types:
 			test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"],
 					"Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false2  -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false3 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 194)
 
 			if test_type["use_cpg"] == 1:
 				test.add_stonith_log_pattern("remote op timeout set to 6")
 
 			test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -62")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false3' returned: -62")
 
 		# test what happens when multiple devices can fence a node, but the first device fails.
 		for test_type in test_types:
 			test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"],
 					"Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-F node3 -t 2")
 
 			if test_type["use_cpg"] == 1:
 				test.add_stonith_log_pattern("remote op timeout set to 6")
 
 		# simple topology test for one device
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 
 			test = self.new_test("%s_topology_simple" % test_type["prefix"],
 					"Verify all fencing devices at a level are used.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v true")
 			test.add_cmd("stonith_admin", "-F node3 -t 2")
 
 			test.add_stonith_log_pattern("remote op timeout set to 2")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0")
 
 		# test what happens when the first fencing level has multiple devices.
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 
 			test = self.new_test("%s_topology_device_fails" % test_type["prefix"],
 					"Verify if one device in a level fails, the other is tried.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R false  -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v false")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v true")
 			test.add_cmd("stonith_admin", "-F node3 -t 20")
 
 			test.add_stonith_log_pattern("remote op timeout set to 4")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -62")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0")
 
 		# test what happens when the first fencing level fails.
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 
 			test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"],
 					"Verify if one level fails, the next leve is tried.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true4  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2")
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3")
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4")
 
 			test.add_cmd("stonith_admin", "-F node3 -t 2")
 
 			test.add_stonith_log_pattern("remote op timeout set to 12")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -62")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0")
 
 
 		# test what happens when the first fencing level had devices that no one has registered
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 
 			test = self.new_test("%s_topology_missing_devices" % test_type["prefix"],
 					"Verify topology can continue with missing devices.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true4  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2")
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3")
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4")
 
 			test.add_cmd("stonith_admin", "-F node3 -t 2")
 
 		# Test what happens if multiple fencing levels are defined, and then the first one is removed.
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 
 			test = self.new_test("%s_topology_level_removal" % test_type["prefix"],
 					"Verify level removal works.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true4  -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1")
 			test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2")
 			test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2")
 
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3")
 			test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4")
 
 			# Now remove level 2, verify none of the devices in level two are hit.
 			test.add_cmd("stonith_admin", "-d node3 -i 2")
 
 			test.add_cmd("stonith_admin", "-F node3 -t 20")
 
 			test.add_stonith_log_pattern("remote op timeout set to 8")
 			test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -62")
 			test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: -62")
 			test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: -1001")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0")
 			test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0")
 
 		# test the stonith builds the correct list of devices that can fence a node.
 		for test_type in test_types:
 			test = self.new_test("%s_list_devices" % test_type["prefix"],
 					"Verify list of devices that can fence a node is correct", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\"")
 			test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 			test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"")
 
 			test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1")
 			test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1")
 
 		# simple test of device monitor
 		for test_type in test_types:
 			test = self.new_test("%s_monitor" % test_type["prefix"],
 					"Verify device is reachable", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\"")
 			test.add_cmd("stonith_admin", "-R false1  -a fence_false -o \"pcmk_host_list=node3\"")
 
 			test.add_cmd("stonith_admin", "-Q true1")
 			test.add_cmd("stonith_admin", "-Q false1")
 			test.add_expected_fail_cmd("stonith_admin", "-Q true2", 237)
 
 		# Verify monitor occurs for duration of timeout period on failure
 		for test_type in test_types:
 			test = self.new_test("%s_monitor_timeout" % test_type["prefix"],
 					"Verify monitor uses duration of timeout period given.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"")
-			test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 5", 23)
+			test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 5", 195)
 			test.add_stonith_log_pattern("Attempt 2 to execute")
 
 		# Verify monitor occurs for duration of timeout period on failure, but stops at max retries
 		for test_type in test_types:
 			test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"],
 					"Verify monitor retries until max retry value or timeout is hit.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"")
-			test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 15", 23)
+			test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 15", 195)
 			test.add_stonith_log_pattern("Attempted to execute agent fence_dummy_monitor_fail (list) the maximum number of times")
 
 		# simple register test
 		for test_type in test_types:
 			test = self.new_test("%s_register" % test_type["prefix"],
 					"Verify devices can be registered and un-registered", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\"")
 
 			test.add_cmd("stonith_admin", "-Q true1")
 
 			test.add_cmd("stonith_admin", "-D true1")
 
 			test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237)
 
 
 		# simple reboot test
 		for test_type in test_types:
 			test = self.new_test("%s_reboot" % test_type["prefix"],
 					"Verify devices can be rebooted", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\"")
 
 			test.add_cmd("stonith_admin", "-B node3 -t 2")
 
 			test.add_cmd("stonith_admin", "-D true1")
 
 			test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237)
 
 		# test fencing history.
 		for test_type in test_types:
 			if test_type["use_cpg"] == 0:
 				continue
 			test = self.new_test("%s_fence_history" % test_type["prefix"],
 					"Verify last fencing operation is returned.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\"")
 
 			test.add_cmd("stonith_admin", "-F node3 -t 2 -V")
 
 			test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "")
 
 		# simple test of dynamic list query
 		for test_type in test_types:
 			test = self.new_test("%s_dynamic_list_query" % test_type["prefix"],
 					"Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true")
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true")
 
 			test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found")
 
 
 		# fence using dynamic list query
 		for test_type in test_types:
 			test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"],
 					"Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true")
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true")
 
 			test.add_cmd("stonith_admin", "-F fake_port_1 -t 5 -V");
 
 		# simple test of  query using status action
 		for test_type in test_types:
 			test = self.new_test("%s_status_query" % test_type["prefix"],
 					"Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"])
 			test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_check=status\"")
 			test.add_cmd("stonith_admin", "-R true2  -a fence_true -o \"pcmk_host_check=status\"")
 			test.add_cmd("stonith_admin", "-R true3  -a fence_true -o \"pcmk_host_check=status\"")
 
 			test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found")
 
 
 	def build_nodeid_tests(self):
 		"""Register tests verifying whether the corosync nodeid is passed to the fence agent.

 		The dummy metadata-helper agent advertises a 'nodeid' parameter, so the
 		daemon is expected to add the victim's nodeid only for that agent.
 		"""
 		our_uname = output_from_command("uname -n")
 		if our_uname:
 			# output_from_command appears to return a list of output lines;
 			# the first line is the local host name (from uname -n).
 			our_uname = our_uname[0]

 		### verify nodeid is supplied when nodeid is in the metadata parameters
 		test = self.new_test("cpg_supply_nodeid",
 				"Verify nodeid is given when fence agent has nodeid as parameter", 1)

 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname))
 		test.add_stonith_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname))

 		### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters
 		test = self.new_test("cpg_do_not_supply_nodeid",
 				"Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", 1)

 		test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=%s\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname))
 		test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname))

 		### verify nodeid use doesn't explode standalone mode
 		test = self.new_test("standalone_do_not_supply_nodeid",
 				"Verify nodeid in metadata parameter list doesn't kill standalone mode", 0)

 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname))
 		test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname))
 
 
 	def build_unfence_tests(self):
 		"""Register tests for unfencing (-U), including on_target=true agents and topology.

 		The metadata-helper agent declares its "on" action with on_target="1",
 		so the tests assert the '(on) to be executed on the target node' log.
 		"""
 		our_uname = output_from_command("uname -n")
 		if our_uname:
 			# first output line is the local host name (from uname -n)
 			our_uname = our_uname[0]

 		### Simple test unfencing works
 		test = self.new_test("cpg_unfence_simple",
 				"Verify simple unfencing.", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-R true1  -a fence_true -o \"pcmk_host_list=node3\" ")
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node3\"")
 		test.add_cmd("stonith_admin", "-U node3 -t 3")

 		### verify unfencing using on_target device
 		test = self.new_test("cpg_unfence_on_target_1",
 				"Verify unfencing with on_target = true", 1)
 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname))
 		test.add_stonith_log_pattern("(on) to be executed on the target node")


 		### verify failure of unfencing using on_target device
 		# node_fake_1234 is not a cluster member, so the on-target "on" action
 		# cannot be delegated to it and the request must fail.
 		test = self.new_test("cpg_unfence_on_target_2",
 				"Verify failure unfencing with on_target = true", 1)
 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname))
-		test.add_expected_fail_cmd("stonith_admin", "-U node_fake_1234 -t 3", 194)
+		test.add_expected_fail_cmd("stonith_admin", "-U node_fake_1234 -t 3", 143)
 		test.add_stonith_log_pattern("(on) to be executed on the target node")


 		### verify unfencing using on_target device with topology
 		test = self.new_test("cpg_unfence_on_target_3",
 				"Verify unfencing with on_target = true using topology", 1)

 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=%s node3\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=%s node3\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s node3\"" % (our_uname))

 		test.add_cmd("stonith_admin", "-r %s -i 1 -v false1" % (our_uname))
 		test.add_cmd("stonith_admin", "-r %s -i 2 -v false2" % (our_uname))
 		test.add_cmd("stonith_admin", "-r %s -i 3 -v true1" % (our_uname))

 		test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname))
 		test.add_stonith_log_pattern("(on) to be executed on the target node")

 		### verify unfencing using on_target device with topology fails
 		test = self.new_test("cpg_unfence_on_target_4",
 				"Verify unfencing failure with on_target = true using topology", 1)

 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=%s node_fake\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=%s node_fake\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s node_fake\"" % (our_uname))

 		test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1")
 		test.add_cmd("stonith_admin", "-r node_fake -i 2 -v false2")
 		test.add_cmd("stonith_admin", "-r node_fake -i 3 -v true1")

 		test.add_expected_fail_cmd("stonith_admin", "-U node_fake -t 3", 194)
 		test.add_stonith_log_pattern("(on) to be executed on the target node")


 		### verify use of on_target = true for "on" action does not interfere with "off" action
 		test = self.new_test("cpg_unfence_on_target_ignored",
 				"Verify on target is ignored for other actions", 1)
 		test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=%s node_fake\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=%s node_fake\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_metadata_helper -o \"pcmk_host_list=%s node_fake\"" % (our_uname))
 		test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1")
 		test.add_cmd("stonith_admin", "-r node_fake -i 2 -v false2")
 		test.add_cmd("stonith_admin", "-r node_fake -i 3 -v true1")
 		test.add_cmd("stonith_admin", "-F node_fake -t 3")
 		test.add_stonith_log_pattern("(on) to be executed on the target node")
 
 	def setup_environment(self, use_corosync):
 		if self.autogen_corosync_cfg and use_corosync:
 			corosync_conf = ("""
 totem {
         version: 2
         crypto_cipher: none
         crypto_hash: none
 
         nodeid:         101
         secauth:        off
 
         interface {
                 ttl: 1
                 ringnumber: 0
                 mcastport: 6666
                 mcastaddr: 226.94.1.1
                 bindnetaddr: 127.0.0.1
         }
 }
 
 logging {
         debug: off
         fileline: off
         to_syslog: no
         to_stderr: no
         syslog_facility: daemon
         timestamp: on
         to_logfile: yes
         logfile: /var/log/corosync.log
         logfile_priority: info
 }
 """)
 
 			os.system("cat <<-END >>/etc/corosync/corosync.conf\n%s\nEND" % (corosync_conf))
 
 
 		if use_corosync:
 			### make sure we are in control ###
 			self.stop_corosync()
 			self.start_corosync()
 
 		monitor_fail_agent = ("""#!/usr/bin/python
 import sys
 def main():
     for line in sys.stdin.readlines():
         if line.count("monitor") > 0:
             sys.exit(-1);
     sys.exit(-1)
 if __name__ == "__main__":
     main()
 """)
 		on_target_agent = ("""#!/usr/bin/python
 import sys
 def main():
     for line in sys.stdin.readlines():
         off_hit = 0
         nodeid_found = 0
         if line.count("monitor") > 0:
             sys.exit(0)
         if line.count("metadata") > 0:
             print '<resource-agent name="fence_dummy_metadata_helper" shortdesc="Dummy Fence agent for testing">'
             print '  <longdesc>dummy description.</longdesc>'
             print '  <vendor-url>http://www.example.com</vendor-url>'
             print '  <parameters>'
             print '    <parameter name="action" unique="0" required="1">'
             print '      <getopt mixed="-o, --action=[action]"/>'
             print '      <content type="string" default="reboot"/>'
             print '      <shortdesc lang="en">Fencing Action</shortdesc>'
             print '    </parameter>'
             print '    <parameter name="nodeid" unique="0" required="0">'
             print '      <content type="string"/>'
             print '      <shortdesc lang="en">Corosync nodeid of the fence victim</shortdesc>'
             print '    </parameter>'
             print '    <parameter name="port" unique="0" required="0">'
             print '      <getopt mixed="-n, --plug=[id]"/>'
             print '      <content type="string"/>'
             print '      <shortdesc lang="en">Physical plug number or name of virtual machine</shortdesc>'
             print '    </parameter>'
             print '  </parameters>'
             print '  <actions>'
             print '    <action name="on" on_target="1"/>'
             print '    <action name="off"/>'
             print '    <action name="monitor"/>'
             print '    <action name="metadata"/>'
             print '  </actions>'
             print '</resource-agent>'
             sys.exit(0)
         if line.count("on") > 0:
             sys.exit(0)
         if line.count("off") > 0:
             off_hit = 1
         if line.count("nodeid") > 0:
             nodeid_found = 1
 
     if off_hit and nodeid_found:
         sys.exit(0)
     sys.exit(-1)
 if __name__ == "__main__":
     main()
 """)
 
 		os.system("cat <<-END >>/usr/sbin/fence_dummy_metadata_helper\n%s\nEND" % (on_target_agent))
 		os.system("chmod 711 /usr/sbin/fence_dummy_metadata_helper")
 
 		os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor_fail\n%s\nEND" % (monitor_fail_agent))
 		os.system("chmod 711 /usr/sbin/fence_dummy_monitor_fail")
 		os.system("cp /usr/share/pacemaker/tests/cts/fence_false /usr/sbin/fence_false")
 		os.system("cp /usr/share/pacemaker/tests/cts/fence_true /usr/sbin/fence_true")
 
 	def cleanup_environment(self, use_corosync):
 		"""Undo setup_environment: stop corosync, dump/remove its log, delete generated files."""
 		if use_corosync:
 			self.stop_corosync()

 			# In verbose mode, replay the corosync log to stdout before
 			# deleting it so failures can be diagnosed from the test output.
 			if self.verbose and os.path.exists('/var/log/corosync.log'):
 				print "Daemon output"
 				f = open('/var/log/corosync.log', 'r')
 				for line in f.readlines():
 					print line.strip()
 				os.remove('/var/log/corosync.log')

 		# Only remove the config if this run generated it.
 		if self.autogen_corosync_cfg:
 			os.system("rm -f /etc/corosync/corosync.conf")

 		os.system("rm -f /usr/sbin/fence_dummy_metadata_helper")
 		os.system("rm -f /usr/sbin/fence_dummy_monitor_fail")
 
 class TestOptions:
 	"""Parse and hold the regression runner's command-line options."""
 	def __init__(self):
 		# Defaults: run all registered tests, no filtering, quiet output.
 		self.options = {}
 		self.options['list-tests'] = 0
 		self.options['run-all'] = 1
 		self.options['run-only'] = ""
 		self.options['run-only-pattern'] = ""
 		self.options['verbose'] = 0
 		self.options['invalid-arg'] = ""
 		self.options['cpg-only'] = 0
 		self.options['no-cpg'] = 0
 		self.options['show-usage'] = 0

 	def build_options(self, argv):
 		"""Populate self.options from argv[1:]; unrecognized arguments are silently ignored."""
 		args = argv[1:]
 		# 'skip' consumes the value following -r/-p on the next iteration.
 		skip = 0
 		for i in range(0, len(args)):
 			if skip:
 				skip = 0
 				continue
 			elif args[i] == "-h" or args[i] == "--help":
 				self.options['show-usage'] = 1
 			elif args[i] == "-l" or args[i] == "--list-tests":
 				self.options['list-tests'] = 1
 			elif args[i] == "-V" or args[i] == "--verbose":
 				self.options['verbose'] = 1
 			elif args[i] == "-n" or args[i] == "--no-cpg":
 				self.options['no-cpg'] = 1
 			elif args[i] == "-c" or args[i] == "--cpg-only":
 				self.options['cpg-only'] = 1
 			elif args[i] == "-r" or args[i] == "--run-only":
 				self.options['run-only'] = args[i+1]
 				skip = 1
 			elif args[i] == "-p" or args[i] == "--run-only-pattern":
 				self.options['run-only-pattern'] = args[i+1]
 				skip = 1

 	def show_usage(self):
 		"""Print the usage message to stdout."""
 		print "usage: " + sys.argv[0] + " [options]"
 		print "If no options are provided, all tests will run"
 		print "Options:"
 		print "\t [--help | -h]                        Show usage"
 		print "\t [--list-tests | -l]                  Print out all registered tests."
 		print "\t [--cpg-only | -c]                    Only run tests that require corosync."
 		print "\t [--no-cpg | -n]                      Only run tests that do not require corosync"
 		print "\t [--run-only | -r 'testname']         Run a specific test"
 		print "\t [--verbose | -V]                     Verbose output"
 		print "\t [--run-only-pattern | -p 'string']   Run only tests containing the string value"
 		print "\n\tExample: Run only the test 'start_top'"
 		print "\t\t python ./regression.py --run-only start_stop"
 		print "\n\tExample: Run only the tests with the string 'systemd' present in them"
 		print "\t\t python ./regression.py --run-only-pattern systemd"
 
 def main(argv):
 	"""Entry point: parse options, register tests, set up the environment, run, clean up."""
 	o = TestOptions()
 	o.build_options(argv)

 	# Assume corosync is needed unless --no-cpg is given below.
 	use_corosync = 1

 	tests = Tests(o.options['verbose'])
 	tests.build_standalone_tests()
 	tests.build_custom_timeout_tests()
 	tests.build_api_sanity_tests()
 	tests.build_fence_merge_tests()
 	tests.build_unfence_tests()
 	tests.build_nodeid_tests()

 	# Informational modes exit before touching the environment.
 	if o.options['list-tests']:
 		tests.print_list()
 		sys.exit(0)
 	elif o.options['show-usage']:
 		o.show_usage()
 		sys.exit(0)

 	print "Starting ..."

 	if o.options['no-cpg']:
 		use_corosync = 0

 	tests.setup_environment(use_corosync)

 	# Filters are mutually exclusive, checked in priority order:
 	# pattern > single test > no-cpg > cpg-only > everything.
 	if o.options['run-only-pattern'] != "":
 		tests.run_tests_matching(o.options['run-only-pattern'])
 		tests.print_results()
 	elif o.options['run-only'] != "":
 		tests.run_single(o.options['run-only'])
 		tests.print_results()
 	elif o.options['no-cpg']:
 		tests.run_no_cpg()
 		tests.print_results()
 	elif o.options['cpg-only']:
 		tests.run_cpg_only()
 		tests.print_results()
 	else:
 		tests.run_tests()
 		tests.print_results()

 	# Always clean up, even after a failed run, before reporting exit status.
 	tests.cleanup_environment(use_corosync)
 	tests.exit()
 if __name__=="__main__":
 	main(sys.argv)
diff --git a/fencing/remote.c b/fencing/remote.c
index 5a8156af53..18c13bce0a 100644
--- a/fencing/remote.c
+++ b/fencing/remote.c
@@ -1,1358 +1,1358 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/util.h>
 #include <internal.h>
 
 #define TIMEOUT_MULTIPLY_FACTOR 1.2
 
 /* One peer's answer to a device query: which fencing devices it can use
  * against the target, and any per-device timeout overrides it reported. */
 typedef struct st_query_result_s {
     char *host;                 /* uname of the peer that sent this reply */
     int devices;                /* number of devices reported (presumably len of device_list — confirm) */
     /* only try peers for non-topology based operations once */
     gboolean tried;
     GListPtr device_list;       /* device ids (char *) owned by this struct */
     GHashTable *custom_action_timeouts; /* device id -> custom timeout */
     /* Subset of devices that peer has verified connectivity on */
     GHashTable *verified_devices;

 } st_query_result_t;
 
 /* op->id -> remote_fencing_op_t; created lazily in create_remote_stonith_op(),
  * entries freed via free_remote_op() */
 GHashTable *remote_op_list = NULL;
 void call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer);
 static void remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup);
 extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                                   int call_options);

 static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
 static int get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer,
                                 int default_timeout);
 
 /*!
  * \internal
  * \brief GDestroyNotify for st_query_result_t: release the host name, the
  *        device list and both hash tables, then the struct itself.
  */
 static void
 free_remote_query(gpointer data)
 {
     st_query_result_t *result = data;

     if (result == NULL) {
         return;
     }

     crm_trace("Free'ing query result from %s", result->host);
     free(result->host);
     g_list_free_full(result->device_list, free);
     g_hash_table_destroy(result->custom_action_timeouts);
     g_hash_table_destroy(result->verified_devices);
     free(result);
 }
 
 /*!
  * \internal
  * \brief Cancel any of the op's three pending timers (query, total, and
  *        per-attempt) and zero the source ids so they are not removed twice.
  */
 static void
 clear_remote_op_timers(remote_fencing_op_t * op)
 {
     if (op->query_timer) {
         g_source_remove(op->query_timer);
         op->query_timer = 0;
     }
     if (op->op_timer_total) {
         g_source_remove(op->op_timer_total);
         op->op_timer_total = 0;
     }
     if (op->op_timer_one) {
         g_source_remove(op->op_timer_one);
         op->op_timer_one = 0;
     }
 }
 
 /*!
  * \internal
  * \brief GDestroyNotify for remote_fencing_op_t entries in remote_op_list:
  *        cancel timers, then free all owned strings, query results and XML.
  */
 static void
 free_remote_op(gpointer data)
 {
     remote_fencing_op_t *op = data;

     crm_trace("Free'ing op %s for %s", op->id, op->target);
     crm_log_xml_debug(op->request, "Destroying");

     /* Stop outstanding timers first so no callback can fire on freed memory */
     clear_remote_op_timers(op);

     free(op->id);
     free(op->action);
     free(op->target);
     free(op->client_id);
     free(op->client_name);
     free(op->originator);

     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
     free(op);
 }
 
 /*!
  * \internal
  * \brief Build a T_STONITH_NOTIFY_FENCE XML node describing a finished
  *        operation and its result code.  Caller owns and must free it.
  */
 static xmlNode *
 create_op_done_notify(remote_fencing_op_t * op, int rc)
 {
     xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);

     crm_xml_add_int(notify_data, "state", op->state);
     crm_xml_add_int(notify_data, F_STONITH_RC, rc);
     crm_xml_add(notify_data, F_STONITH_TARGET, op->target);
     crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
     crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
     crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name);

     return notify_data;
 }
 
 /*!
  * \internal
  * \brief Broadcast an operation's result to every stonith peer as a
  *        T_STONITH_NOTIFY "broadcast" reply.
  */
 static void
 bcast_result_to_peers(remote_fencing_op_t * op, int rc)
 {
     static int count = 0;           /* monotonically increasing broadcast id */
     xmlNode *broadcast = create_xml_node(NULL, T_STONITH_REPLY);
     xmlNode *payload = create_op_done_notify(op, rc);

     count++;
     crm_trace("Broadcasting result to peers");

     crm_xml_add(broadcast, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(broadcast, F_SUBTYPE, "broadcast");
     crm_xml_add(broadcast, F_STONITH_OPERATION, T_STONITH_NOTIFY);
     crm_xml_add_int(broadcast, "count", count);
     add_message_xml(broadcast, F_STONITH_CALLDATA, payload);

     send_cluster_message(NULL, crm_msg_stonith_ng, broadcast, FALSE);

     free_xml(payload);
     free_xml(broadcast);
 }
 
 /*!
  * \internal
  * \brief Send the final reply to the local client that initiated fencing and
  *        notify all local clients of the result.  Idempotent: guarded by
  *        op->notify_sent.
  */
 static void
 handle_local_reply_and_notify(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     xmlNode *notify_data = NULL;
     xmlNode *reply = NULL;

     if (op->notify_sent == TRUE) {
         /* nothing to do */
         return;
     }

     /* Do notification with a clean data object */
     notify_data = create_op_done_notify(op, rc);
     crm_xml_add_int(data, "state", op->state);
     crm_xml_add(data, F_STONITH_TARGET, op->target);
     crm_xml_add(data, F_STONITH_OPERATION, op->action);

     reply = stonith_construct_reply(op->request, NULL, data, rc);
     crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);

     /* Send fencing OP reply to local client that initiated fencing */
     do_local_reply(reply, op->client_id, op->call_options & st_opt_sync_call, FALSE);

     /* bcast to all local clients that the fencing operation happend */
     do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);

     /* mark this op as having notify's already sent */
     op->notify_sent = TRUE;
     free_xml(reply);
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Propagate a finished operation's result to every request that was
  *        merged into it as a duplicate (see merge_duplicates()).
  *
  * Duplicates still in st_duplicate state inherit op's final state and get a
  * full remote_op_done() pass; any other state means the duplicate already
  * timed out on its own and is skipped.
  */
 static void
 handle_duplicates(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     GListPtr iter = NULL;

     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *other = iter->data;

         if (other->state == st_duplicate) {
             /* Ie. it hasn't timed out already */
             other->state = op->state;
             /* Fixed typo: "Peforming" -> "Performing" */
             crm_debug("Performing duplicate notification for %s@%s.%.8s = %s", other->client_name,
                       other->originator, other->id, pcmk_strerror(rc));
             remote_op_done(other, data, rc, TRUE);

         } else {
             crm_err("Skipping duplicate notification for %s@%s - %d", other->client_name,
                     other->originator, other->state);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Finalize a remote operation.
  *
  * \description This function has two code paths.
  *
  * Path 1. This node is the owner of the operation and needs
  *         to notify the cpg group via a broadcast as to the operation's
  *         results.
  *
  * Path 2. The cpg broadcast is received. All nodes notify their local
  *         stonith clients the operation results.
  *
  * So, The owner of the operation first notifies the cluster of the result,
  * and once that cpg notify is received back it notifies all the local clients.
  *
  * Nodes that are passive watchers of the operation will receive the
  * broadcast and only need to notify their local clients the operation finished.
  *
  * \param op, The fencing operation to finalize
  * \param data, The xml msg reply (if present) of the last delegated fencing
  *              operation.
  * \param dup, Is this operation a duplicate, if so treat it a little differently
  *             making sure the broadcast is not sent out.
  */
 static void
 remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup)
 {
     int level = LOG_ERR;
     const char *subt = NULL;
     xmlNode *local_data = NULL;

     op->completed = time(NULL);
     clear_remote_op_timers(op);

     /* Guard against double notification for the same op */
     if (op->notify_sent == TRUE) {
         crm_err("Already sent notifications for '%s of %s by %s' (for=%s@%s.%.8s, state=%d): %s",
                 op->action, op->target, op->delegate ? op->delegate : "<no-one>",
                 op->client_name, op->originator, op->id, op->state, pcmk_strerror(rc));
         goto remote_op_done_cleanup;
     }

     if (!op->delegate && data) {
         /* Record which peer actually executed the fencing (origin of reply) */
         op->delegate = crm_element_value_copy(data, F_ORIG);
     }

     if (data == NULL) {
         /* Placeholder so the code below always has a data object to
          * annotate; freed at cleanup */
         data = create_xml_node(NULL, "remote-op");
         local_data = data;
     }

     /* Tell everyone the operation is done, we will continue
      * with doing the local notifications once we receive
      * the broadcast back. */
     subt = crm_element_value(data, F_SUBTYPE);
     if (dup == FALSE && safe_str_neq(subt, "broadcast")) {
         /* Defer notification until the bcast message arrives */
         bcast_result_to_peers(op, rc);
         goto remote_op_done_cleanup;
     }

     /* Failures are logged at LOG_ERR only on the originating node;
      * successes and duplicates are always LOG_NOTICE */
     if (rc == pcmk_ok || dup) {
         level = LOG_NOTICE;
     } else if (safe_str_neq(op->originator, stonith_our_uname)) {
         level = LOG_NOTICE;
     }

     do_crm_log(level,
                "Operation %s of %s by %s for %s@%s.%.8s: %s",
                op->action, op->target, op->delegate ? op->delegate : "<no-one>",
                op->client_name, op->originator, op->id, pcmk_strerror(rc));

     handle_local_reply_and_notify(op, data, rc);

     if (dup == FALSE) {
         handle_duplicates(op, data, rc);
     }

     /* Free non-essential parts of the record
      * Keep the record around so we can query the history
      */
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
         op->query_results = NULL;
     }

     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }

   remote_op_done_cleanup:
     free_xml(local_data);
 }
 
 /*!
  * \internal
  * \brief Per-attempt timer callback: the current device/peer attempt timed
  *        out, so retry via call_remote_stonith() with the next candidate.
  *        Always returns FALSE to make the GSource one-shot.
  */
 static gboolean
 remote_op_timeout_one(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;

     op->op_timer_one = 0;

     /* "%.8s" (precision) truncates op->id to 8 chars like every other log
      * in this file; the previous "%8s" only padded and printed the full id */
     crm_notice("Remote %s operation on %s for %s.%.8s timed out",
                op->action, op->target, op->client_name, op->id);
     call_remote_stonith(op, NULL);
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Total-timeout timer callback: if the operation is still unfinished,
  *        mark it failed and finalize it with -ETIME.  Returns FALSE so the
  *        GSource fires only once.
  */
 static gboolean
 remote_op_timeout(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;

     op->op_timer_total = 0;

     if (op->state != st_done) {
         crm_debug("Action %s (%s) for %s (%s) timed out",
                   op->action, op->id, op->target, op->client_name);
         op->state = st_failed;
         remote_op_done(op, NULL, -ETIME, FALSE);

     } else {
         crm_debug("Action %s (%s) for %s (%s) already completed",
                   op->action, op->id, op->target, op->client_name);
     }

     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Query-phase timer callback: when the window for peer query replies
  *        closes, either proceed with whatever results arrived or fail the
  *        operation via remote_op_timeout().  One-shot (returns FALSE).
  */
 static gboolean
 remote_op_query_timeout(gpointer data)
 {
     remote_fencing_op_t *op = data;

     op->query_timer = 0;
     if (op->state == st_done) {
         crm_debug("Operation %s for %s already completed", op->id, op->target);
     } else if (op->state == st_exec) {
         crm_debug("Operation %s for %s already in progress", op->id, op->target);
     } else if (op->query_results) {
         /* At least one peer answered: start executing with what we have */
         crm_debug("Query %s for %s complete: %d", op->id, op->target, op->state);
         call_remote_stonith(op, NULL);
     } else {
         crm_debug("Query %s for %s timed out: %d", op->id, op->target, op->state);
         /* Cancel the total timer first: remote_op_timeout() is being invoked
          * directly here, not by that timer */
         if (op->op_timer_total) {
             g_source_remove(op->op_timer_total);
             op->op_timer_total = 0;
         }
         remote_op_timeout(op);
     }

     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Advance the operation to the next configured fencing topology level
  *        for its target.
  *
  * \return pcmk_ok if a next level was selected (or no topology applies),
  *         -EINVAL when all levels are exhausted.
  */
 static int
 stonith_topology_next(remote_fencing_op_t * op)
 {
     stonith_topology_t *tp = NULL;

     if (op->target) {
         /* Queries don't have a target set */
         tp = g_hash_table_lookup(topology, op->target);
     }
     if (tp == NULL) {
         /* No topology for this target: plain device-based fencing applies */
         return pcmk_ok;
     }

     set_bit(op->call_options, st_opt_topology);

     /* Skip over unconfigured (NULL) levels */
     do {
         op->level++;

     } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);

     if (op->level < ST_LEVEL_MAX) {
         crm_trace("Attempting fencing level %d for %s (%d devices) - %s@%s.%.8s",
                   op->level, op->target, g_list_length(tp->levels[op->level]),
                   op->client_name, op->originator, op->id);
         op->devices = tp->levels[op->level];
         return pcmk_ok;
     }

     crm_notice("All fencing options to fence %s for %s@%s.%.8s failed",
                op->target, op->client_name, op->originator, op->id);
     return -EINVAL;
 }
 
 /*!
  * \brief Check to see if this operation is a duplicate of another in flight
  * operation. If so merge this operation into the inflight operation, and mark
  * it as a duplicate.
  */
 static void
 merge_duplicates(remote_fencing_op_t * op)
 {
     GHashTableIter iter;
     remote_fencing_op_t *other = NULL;

     time_t now = time(NULL);

     g_hash_table_iter_init(&iter, remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
         crm_node_t *peer = NULL;

         if (other->state > st_exec) {
             /* Must be in-progress */
             continue;
         } else if (safe_str_neq(op->target, other->target)) {
             /* Must be for the same node */
             continue;
         } else if (safe_str_neq(op->action, other->action)) {
             /* Fixed format string: had two arguments but only one %s */
             crm_trace("Must be for the same action: %s vs. %s", op->action, other->action);
             continue;
         } else if (safe_str_eq(op->client_name, other->client_name)) {
             crm_trace("Must be for different clients: %s", op->client_name);
             continue;
         } else if (safe_str_eq(other->target, other->originator)) {
             crm_trace("Can't be a suicide operation: %s", other->target);
             continue;
         }

         peer = crm_get_peer(0, other->originator);
         if(fencing_peer_active(peer) == FALSE) {
             crm_notice("Failing stonith action %s for node %s originating from %s@%s.%.8s: Originator is dead",
                        other->action, other->target, other->client_name, other->originator, other->id);
             other->state = st_failed;
             continue;

         } else if(other->total_timeout > 0 && now > (other->total_timeout + other->created)) {
             /* time_t values must not be passed to %d (undefined behavior on
              * LP64); widen to long long and use %lld */
             crm_info("Stonith action %s for node %s originating from %s@%s.%.8s is too old: %lld vs. %lld + %d",
                      other->action, other->target, other->client_name, other->originator, other->id,
                      (long long) now, (long long) other->created, other->total_timeout);
             continue;
         }

         /* There is another in-flight request to fence the same host
          * Piggyback on that instead.  If it fails, so do we.
          */
         other->duplicates = g_list_append(other->duplicates, op);
         if (other->total_timeout == 0) {
             crm_trace("Making a best-guess as to the timeout used");
             other->total_timeout = op->total_timeout =
                 TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL, op->base_timeout);
         }
         crm_notice
             ("Merging stonith action %s for node %s originating from client %s.%.8s with identical request from %s@%s.%.8s (%ds)",
              op->action, op->target, op->client_name, op->id, other->client_name, other->originator,
              other->id, other->total_timeout);
         report_timeout_period(op, other->total_timeout);
         op->state = st_duplicate;
     }
 }
 
 /*!
  * \internal
  * \brief Count how many entries in the peer cache are currently active
  *        according to fencing_peer_active().
  */
 static uint32_t fencing_active_peers(void)
 {
     uint32_t active = 0;
     crm_node_t *node;
     GHashTableIter iter;

     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         if (fencing_peer_active(node)) {
             active++;
         }
     }
     return active;
 }
 
 /*!
  * \internal
  * \brief Create a new remote stonith op
  * \param client, he local stonith client id that initaited the operation
  * \param request, The request from the client that started the operation
  * \param peer, Is this operation owned by another stonith peer? Operations
  *        owned by other peers are stored on all the stonith nodes, but only the
  *        owner executes the operation.  All the nodes get the results to the operation
  *        once the owner finishes executing it.
  */
 void *
 create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);

     /* Lazily create the global op table (entries freed by free_remote_op) */
     if (remote_op_list == NULL) {
         remote_op_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_remote_op);
     }

     /* If this operation is owned by another node, check to make
      * sure we haven't already created this operation. */
     if (peer && dev) {
         const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);

         CRM_CHECK(op_id != NULL, return NULL);

         op = g_hash_table_lookup(remote_op_list, op_id);
         if (op) {
             crm_debug("%s already exists", op_id);
             return op;
         }
     }

     op = calloc(1, sizeof(remote_fencing_op_t));

     crm_element_value_int(request, F_STONITH_TIMEOUT, (int *)&(op->base_timeout));

     /* Peer-owned ops keep the originator's id; locally created ops get a
      * fresh uuid */
     if (peer && dev) {
         op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
     } else {
         op->id = crm_generate_uuid();
     }

     /* The table does not own the key: op->id is freed by free_remote_op */
     g_hash_table_replace(remote_op_list, op->id, op);
     CRM_LOG_ASSERT(g_hash_table_lookup(remote_op_list, op->id) != NULL);

     op->state = st_query;
     op->replies_expected = fencing_active_peers();
     op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
     op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
     op->created = time(NULL);

     if (op->originator == NULL) {
         /* Local or relayed request */
         op->originator = strdup(stonith_our_uname);
     }

     CRM_LOG_ASSERT(client != NULL);
     if (client) {
         op->client_id = strdup(client);
     }

     op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);

     op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
     op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
     crm_element_value_int(request, F_STONITH_CALLOPTS, (int *)&(op->call_options));

     crm_trace("%s new stonith op: %s - %s of %s for %s",
               (peer
                && dev) ? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name);

     /* Target given as a corosync nodeid: translate it to a host name */
     if (op->call_options & st_opt_cs_nodeid) {
         int nodeid = crm_atoi(op->target, NULL);
         crm_node_t *node = crm_get_peer(nodeid, NULL);

         /* Ensure the conversion only happens once */
         op->call_options &= ~st_opt_cs_nodeid;

         if (node && node->uname) {
             free(op->target);
             op->target = strdup(node->uname);
         } else {
             crm_warn("Could not expand nodeid '%s' into a host name (%p)", op->target, node);
         }
     }

     /* check to see if this is a duplicate operation of another in-flight operation */
     merge_duplicates(op);

     return op;
 }
 
 remote_fencing_op_t *
 initiate_remote_stonith_op(crm_client_t * client, xmlNode * request, gboolean manual_ack)
 {
     xmlNode *query = NULL;
     const char *client_id = NULL;
     remote_fencing_op_t *op = NULL;
 
     if (client) {
         client_id = client->id;
     } else {
         client_id = crm_element_value(request, F_STONITH_CLIENTID);
     }
 
     CRM_LOG_ASSERT(client_id != NULL);
     op = create_remote_stonith_op(client_id, request, FALSE);
     op->owner = TRUE;
 
     CRM_CHECK(op->action, return NULL);
 
     if (stonith_topology_next(op) != pcmk_ok) {
         op->state = st_failed;
     }
 
     switch (op->state) {
         case st_failed:
             crm_warn("Initiation of remote operation %s for %s: failed (%s)", op->action,
                      op->target, op->id);
             remote_op_done(op, NULL, -EINVAL, FALSE);
             return op;
 
         case st_duplicate:
             crm_info("Initiating remote operation %s for %s: %s (duplicate)", op->action,
                      op->target, op->id);
             return op;
 
         default:
             crm_notice("Initiating remote operation %s for %s: %s (%d)", op->action, op->target,
                        op->id, op->state);
     }
 
     query = stonith_create_op(0, op->id, STONITH_OP_QUERY, NULL, 0);
 
     if (!manual_ack) {
         int query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
 
         op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
 
     } else {
         crm_xml_add(query, F_STONITH_DEVICE, "manual_ack");
     }
 
     crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(query, F_STONITH_TARGET, op->target);
     crm_xml_add(query, F_STONITH_ACTION, op->action);
     crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
     crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout);
 
     send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
     free_xml(query);
     return op;
 }
 
 static gint
 sort_strings(gconstpointer a, gconstpointer b)
 {
     return strcmp(a, b);
 }
 
 enum find_best_peer_options {
     /*! Skip checking the target peer for capable fencing devices */
     FIND_PEER_SKIP_TARGET = 0x0001,
     /*! Only check the target peer for capable fencing devices */
     FIND_PEER_TARGET_ONLY = 0x0002,
     /*! Skip peers and devices that are not verified */
     FIND_PEER_VERIFIED_ONLY = 0x0004,
 };
 
/*!
 * \internal
 * \brief Find a query result (peer) capable of fencing, honoring filter flags
 *
 * In topology mode, looks for a peer advertising the given device and pops
 * that device off the peer's list (so it is not handed out twice); otherwise
 * returns the first untried peer advertising any devices.  Returns NULL if
 * no suitable peer is found.
 */
static st_query_result_t *
find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
{
    GListPtr iter = NULL;
    gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;

    /* Topology mode requires a concrete device to search for */
    if (!device && is_set(op->call_options, st_opt_topology)) {
        return NULL;
    }

    for (iter = op->query_results; iter != NULL; iter = iter->next) {
        st_query_result_t *peer = iter->data;

        if ((options & FIND_PEER_SKIP_TARGET) && safe_str_eq(peer->host, op->target)) {
            continue;
        }
        if ((options & FIND_PEER_TARGET_ONLY) && safe_str_neq(peer->host, op->target)) {
            continue;
        }

        if (is_set(op->call_options, st_opt_topology)) {
            /* Do they have the next device of the current fencing level? */
            GListPtr match = NULL;

            if (verified_devices_only && !g_hash_table_lookup(peer->verified_devices, device)) {
                continue;
            }

            match = g_list_find_custom(peer->device_list, device, sort_strings);
            if (match) {
                crm_trace("Removing %s from %s (%d remaining)", (char *)match->data, peer->host,
                          g_list_length(peer->device_list));
                /* Pop the matched device so it is only used once */
                peer->device_list = g_list_remove(peer->device_list, match->data);
                return peer;
            }

        } else if (peer->devices > 0 && peer->tried == FALSE) {
            if (verified_devices_only && !g_hash_table_size(peer->verified_devices)) {
                continue;
            }

            /* No topology: Use the current best peer */
            crm_trace("Simple fencing");
            return peer;
        }
    }

    return NULL;
}
 
 static st_query_result_t *
 stonith_choose_peer(remote_fencing_op_t * op)
 {
     st_query_result_t *peer = NULL;
     const char *device = NULL;
 
     do {
         if (op->devices) {
             device = op->devices->data;
             crm_trace("Checking for someone to fence %s with %s", op->target,
                       (char *)op->devices->data);
         } else {
             crm_trace("Checking for someone to fence %s", op->target);
         }
 
         if ((peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET | FIND_PEER_VERIFIED_ONLY))) {
             return peer;
         } else if ((peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET))) {
             return peer;
         } else if ((peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY))) {
             return peer;
         }
 
         /* Try the next fencing level if there is one */
     } while (is_set(op->call_options, st_opt_topology)
              && stonith_topology_next(op) == pcmk_ok);
 
     if (op->devices) {
         crm_debug("Couldn't find anyone to fence %s with %s", op->target,
                   (char *)op->devices->data);
     } else {
         crm_debug("Couldn't find anyone to fence %s", op->target);
     }
 
     return NULL;
 }
 
 static int
 get_device_timeout(st_query_result_t * peer, const char *device, int default_timeout)
 {
     gpointer res;
 
     if (!peer || !device) {
         return default_timeout;
     }
 
     res = g_hash_table_lookup(peer->custom_action_timeouts, device);
 
     return res ? GPOINTER_TO_INT(res) : default_timeout;
 }
 
 static int
 get_peer_timeout(st_query_result_t * peer, int default_timeout)
 {
     int total_timeout = 0;
 
     GListPtr cur = NULL;
 
     for (cur = peer->device_list; cur; cur = cur->next) {
         total_timeout += get_device_timeout(peer, cur->data, default_timeout);
     }
 
     return total_timeout ? total_timeout : default_timeout;
 }
 
/*!
 * \internal
 * \brief Compute the overall timeout for a fencing operation
 *
 * In topology mode, sums the reported timeout of every device at every
 * configured fencing level (using whichever peer advertises each device);
 * otherwise uses the chosen peer's total, or default_timeout when no peer
 * is known.  Never returns 0.
 */
static int
get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer, int default_timeout)
{
    stonith_topology_t *tp = g_hash_table_lookup(topology, op->target);
    int total_timeout = 0;

    if (is_set(op->call_options, st_opt_topology) && tp) {
        int i;
        GListPtr device_list = NULL;
        GListPtr iter = NULL;

        /* Yep, this looks scary, nested loops all over the place.
         * Here is what is going on.
         * Loop1: Iterate through fencing levels.
         * Loop2: If a fencing level has devices, loop through each device
         * Loop3: For each device in a fencing level, see what peer owns it
         *        and what that peer has reported the timeout is for the device.
         */
        for (i = 0; i < ST_LEVEL_MAX; i++) {
            if (!tp->levels[i]) {
                continue;
            }
            for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
                for (iter = op->query_results; iter != NULL; iter = iter->next) {
                    st_query_result_t *peer = iter->data;

                    if (g_list_find_custom(peer->device_list, device_list->data, sort_strings)) {
                        total_timeout +=
                            get_device_timeout(peer, device_list->data, default_timeout);
                        break;
                    }
                }               /* End Loop3: match device with peer that owns device, find device's timeout period */
            }                   /* End Loop2: iterate through devices at a specific level */
        }                       /*End Loop1: iterate through fencing levels */

    } else if (chosen_peer) {
        total_timeout = get_peer_timeout(chosen_peer, default_timeout);
    } else {
        total_timeout = default_timeout;
    }

    return total_timeout ? total_timeout : default_timeout;
}
 
/*!
 * \internal
 * \brief Tell the requesting client (possibly on another node) how long the
 *        operation may take, so its async callback timer can be adjusted
 *
 * Also recurses into any merged duplicate operations so their clients are
 * updated too.  No-op for synchronous calls or ops without a request.
 */
static void
report_timeout_period(remote_fencing_op_t * op, int op_timeout)
{
    GListPtr iter = NULL;
    xmlNode *update = NULL;
    const char *client_node = NULL;
    const char *client_id = NULL;
    const char *call_id = NULL;

    if (op->call_options & st_opt_sync_call) {
        /* There is no reason to report the timeout for a syncronous call. It
         * is impossible to use the reported timeout to do anything when the client
         * is blocking for the response.  This update is only important for
         * async calls that require a callback to report the results in. */
        return;
    } else if (!op->request) {
        return;
    }

    crm_trace("Reporting timeout for %s.%.8s", op->client_name, op->id);
    client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
    call_id = crm_element_value(op->request, F_STONITH_CALLID);
    client_id = crm_element_value(op->request, F_STONITH_CLIENTID);
    /* Without all three identifiers we cannot route the update anywhere */
    if (!client_node || !call_id || !client_id) {
        return;
    }

    if (safe_str_eq(client_node, stonith_our_uname)) {
        /* The client is connected to this node, send the update direclty to them */
        do_stonith_async_timeout_update(client_id, call_id, op_timeout);
        return;
    }

    /* The client is connected to another node, relay this update to them */
    update = stonith_create_op(0, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
    crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id);
    crm_xml_add(update, F_STONITH_CLIENTID, client_id);
    crm_xml_add(update, F_STONITH_CALLID, call_id);
    crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout);

    send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE);

    free_xml(update);

    /* Duplicates share our fate; propagate the same timeout to their clients */
    for (iter = op->duplicates; iter != NULL; iter = iter->next) {
        remote_fencing_op_t *dup = iter->data;

        crm_trace("Reporting timeout for duplicate %s.%.8s", dup->client_name, dup->id);
        report_timeout_period(iter->data, op_timeout);
    }
}
 
 void
 call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
 {
     const char *device = NULL;
     int timeout = op->base_timeout;
 
     crm_trace("State for %s.%.8s: %s %d", op->target, op->client_name, op->id, op->state);
     if (peer == NULL && !is_set(op->call_options, st_opt_topology)) {
         peer = stonith_choose_peer(op);
     }
 
     if (!op->op_timer_total) {
         int total_timeout = get_op_total_timeout(op, peer, op->base_timeout);
 
         op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout;
         op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
         report_timeout_period(op, op->total_timeout);
         crm_info("Total remote op timeout set to %d for fencing of node %s for %s.%.8s",
                  total_timeout, op->target, op->client_name, op->id);
     }
 
     if (is_set(op->call_options, st_opt_topology) && op->devices) {
         /* Ignore any preference, they might not have the device we need */
         /* When using topology, the stonith_choose_peer function pops off
          * the peer from the op's query results.  Make sure to calculate
          * the op_timeout before calling this function when topology is in use */
         peer = stonith_choose_peer(op);
         device = op->devices->data;
         timeout = get_device_timeout(peer, device, op->base_timeout);
     }
 
     if (peer) {
         int timeout_one = 0;
         xmlNode *query = stonith_create_op(0, op->id, STONITH_OP_FENCE, NULL, 0);
 
         crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
         crm_xml_add(query, F_STONITH_TARGET, op->target);
         crm_xml_add(query, F_STONITH_ACTION, op->action);
         crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
         crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
         crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
         crm_xml_add_int(query, F_STONITH_TIMEOUT, timeout);
 
         if (device) {
             timeout_one =
                 TIMEOUT_MULTIPLY_FACTOR * get_device_timeout(peer, device, op->base_timeout);
             crm_info("Requesting that %s perform op %s %s with %s for %s (%ds)", peer->host,
                      op->action, op->target, device, op->client_name, timeout_one);
             crm_xml_add(query, F_STONITH_DEVICE, device);
             crm_xml_add(query, F_STONITH_MODE, "slave");
 
         } else {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(peer, op->base_timeout);
             crm_info("Requesting that %s perform op %s %s for %s (%ds)",
                      peer->host, op->action, op->target, op->client_name, timeout_one);
             crm_xml_add(query, F_STONITH_MODE, "smart");
         }
 
         op->state = st_exec;
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
         }
         op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
 
         send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, query, FALSE);
         peer->tried = TRUE;
         free_xml(query);
         return;
 
     } else if (op->owner == FALSE) {
         crm_err("The termination of %s for %s is not ours to control", op->target, op->client_name);
 
     } else if (op->query_timer == 0) {
         /* We've exhausted all available peers */
         crm_info("No remaining peers capable of terminating %s for %s (%d)", op->target,
                  op->client_name, op->state);
         CRM_LOG_ASSERT(op->state < st_done);
         remote_op_timeout(op);
 
     } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
-        crm_info("None of the %d peers is capable of terminating %s for %s (%d)",
+        crm_info("None of the %d peers are capable of terminating %s for %s (%d)",
                  op->replies, op->target, op->client_name, op->state);
 
         op->state = st_failed;
         remote_op_done(op, NULL, -EHOSTUNREACH, FALSE);
 
     } else if (device) {
         crm_info("Waiting for additional peers capable of terminating %s with %s for %s.%.8s",
                  op->target, device, op->client_name, op->id);
     } else {
         crm_info("Waiting for additional peers capable of terminating %s for %s%.8s",
                  op->target, op->client_name, op->id);
     }
 }
 
 static gint
 sort_peers(gconstpointer a, gconstpointer b)
 {
     const st_query_result_t *peer_a = a;
     const st_query_result_t *peer_b = a;
 
     if (peer_a->devices > peer_b->devices) {
         return -1;
     } else if (peer_a->devices > peer_b->devices) {
         return 1;
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Determine if all the devices in the topology are found or not
  */
 static gboolean
 all_topology_devices_found(remote_fencing_op_t * op)
 {
     GListPtr device = NULL;
     GListPtr iter = NULL;
     GListPtr match = NULL;
     stonith_topology_t *tp = NULL;
     gboolean skip_target = FALSE;
     int i;
 
     tp = g_hash_table_lookup(topology, op->target);
 
     if (!tp) {
         return FALSE;
     }
     if (safe_str_eq(op->action, "off") || safe_str_eq(op->action, "reboot")) {
         /* Don't count the devices on the target node if we are killing
          * the target node. */
         skip_target = TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         for (device = tp->levels[i]; device; device = device->next) {
             match = FALSE;
             for (iter = op->query_results; iter != NULL; iter = iter->next) {
                 st_query_result_t *peer = iter->data;
 
                 if (skip_target && safe_str_eq(peer->host, op->target)) {
                     continue;
                 }
                 match = g_list_find_custom(peer->device_list, device->data, sort_strings);
             }
             if (!match) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
/*!
 * \internal
 * \brief Handle a peer's reply to our STONITH_OP_QUERY broadcast
 *
 * Records which devices (and per-device timeouts/verification) the replying
 * peer can use against the op's target, then starts the actual fencing as
 * soon as a promising result arrives or all expected replies are in.
 *
 * \return pcmk_ok on success, -EPROTO on malformed input, -EOPNOTSUPP for
 *         unknown/expired operations
 */
int
process_remote_stonith_query(xmlNode * msg)
{
    int devices = 0;
    gboolean host_is_target = FALSE;
    const char *id = NULL;
    const char *host = NULL;
    remote_fencing_op_t *op = NULL;
    st_query_result_t *result = NULL;
    uint32_t active = fencing_active_peers();
    xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
    xmlNode *child = NULL;

    CRM_CHECK(dev != NULL, return -EPROTO);

    id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
    CRM_CHECK(id != NULL, return -EPROTO);

    dev = get_xpath_object("//@st-available-devices", msg, LOG_ERR);
    CRM_CHECK(dev != NULL, return -EPROTO);
    crm_element_value_int(dev, "st-available-devices", &devices);

    op = g_hash_table_lookup(remote_op_list, id);
    if (op == NULL) {
        crm_debug("Unknown or expired remote op: %s", id);
        return -EOPNOTSUPP;
    }

    op->replies++;
    host = crm_element_value(msg, F_ORIG);
    host_is_target = safe_str_eq(host, op->target);

    if (devices <= 0) {
        /* If we're doing 'known' then we might need to fire anyway */
        crm_trace("Query result from %s (%d devices)", host, devices);
        /* Even an empty reply may complete the expected set and unblock us */
        if(op->state == st_query && (op->replies >= op->replies_expected || op->replies >= active)) {
            crm_info("All queries have arrived, continuing (%d, %d, %d) ", op->replies_expected, active, op->replies);
            call_remote_stonith(op, NULL);
        }
        return pcmk_ok;

    } else if (host_is_target) {
        if (op->call_options & st_opt_allow_suicide) {
            crm_trace("Allowing %s to potentialy fence itself", op->target);
        } else {
            crm_info("Ignoring reply from %s, hosts are not permitted to commit suicide",
                     op->target);
            return pcmk_ok;
        }
    }

    crm_info("Query result %d of %d from %s (%d devices)", op->replies, op->replies_expected, host, devices);
    /* Build a query-result record for this peer from the reply payload */
    result = calloc(1, sizeof(st_query_result_t));
    result->host = strdup(host);
    result->devices = devices;
    result->custom_action_timeouts = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL);
    result->verified_devices = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL);

    for (child = __xml_first_child(dev); child != NULL; child = __xml_next(child)) {
        const char *device = ID(child);
        int action_timeout = 0;
        int verified = 0;

        if (device) {
            result->device_list = g_list_prepend(result->device_list, strdup(device));
            crm_element_value_int(child, F_STONITH_ACTION_TIMEOUT, &action_timeout);
            crm_element_value_int(child, F_STONITH_DEVICE_VERIFIED, &verified);
            if (action_timeout) {
                crm_trace("Peer %s with device %s returned action timeout %d",
                          result->host, device, action_timeout);
                g_hash_table_insert(result->custom_action_timeouts,
                                    strdup(device), GINT_TO_POINTER(action_timeout));
            }
            if (verified) {
                crm_trace("Peer %s has confirmed a verified device %s", result->host, device);
                g_hash_table_insert(result->verified_devices,
                                    strdup(device), GINT_TO_POINTER(verified));
            }
        }
    }

    CRM_CHECK(devices == g_list_length(result->device_list),
              crm_err("Mis-match: Query claimed to have %d devices but %d found", devices,
                      g_list_length(result->device_list)));

    /* Keep results ordered by device count so the best peer is tried first */
    op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers);

    if (is_set(op->call_options, st_opt_topology)) {
        /* If we start the fencing before all the topology results are in,
         * it is possible fencing levels will be skipped because of the missing
         * query results. */
        if (op->state == st_query && all_topology_devices_found(op)) {
            /* All the query results are in for the topology, start the fencing ops. */
            crm_trace("All topology devices found");
            call_remote_stonith(op, result);

        } else if(op->state == st_query && (op->replies >= op->replies_expected || op->replies >= active)) {
            crm_info("All topology queries have arrived, continuing (%d, %d, %d) ", op->replies_expected, active, op->replies);
            call_remote_stonith(op, NULL);
        }

    } else if (op->state == st_query) {
        /* We have a result for a non-topology fencing op that looks promising,
         * go ahead and start fencing before query timeout */
        if (host_is_target == FALSE && g_hash_table_size(result->verified_devices)) {
            /* we have a verified device living on a peer that is not the target */
            crm_trace("Found %d verified devices", g_hash_table_size(result->verified_devices));
            call_remote_stonith(op, result);

        } else if (safe_str_eq(op->action, "on")) {
            crm_trace("Unfencing %s", op->target);
            call_remote_stonith(op, result);

        } else if(op->replies >= op->replies_expected || op->replies >= active) {
            crm_info("All queries have arrived, continuing (%d, %d, %d) ", op->replies_expected, active, op->replies);
            call_remote_stonith(op, NULL);

        } else {
            crm_trace("Waiting for more peer results before launching fencing operation");
        }

    } else if (op->state == st_done) {
        crm_info("Discarding query result from %s (%d devices): Operation is in state %d",
                 result->host, result->devices, op->state);
    }

    return pcmk_ok;
}
 
/*!
 * \internal
 * \brief Handle the result of a fencing execution (STONITH_OP_FENCE reply
 *        or a broadcast of a completed operation)
 *
 * Broadcast results finalize the operation everywhere; non-broadcast
 * results are only valid on the originator, which either finishes the op,
 * advances to the next device/level (topology), or retries.
 *
 * \return pcmk_ok or the operation's rc on normal paths, -EPROTO for
 *         malformed input, -EOPNOTSUPP for unknown/expired operations
 */
int
process_remote_stonith_exec(xmlNode * msg)
{
    int rc = 0;
    const char *id = NULL;
    const char *device = NULL;
    remote_fencing_op_t *op = NULL;
    xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);

    CRM_CHECK(dev != NULL, return -EPROTO);

    id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
    CRM_CHECK(id != NULL, return -EPROTO);

    dev = get_xpath_object("//@" F_STONITH_RC, msg, LOG_ERR);
    CRM_CHECK(dev != NULL, return -EPROTO);

    crm_element_value_int(dev, F_STONITH_RC, &rc);

    device = crm_element_value(dev, F_STONITH_DEVICE);

    if (remote_op_list) {
        op = g_hash_table_lookup(remote_op_list, id);
    }

    if (op == NULL && rc == pcmk_ok) {
        /* Record successful fencing operations */
        const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID);

        op = create_remote_stonith_op(client_id, dev, TRUE);
    }

    if (op == NULL) {
        /* Could be for an event that began before we started */
        /* TODO: Record the op for later querying */
        crm_info("Unknown or expired remote op: %s", id);
        return -EOPNOTSUPP;
    }

    /* A reply for a device we already moved past means the per-device timer
     * fired before this result arrived; ignore it */
    if (op->devices && device && safe_str_neq(op->devices->data, device)) {
        crm_err
            ("Received outdated reply for device %s (instead of %s) to %s node %s. Operation already timed out at remote level.",
             device, op->devices->data, op->action, op->target);
        return rc;
    }

    if (safe_str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast")) {
        /* NOTE(review): the format is "%s@%s.%.8s" but the arguments pass
         * op->id before op->originator, so the id prints in the originator's
         * slot and the originator is truncated to 8 chars — looks like the
         * last two arguments are swapped; confirm against the similar
         * messages elsewhere in this file. */
        crm_debug("Marking call to %s for %s on behalf of %s@%s.%.8s: %s (%d)",
                  op->action, op->target, op->client_name, op->id, op->originator,
                  pcmk_strerror(rc), rc);
        if (rc == pcmk_ok) {
            op->state = st_done;
        } else {
            op->state = st_failed;
        }
        remote_op_done(op, msg, rc, FALSE);
        return pcmk_ok;
    } else if (safe_str_neq(op->originator, stonith_our_uname)) {
        /* If this isn't a remote level broadcast, and we are not the
         * originator of the operation, we should not be receiving this msg. */
        crm_err
            ("%s received non-broadcast fencing result for operation it does not own (device %s targeting %s)",
             stonith_our_uname, device, op->target);
        return rc;
    }

    if (is_set(op->call_options, st_opt_topology)) {
        const char *device = crm_element_value(msg, F_STONITH_DEVICE);

        crm_notice("Call to %s for %s on behalf of %s@%s: %s (%d)",
                   device, op->target, op->client_name, op->originator,
                   pcmk_strerror(rc), rc);

        /* We own the op, and it is complete. broadcast the result to all nodes
         * and notify our local clients. */
        if (op->state == st_done) {
            remote_op_done(op, msg, rc, FALSE);
            return rc;
        }

        /* An operation completed succesfully but has not yet been marked as done.
         * Continue the topology if more devices exist at the current level, otherwise
         * mark as done. */
        if (rc == pcmk_ok) {
            if (op->devices) {
                /* Success, are there any more? */
                op->devices = op->devices->next;
            }
            /* if no more devices at this fencing level, we are done,
             * else we need to contine with executing the next device in the list */
            if (op->devices == NULL) {
                crm_trace("Marking complex fencing op for %s as complete", op->target);
                op->state = st_done;
                remote_op_done(op, msg, rc, FALSE);
                return rc;
            }
        } else {
            /* This device failed, time to try another topology level. If no other
             * levels are available, mark this operation as failed and report results. */
            if (stonith_topology_next(op) != pcmk_ok) {
                op->state = st_failed;
                remote_op_done(op, msg, rc, FALSE);
                return rc;
            }
        }
    } else if (rc == pcmk_ok && op->devices == NULL) {
        crm_trace("All done for %s", op->target);

        op->state = st_done;
        remote_op_done(op, msg, rc, FALSE);
        return rc;
    }

    /* Retry on failure or execute the rest of the topology */
    crm_trace("Next for %s on behalf of %s@%s (rc was %d)", op->target, op->originator,
              op->client_name, rc);
    call_remote_stonith(op, NULL);
    return rc;
}
 
/*!
 * \internal
 * \brief Build an XML list of remembered fencing operations
 *
 * If the request names a target (optionally as a corosync nodeid), only
 * that target's history is included; otherwise all remembered operations
 * are listed.  \p *output is always created, even when empty.
 *
 * \return 0 (rc is initialized to 0 and only ever reassigned 0 per entry)
 */
int
stonith_fence_history(xmlNode * msg, xmlNode ** output)
{
    int rc = 0;
    const char *target = NULL;
    xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_TRACE);

    if (dev) {
        int options = 0;

        target = crm_element_value(dev, F_STONITH_TARGET);
        crm_element_value_int(msg, F_STONITH_CALLOPTS, &options);
        /* Translate a corosync nodeid target to its host name, if possible */
        if (target && (options & st_opt_cs_nodeid)) {
            int nodeid = crm_atoi(target, NULL);
            crm_node_t *node = crm_get_peer(nodeid, NULL);

            if (node) {
                target = node->uname;
            }
        }
    }
    *output = create_xml_node(NULL, F_STONITH_HISTORY_LIST);

    if (remote_op_list) {
        GHashTableIter iter;
        remote_fencing_op_t *op = NULL;

        g_hash_table_iter_init(&iter, remote_op_list);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
            xmlNode *entry = NULL;

            if (target && strcmp(op->target, target) != 0) {
                continue;
            }

            rc = 0;
            entry = create_xml_node(*output, STONITH_OP_EXEC);
            crm_xml_add(entry, F_STONITH_TARGET, op->target);
            crm_xml_add(entry, F_STONITH_ACTION, op->action);
            crm_xml_add(entry, F_STONITH_ORIGIN, op->originator);
            crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate);
            crm_xml_add_int(entry, F_STONITH_DATE, op->completed);
            crm_xml_add_int(entry, F_STONITH_STATE, op->state);
        }
    }

    return rc;
}
 
 gboolean
 stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
 {
     GHashTableIter iter;
     time_t now = time(NULL);
     remote_fencing_op_t *rop = NULL;
 
     crm_trace("tolerance=%d, remote_op_list=%p", tolerance, remote_op_list);
 
     if (tolerance <= 0 || !remote_op_list || target == NULL || action == NULL) {
         return FALSE;
     }
 
     g_hash_table_iter_init(&iter, remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
         if (strcmp(rop->target, target) != 0) {
             continue;
         } else if (rop->state != st_done) {
             continue;
         } else if (strcmp(rop->action, action) != 0) {
             continue;
         } else if ((rop->completed + tolerance) < now) {
             continue;
         }
 
         crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
                    target, action, tolerance, rop->delegate, rop->originator);
         return TRUE;
     }
     return FALSE;
 }
diff --git a/fencing/test.c b/fencing/test.c
index 5ae83f5b57..7ca876524b 100644
--- a/fencing/test.c
+++ b/fencing/test.c
@@ -1,651 +1,651 @@
 /* 
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  * 
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  * 
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 
 GMainLoop *mainloop = NULL;
 crm_trigger_t *trig = NULL;
 int mainloop_iter = 0;
 int callback_rc = 0;
 typedef void (*mainloop_test_iteration_cb) (int check_event);
 
 #define MAINLOOP_DEFAULT_TIMEOUT 2
 
 #define mainloop_test_done(pass) \
     if (pass) { \
         crm_info("SUCCESS - %s", __PRETTY_FUNCTION__); \
         mainloop_iter++;   \
         mainloop_set_trigger(trig);  \
     } else { \
-        crm_info("FAILURE = %s async_callback %d", __PRETTY_FUNCTION__, callback_rc); \
+        crm_err("FAILURE = %s async_callback %d", __PRETTY_FUNCTION__, callback_rc); \
         crm_exit(-1); \
     } \
     callback_rc = 0; \
 
 
 /* *INDENT-OFF* */
 enum test_modes {
     /* class dev test using a very specific environment */
     test_standard = 0,
     /* watch notifications only */
     test_passive,
     /* sanity test stonith client api using fence_true and fence_false */
     test_api_sanity,
     /* sanity test mainloop code with async respones. */
     test_api_mainloop,
 };
 
 static struct crm_option long_options[] = {
     {"verbose",     0, 0, 'V'},
     {"version",     0, 0, '$'},
     {"help",        0, 0, '?'},
     {"passive",     0, 0, 'p'},
     {"api_test",    0, 0, 't'},
     {"mainloop_api_test",    0, 0, 'm'},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 stonith_t *st = NULL;
 struct pollfd pollfd;
 int st_opts = st_opt_sync_call;
 int expected_notifications = 0;
 int verbose = 0;
 
 static void
 dispatch_helper(int timeout)
 {
     int rc;
 
     crm_debug("Looking for notification");
     pollfd.events = POLLIN;
     while (true) {
         rc = poll(&pollfd, 1, timeout); /* wait 10 minutes, -1 forever */
         if (rc > 0) {
             if (!stonith_dispatch(st)) {
                 break;
             }
         } else {
             break;
         }
     }
 }
 
 static void
 st_callback(stonith_t * st, stonith_event_t * e)
 {
     if (st->state == stonith_disconnected) {
         crm_exit(1);
     }
 
     crm_notice("Operation %s requested by %s %s for peer %s.  %s reported: %s (ref=%s)",
                e->operation, e->origin, e->result == pcmk_ok ? "completed" : "failed",
                e->target, e->executioner ? e->executioner : "<none>",
                pcmk_strerror(e->result), e->id);
 
     if (expected_notifications) {
         expected_notifications--;
     }
 }
 
 static void
 st_global_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     crm_notice("Call id %d completed with rc %d", data->call_id, data->rc);
 }
 
 static void
 passive_test(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     dispatch_helper(600 * 1000);
 }
 
 #define single_test(cmd, str, num_notifications, expected_rc) \
 { \
     int rc = 0; \
     rc = cmd; \
     expected_notifications = 0;  \
     if (num_notifications) { \
         expected_notifications = num_notifications; \
         dispatch_helper(500);  \
     } \
     if (rc != expected_rc) { \
-        crm_info("FAILURE - expected rc %d != %d(%s) for cmd - %s\n", expected_rc, rc, pcmk_strerror(rc), str); \
+        crm_err("FAILURE - expected rc %d != %d(%s) for cmd - %s\n", expected_rc, rc, pcmk_strerror(rc), str); \
         crm_exit(-1); \
     } else if (expected_notifications) { \
-        crm_info("FAILURE - expected %d notifications, got only %d for cmd - %s\n", \
+        crm_err("FAILURE - expected %d notifications, got only %d for cmd - %s\n", \
             num_notifications, num_notifications - expected_notifications, str); \
         crm_exit(-1); \
     } else { \
         if (verbose) {                   \
             crm_info("SUCCESS - %s: %d", str, rc);    \
         } else {   \
             crm_debug("SUCCESS - %s: %d", str, rc);    \
         }                          \
     } \
 }\
 
 static void
 run_fence_failure_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params),
                 "Register device1 for failure test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
                 "Fence failure results off", 1, -62);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0),
                 "Fence failure results reboot", 1, -62);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for failure test", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_fence_failure_rollover_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params),
                 "Register device1 for rollover test", 1, 0);
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_true", params),
                 "Register device2 for rollover test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
                 "Fence rollover results off", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0),
                 "Fence rollover results on", 1, 0);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for rollover tests", 1, 0);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id2"),
                 "Remove device2 for rollover tests", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_standard_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_true", params),
                 "Register", 1, 0);
 
     single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0);
 
     single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node2", 1),
                 "Status false_1_node2", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1),
                 "Status false_1_node1", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0),
-                "Fence unknown-host (expected failure)", 0, -113);
+                "Fence unknown-host (expected failure)", 0, -19);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0),
                 "Fence false_1_node1", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0),
                 "Unfence false_1_node1", 1, 0);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id"), "Remove test-id", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 sanity_tests(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     crm_info("Starting API Sanity Tests");
     run_standard_test();
     run_fence_failure_test();
     run_fence_failure_rollover_test();
     crm_info("Sanity Tests Passed");
 }
 
 static void
 standard_dev_test(void)
 {
     int rc = 0;
     char *tmp = NULL;
     stonith_key_value_t *params = NULL;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4");
 
     rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params);
     crm_debug("Register: %d", rc);
 
     rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10);
     crm_debug("List: %d output: %s\n", rc, tmp ? tmp : "<none>");
 
     rc = st->cmds->monitor(st, st_opts, "test-id", 10);
     crm_debug("Monitor: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node2", 10);
     crm_debug("Status false_1_node2: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0);
     crm_debug("Fence unknown-host: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0);
     crm_debug("Fence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0);
     crm_debug("Fence alias: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10);
     crm_debug("Status alias: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->remove_device(st, st_opts, "test-id");
     crm_debug("Remove test-id: %d", rc);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
  iterate_mainloop_tests(gboolean event_ready);
 
 static void
 mainloop_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     callback_rc = data->rc;
     iterate_mainloop_tests(TRUE);
 }
 
 static int
 register_callback_helper(int callid)
 {
     return st->cmds->register_callback(st,
                                        callid,
                                        MAINLOOP_DEFAULT_TIMEOUT,
                                        st_opt_timeout_updates, NULL, "callback", mainloop_callback);
 }
 
 static void
 test_async_fence_pass(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc != 0) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 #define CUSTOM_TIMEOUT_ADDITION 10
 static void
 test_async_fence_custom_timeout(int check_event)
 {
     int rc = 0;
     static time_t begin = 0;
 
     if (check_event) {
         uint32_t diff = (time(NULL) - begin);
 
         if (callback_rc != -ETIME) {
             mainloop_test_done(FALSE);
         } else if (diff < CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT) {
             crm_err
                 ("Custom timeout test failed, callback expiration should be updated to %d, actual timeout was %d",
                  CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT, diff);
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
     begin = time(NULL);
 
     rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_async_fence_timeout(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
-        if (callback_rc != -ETIME) {
+        if (callback_rc != -EHOSTUNREACH) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_async_monitor(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->monitor(st, 0, "false_1", MAINLOOP_DEFAULT_TIMEOUT);
     if (rc < 0) {
         crm_err("monitor failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
 
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_register_async_devices(int check_event)
 {
     char buf[16] = { 0, };
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2");
     st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_false", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2");
     st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_true", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2");
     snprintf(buf, sizeof(buf) - 1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION);
     params = stonith_key_value_add(params, "pcmk_off_timeout", buf);
     st->cmds->register_device(st, st_opts, "false_custom_timeout", "stonith-ng", "fence_false",
                               params);
     stonith_key_value_freeall(params, 1, 1);
 
     mainloop_test_done(TRUE);
 }
 
 static void
 try_mainloop_connect(int check_event)
 {
     int tries = 10;
     int i = 0;
     int rc = 0;
 
     for (i = 0; i < tries; i++) {
         rc = st->cmds->connect(st, crm_system_name, NULL);
 
         if (!rc) {
             crm_info("stonith client connection established");
             mainloop_test_done(TRUE);
             return;
         } else {
             crm_info("stonith client connection failed");
         }
         sleep(1);
     }
 
     crm_err("API CONNECTION FAILURE\n");
     mainloop_test_done(FALSE);
 }
 
 static void
 iterate_mainloop_tests(gboolean event_ready)
 {
     static mainloop_test_iteration_cb callbacks[] = {
         try_mainloop_connect,
         test_register_async_devices,
         test_async_monitor,
         test_async_fence_pass,
         test_async_fence_timeout,
         test_async_fence_custom_timeout,
     };
 
     if (mainloop_iter == (sizeof(callbacks) / sizeof(mainloop_test_iteration_cb))) {
         /* all tests ran, everything passed */
         crm_info("ALL MAINLOOP TESTS PASSED!");
         crm_exit(0);
     }
 
     callbacks[mainloop_iter] (event_ready);
 }
 
 static gboolean
 trigger_iterate_mainloop_tests(gpointer user_data)
 {
     iterate_mainloop_tests(FALSE);
     return TRUE;
 }
 
 static void
 test_shutdown(int nsig)
 {
     int rc = 0;
 
     if (st) {
         rc = st->cmds->disconnect(st);
         crm_info("Disconnect: %d", rc);
 
         crm_debug("Destroy");
         stonith_api_delete(st);
     }
 
     if (rc) {
         crm_exit(-1);
     }
 }
 
 static void
 mainloop_tests(void)
 {
     trig = mainloop_add_trigger(G_PRIORITY_HIGH, trigger_iterate_mainloop_tests, NULL);
     mainloop_set_trigger(trig);
     mainloop_add_signal(SIGTERM, test_shutdown);
 
     crm_info("Starting");
     mainloop = g_main_new(FALSE);
     g_main_run(mainloop);
 }
 
 int
 main(int argc, char **argv)
 {
     int argerr = 0;
     int flag;
     int option_index = 0;
 
     enum test_modes mode = test_standard;
 
     crm_set_options(NULL, "mode [options]", long_options,
                     "Provides a summary of cluster's current state."
                     "\n\nOutputs varying levels of detail in a number of different formats.\n");
 
     while (1) {
         flag = crm_get_option(argc, argv, &option_index);
         if (flag == -1) {
             break;
         }
 
         switch (flag) {
             case 'V':
                 verbose = 1;
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             case 'p':
                 mode = test_passive;
                 break;
             case 't':
                 mode = test_api_sanity;
                 break;
             case 'm':
                 mode = test_api_mainloop;
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     crm_log_init("stonith-test", LOG_INFO, TRUE, verbose ? TRUE : FALSE, argc, argv, FALSE);
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         crm_help('?', EX_USAGE);
     }
 
     crm_debug("Create");
     st = stonith_api_new();
 
     switch (mode) {
         case test_standard:
             standard_dev_test();
             break;
         case test_passive:
             passive_test();
             break;
         case test_api_sanity:
             sanity_tests();
             break;
         case test_api_mainloop:
             mainloop_tests();
             break;
     }
 
     test_shutdown(0);
     return 0;
 }
diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in
index 7db366371c..7b2b02dd32 100755
--- a/lrmd/regression.py.in
+++ b/lrmd/regression.py.in
@@ -1,951 +1,951 @@
 #!/usr/bin/python
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 import os
 import sys
 import subprocess
 import shlex
 import time
 
 def output_from_command(command, no_wait=0):
 	test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
 
 	if no_wait == 0:
 		test.wait()
 	else:
 		return 0
 
 	return test.communicate()[0].split("\n")
 
 class Test:
 	def __init__(self, name, description, verbose = 0, tls = 0):
 		self.name = name
 		self.description = description
 		self.cmds = []
 
 		if tls:
 			self.daemon_location = "/usr/sbin/pacemaker_remoted"
 		else:
 			self.daemon_location = "@CRM_DAEMON_DIR@/lrmd"
 
 		self.test_tool_location = "@CRM_DAEMON_DIR@/lrmd_test"
 		self.verbose = verbose
 		self.tls = tls
 
 		self.result_txt = ""
 		self.cmd_tool_output = ""
 		self.result_exitcode = 0;
 
 		self.lrmd_process = None
 		self.stonith_process = None
 
 		self.executed = 0
 
 	def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None):
 		if self.verbose and cmd == self.test_tool_location:
 			args = args + " -V "
 
 		if (cmd == self.test_tool_location) and self.tls:
 			args = args + " -S "
 
 		self.cmds.append(
 			{
 				"cmd" : cmd,
 				"kill" : kill,
 				"args" : args,
 				"expected_exitcode" : exitcode,
 				"stdout_match" : stdout_match,
 				"stdout_negative_match" : stdout_negative_match,
 				"no_wait" : no_wait,
 				"cmd_output" : "",
 			}
 		)
 
 	def start_environment(self):
 		### make sure we are in full control here ###
 		cmd = shlex.split("killall -q -9 stonithd lrmd lt-lrmd lrmd_test lt-lrmd_test pacemaker_remoted")
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 		test.wait()
 
 		additional_args = ""
 
 		if self.tls == 0:
 			self.stonith_process = subprocess.Popen(shlex.split("@CRM_DAEMON_DIR@/stonithd -s"))
 
 		if self.verbose:
 			additional_args = additional_args + " -VVV "
 
 		self.lrmd_process = subprocess.Popen(shlex.split("%s %s -l /tmp/lrmd-regression.log" % (self.daemon_location, additional_args)))
 
 		time.sleep(1)
 
 	def clean_environment(self):
 		if self.lrmd_process:
 			self.lrmd_process.terminate()
 			self.lrmd_process.wait()
 
 			if self.verbose:
 				print "Daemon output"
 				f = open('/tmp/lrmd-regression.log', 'r')
 				for line in f.readlines():
 					print line.strip()
 			os.remove('/tmp/lrmd-regression.log')
 
 		if self.stonith_process:
 			self.stonith_process.terminate()
 			self.stonith_process.wait()
 
 		self.lrmd_process = None
 		self.stonith_process = None
 
 	def add_sys_cmd(self, cmd, args):
 		self.__new_cmd(cmd, args, 0, "")
 
 	def add_sys_cmd_no_wait(self, cmd, args):
 		self.__new_cmd(cmd, args, 0, "", 1)
 
 	def add_cmd_check_stdout(self, args, match, no_match = ""):
 		self.__new_cmd(self.test_tool_location, args, 0, match, 0, no_match)
 
 	def add_cmd(self, args):
 		self.__new_cmd(self.test_tool_location, args, 0, "")
 
 	def add_cmd_and_kill(self, killProc, args):
 		self.__new_cmd(self.test_tool_location, args, 0, "", kill=killProc)
 
 	def add_expected_fail_cmd(self, args):
 		self.__new_cmd(self.test_tool_location, args, 255, "")
 
 	def get_exitcode(self):
 		return self.result_exitcode
 
 	def print_result(self, filler):
 		print "%s%s" % (filler, self.result_txt)
 
 	def run_cmd(self, args):
 		cmd = shlex.split(args['args'])
 		cmd.insert(0, args['cmd'])
 		if self.verbose:
 			print "\n\nRunning: "+" ".join(cmd)
 		test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 
 		if args['kill']:
 			if self.verbose:
 				print "Also running: "+args['kill']
 			### Typically the kill argument is used to detect some sort of
 			### failure.  Without yeilding for a few seconds here the process
 			### launched earlier that is listening for the failure may not have time
 			### to connect to the lrmd.
 			time.sleep(2)
 			subprocess.Popen(shlex.split(args['kill']))
 
 		if args['no_wait'] == 0:
 			test.wait()
 		else:
 			return 0
 
 		output = test.communicate()[0]
 
 		if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0:
 			test.returncode = -2
 			print "STDOUT string '%s' was not found in cmd output" % (args['stdout_match'])
 
 		if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0:
 			test.returncode = -2
 			print "STDOUT string '%s' was found in cmd output" % (args['stdout_negative_match'])
 
 		args['cmd_output'] = output
 
 		return test.returncode;
 
 	def run(self):
 		res = 0
 		i = 1
 
 		if self.tls and self.name.count("stonith") != 0:
 			self.result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name)
 			print self.result_txt
 			return res
 
 		self.start_environment()
 
 		if self.verbose:
 			print "\n--- START TEST - %s" % self.name
 
 		self.result_txt = "SUCCESS - '%s'" % (self.name)
 		self.result_exitcode = 0
 		for cmd in self.cmds:
 			res = self.run_cmd(cmd)
 			if res != cmd['expected_exitcode']:
 				print cmd['cmd_output']
 				print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode'])
 				self.result_txt = "FAILURE - '%s' failed at step %d. Command: lrmd_test %s" % (self.name, i, cmd['args'])
 				self.result_exitcode = -1
 				break
 			else:
 				if self.verbose:
 					print cmd['cmd_output'].strip()
 					print "Step %d SUCCESS" % (i)
 			i = i + 1
 		self.clean_environment()
 
 		print self.result_txt
 		if self.verbose:
 			print "--- END TEST - %s\n" % self.name
 
 		self.executed = 1
 		return res
 
 class Tests:
 	def __init__(self, verbose = 0, tls = 0):
 		self.tests = []
 		self.verbose = verbose
 		self.tls = tls;
 		self.rsc_classes = output_from_command("crm_resource --list-standards")
 		self.rsc_classes = self.rsc_classes[:-1] # Strip trailing empty line
 		self.need_authkey = 0
 		if self.tls:
 			self.rsc_classes.remove("stonith")
 
 		print "Testing "+repr(self.rsc_classes)
 
 		self.common_cmds = {
 			"ocf_reg_line"      : "-c register_rsc -r ocf_test_rsc -t 3000 -C ocf -P pacemaker -T Dummy",
 			"ocf_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"",
 			"ocf_unreg_line"    : "-c unregister_rsc -r \"ocf_test_rsc\" -t 3000",
 			"ocf_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"",
 			"ocf_start_line"    : "-c exec -r \"ocf_test_rsc\" -a \"start\" -t 3000 ",
 			"ocf_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ",
 			"ocf_stop_line"     : "-c exec -r \"ocf_test_rsc\" -a \"stop\" -t 3000 ",
 			"ocf_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ",
 			"ocf_monitor_line"  : "-c exec -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" -t 3000",
 			"ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" -t 3000",
 			"ocf_cancel_line"   : "-c cancel -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
 			"ocf_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
 
 			"systemd_reg_line"      : "-c register_rsc -r systemd_test_rsc -t 3000 -C systemd -T lrmd_dummy_daemon",
 			"systemd_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"",
 			"systemd_unreg_line"    : "-c unregister_rsc -r \"systemd_test_rsc\" -t 3000",
 			"systemd_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"",
 			"systemd_start_line"    : "-c exec -r \"systemd_test_rsc\" -a \"start\" -t 3000 ",
 			"systemd_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ",
 			"systemd_stop_line"     : "-c exec -r \"systemd_test_rsc\" -a \"stop\" -t 3000 ",
 			"systemd_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ",
 			"systemd_monitor_line"  : "-c exec -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" -t 3000",
 			"systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" -t 3000",
 			"systemd_cancel_line"   : "-c cancel -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
 			"systemd_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
 
 			"upstart_reg_line"      : "-c register_rsc -r upstart_test_rsc -t 3000 -C upstart -T lrmd_dummy_daemon",
 			"upstart_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"",
 			"upstart_unreg_line"    : "-c unregister_rsc -r \"upstart_test_rsc\" -t 3000",
 			"upstart_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"",
 			"upstart_start_line"    : "-c exec -r \"upstart_test_rsc\" -a \"start\" -t 3000 ",
 			"upstart_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ",
 			"upstart_stop_line"     : "-c exec -r \"upstart_test_rsc\" -a \"stop\" -t 3000 ",
 			"upstart_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ",
 			"upstart_monitor_line"  : "-c exec -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" -t 3000",
 			"upstart_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete\" -t 3000",
 			"upstart_cancel_line"   : "-c cancel -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
 			"upstart_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
 
 			"service_reg_line"      : "-c register_rsc -r service_test_rsc -t 3000 -C service -T lrmd_dummy_daemon",
 			"service_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"",
 			"service_unreg_line"    : "-c unregister_rsc -r \"service_test_rsc\" -t 3000",
 			"service_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"",
 			"service_start_line"    : "-c exec -r \"service_test_rsc\" -a \"start\" -t 3000 ",
 			"service_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ",
 			"service_stop_line"     : "-c exec -r \"service_test_rsc\" -a \"stop\" -t 3000 ",
 			"service_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ",
 			"service_monitor_line"  : "-c exec -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" -t 3000",
 			"service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" -t 3000",
 			"service_cancel_line"   : "-c cancel -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
 			"service_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
 
 			"lsb_reg_line"      : "-c register_rsc -r lsb_test_rsc -t 3000 -C lsb -T lrmd_dummy_daemon",
 			"lsb_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ",
 			"lsb_unreg_line"    : "-c unregister_rsc -r \"lsb_test_rsc\" -t 3000",
 			"lsb_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"",
 			"lsb_start_line"    : "-c exec -r \"lsb_test_rsc\" -a \"start\" -t 3000 ",
 			"lsb_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ",
 			"lsb_stop_line"     : "-c exec -r \"lsb_test_rsc\" -a \"stop\" -t 3000 ",
 			"lsb_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ",
 			"lsb_monitor_line"  : "-c exec -r \"lsb_test_rsc\" -a status -i \"2000\" -t 3000",
 			"lsb_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" -t 3000",
 			"lsb_cancel_line"   : "-c cancel -r \"lsb_test_rsc\" -a \"status\" -i \"2000\" -t \"3000\" ",
 			"lsb_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ",
 
 			"stonith_reg_line"      : "-c register_rsc -r stonith_test_rsc -t 3000 -C stonith -P pacemaker -T fence_dummy_monitor",
 			"stonith_reg_event"     : "-l \"NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ",
 			"stonith_unreg_line"    : "-c unregister_rsc -r \"stonith_test_rsc\" -t 3000",
 			"stonith_unreg_event"   : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"",
 			"stonith_start_line"    : "-c exec -r \"stonith_test_rsc\" -a \"start\" -t 8000 ",
 			"stonith_start_event"   : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ",
 			"stonith_stop_line"     : "-c exec -r \"stonith_test_rsc\" -a \"stop\" -t 3000 ",
 			"stonith_stop_event"    : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ",
 			"stonith_monitor_line"  : "-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" -t 3000",
 			"stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" -t 3000",
 			"stonith_cancel_line"   : "-c cancel -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
 			"stonith_cancel_event"  : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
 		}
 
 	def new_test(self, name, description):
 		test = Test(name, description, self.verbose, self.tls)
 		self.tests.append(test)
 		return test
 
	def setup_test_environment(self):
		"""Install everything the regression tests expect on the host:
		a dummy systemd/upstart/lsb daemon, two dummy fence agents, and
		(when running TLS tests) a pacemaker authkey.  Requires root."""
		os.system("service pacemaker_remote stop")
		# Start from a clean slate; this also matters because the heredocs
		# below append (>>) rather than truncate.
		self.cleanup_test_environment()

		if self.tls and not os.path.isfile("/etc/pacemaker/authkey"):
			# Remember that we generated the key so cleanup removes it again.
			self.need_authkey = 1
			os.system("mkdir -p /etc/pacemaker")
			os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1")

		### Make fake systemd daemon and unit file ###
		# Daemon just sleeps forever; the tests only need a running process.
		dummy_daemon = "#!/bin/bash\nwhile true\ndo\nsleep 5\ndone"
		dummy_service_file = ("[Unit]\n"
			"Description=Dummy Resource\n"
			"[Service]\n"
			"Type=simple\n"
			"ExecStart=/usr/sbin/lrmd_dummy_daemon\n")
		# Upstart job runs dd so the monitor_fail_upstart test can kill it.
		dummy_upstart_job = ("""
description     "Dummy service for regression tests"
exec dd if=/dev/random of=/dev/null
""")

		# Fence agent that blocks in monitor -- used to force start timeouts.
		dummy_fence_sleep_agent = ("""#!/usr/bin/python
import sys
import time
def main():
    for line in sys.stdin.readlines():
        if line.count("monitor") > 0:
            time.sleep(30000)
            sys.exit(0)
    sys.exit(-1)
if __name__ == "__main__":
    main()
""")
		# Fence agent that answers monitor and metadata requests successfully.
		dummy_fence_agent = ("""#!/usr/bin/python
import sys
def main():
    for line in sys.stdin.readlines():
        if line.count("monitor") > 0:
            sys.exit(0)
        if line.count("metadata") > 0:
            print '<resource-agent name="fence_dummy_monitor" shortdesc="Dummy Fence agent for testing">'
            print '  <longdesc>dummy description.</longdesc>'
            print '  <vendor-url>http://www.example.com</vendor-url>'
            print '  <parameters>'
            print '    <parameter name="action" unique="0" required="1">'
            print '      <getopt mixed="-o, --action=[action]"/>'
            print '      <content type="string" default="reboot"/>'
            print '      <shortdesc lang="en">Fencing Action</shortdesc>'
            print '    </parameter>'
            print '    <parameter name="port" unique="0" required="0">'
            print '      <getopt mixed="-n, --plug=[id]"/>'
            print '      <content type="string"/>'
            print '      <shortdesc lang="en">Physical plug number or name of virtual machine</shortdesc>'
            print '    </parameter>'
            print '  </parameters>'
            print '  <actions>'
            print '    <action name="on"/>'
            print '    <action name="off"/>'
            print '    <action name="monitor"/>'
            print '    <action name="metadata"/>'
            print '  </actions>'
            print '</resource-agent>'
            sys.exit(0)
    sys.exit(-1)
if __name__ == "__main__":
    main()
""")

		# Write the generated files into place via shell heredocs.
		os.system("cat <<-END >>/etc/init/lrmd_dummy_daemon.conf\n%s\nEND" % (dummy_upstart_job))
		os.system("cat <<-END >>/usr/sbin/lrmd_dummy_daemon\n%s\nEND" % (dummy_daemon))
		os.system("cat <<-END >>/lib/systemd/system/lrmd_dummy_daemon.service\n%s\nEND" % (dummy_service_file))
		os.system("chmod u+x /usr/sbin/lrmd_dummy_daemon")

		os.system("cat <<-END >>/usr/sbin/fence_dummy_sleep\n%s\nEND" % (dummy_fence_sleep_agent))
		os.system("chmod 711 /usr/sbin/fence_dummy_sleep")

		os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor\n%s\nEND" % (dummy_fence_agent))
		os.system("chmod 711 /usr/sbin/fence_dummy_monitor")

		os.system("cp /usr/share/pacemaker/tests/cts/LSBDummy /etc/init.d/lrmd_dummy_daemon")
		# NOTE(review): @CRM_CORE_DIR@ is presumably substituted by autoconf
		# when this .in template is processed -- confirm against the build.
		os.system("mkdir -p @CRM_CORE_DIR@/root")

		# Make systemd notice the new unit file.
		os.system("systemctl daemon-reload")
 
 	def cleanup_test_environment(self):
 		if self.need_authkey:
 		    os.system("rm -f /etc/pacemaker/authkey")
 
 		os.system("rm -f /lib/systemd/system/lrmd_dummy_daemon.service")
 		os.system("rm -f /etc/init.d/lrmd_dummy_daemon")
 		os.system("rm -f /usr/sbin/lrmd_dummy_daemon")
 		os.system("rm -f /usr/sbin/fence_dummy_monitor")
 		os.system("rm -f /usr/sbin/fence_dummy_sleep")
 		os.system("systemctl daemon-reload")
 
 	### These are tests that should apply to all resource classes ###
 	def build_generic_tests(self):
 		common_cmds = self.common_cmds
 
 		### register/unregister tests ###
 		for rsc in self.rsc_classes:
 			test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc))
 			test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
 
 		### start/stop tests  ###
 		for rsc in self.rsc_classes:
 			test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc))
 			test.add_cmd(common_cmds["%s_reg_line" % (rsc)]   + " " + common_cmds["%s_reg_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_stop_line" % (rsc)]  + " " + common_cmds["%s_stop_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
 
 		### monitor cancel test ###
 		for rsc in self.rsc_classes:
 			test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc))
 			test.add_cmd(common_cmds["%s_reg_line" % (rsc)]   + " " + common_cmds["%s_reg_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
 			test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
 			test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)])
 			test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
 			test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
 			test.add_cmd(common_cmds["%s_stop_line" % (rsc)]  + " " + common_cmds["%s_stop_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
 
 
 		### stop implies cancel test ###
 		for rsc in self.rsc_classes:
 			test = self.new_test("generic_stop_implies_cancel_%s" % (rsc), "Verify stopping a resource implies cancel of recurring ops for %s standard" % (rsc))
 			test.add_cmd(common_cmds["%s_reg_line" % (rsc)]   + " " + common_cmds["%s_reg_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
 			test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
 			test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
 			test.add_cmd(common_cmds["%s_stop_line" % (rsc)]  + " " + common_cmds["%s_stop_event" % (rsc)])
 			test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
 			test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
 			test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
 
 
 	### These are complex tests that involve managing multiple resouces of different types ###
 	def build_multi_rsc_tests(self):
 		common_cmds = self.common_cmds
 		# do not use service and systemd at the same time, it is the same resource.
 
 		### register start monitor stop unregister resources of each type at the same time. ###
 		test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes")
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_reg_line" % (rsc)]   + " " + common_cmds["%s_reg_event" % (rsc)])
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor is not being rescheduled ####
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)])
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_stop_line" % (rsc)]  + " " + common_cmds["%s_stop_event" % (rsc)])
 		for rsc in self.rsc_classes:
 			test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
 
 	### These are tests related to how the lrmd handles failures.  ###
 	def build_negative_tests(self):
 
 		### ocf start timeout test  ###
 		test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"3\" -t 3000 -w")
 		test.add_cmd("-l "
 			"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out\" -t 3000")
 		test.add_cmd("-c exec -r test_rsc -a stop -t 3000"
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
 		test.add_cmd("-c unregister_rsc -r test_rsc -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 
 		### stonith start timeout test  ###
 		test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_sleep\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 -w")
 		test.add_cmd("-l "
 			"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out\" -t 4000")
 		test.add_cmd("-c exec -r test_rsc -a stop -t 3000"
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
 		test.add_cmd("-c unregister_rsc -r test_rsc -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### stonith component fail ###
 		common_cmds = self.common_cmds
 		test = self.new_test("stonith_component_fail", "Kill stonith component after lrmd connects")
 		test.add_cmd(common_cmds["stonith_reg_line"]   + " " + common_cmds["stonith_reg_event"])
 		test.add_cmd(common_cmds["stonith_start_line"] + " " + common_cmds["stonith_start_event"])
 
 		test.add_cmd("-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"600000\" "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 
 		test.add_cmd_and_kill("killall -9 -q stonithd" ,"-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:unknown error op_status:error\" -t 15000")
 		test.add_cmd(common_cmds["stonith_unreg_line"] + " " + common_cmds["stonith_unreg_event"])
 
 
 		### monitor fail for ocf resources ###
 		test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 		test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
-		test.add_cmd_and_kill("rm -f /var/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
+		test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
 		test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 3000")
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### verify notify changes only for monitor operation.  ###
 		test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 -o "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 -o "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
-		test.add_cmd_and_kill("rm -f /var/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
+		test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
 		test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 3000")
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### monitor fail for systemd resource ###
 		if "systemd" in self.rsc_classes:
 			test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported..")
 			test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T lrmd_dummy_daemon -t 3000 "
 				     "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 			test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd_and_kill("killall -9 -q lrmd_dummy_daemon", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000")
 			test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 			test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 3000")
 			test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### monitor fail for upstart resource ###
 		if "upstart" in self.rsc_classes:
 			test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported..")
 			test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T lrmd_dummy_daemon -t 3000 "
 				     "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 			test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd_and_kill("killall -9 -q dd", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000")
 			test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 			test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 3000")
 			test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 			test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Cancel non-existent operation on a resource ###
 		test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 		test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 		test.add_expected_fail_cmd("-c cancel -r test_rsc -a \"monitor\" -i 1234 -t \"3000\" " ### interval is wrong, should fail
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 		test.add_expected_fail_cmd("-c cancel -r test_rsc -a stop -i 100 -t \"3000\" " ### action name is wrong, should fail
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Attempt to invoke non-existent rsc id ###
 		test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.")
 		test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:complete\" ")
 		test.add_expected_fail_cmd("-c exec -r test_rsc -a stop -t 3000"
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
 		test.add_expected_fail_cmd("-c exec -r test_rsc -a monitor -i 3000 -t 3000"
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 		test.add_expected_fail_cmd("-c cancel -r test_rsc -a start -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Register and start a resource that doesn't exist, systemd  ###
 		if "systemd" in self.rsc_classes:
 			test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure")
 			test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 -t 3000 "
 				     "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ")
 			test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		if "upstart" in self.rsc_classes:
 			test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure")
 			test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 -t 3000 "
 				     "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 			test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ")
 			test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 				     "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Register and start a resource that doesn't exist, ocf ###
 		test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Register ocf with non-existent provider  ###
 		test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Register ocf with empty provider field  ###
 		test = self.new_test("start_ocf_no_provider", "Register ocf agent with a no provider, verify expected failure.")
 		test.add_expected_fail_cmd("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 	### These are tests that target specific cases ###
 	def build_custom_tests(self):
 		### start delay then stop test ###
 		test = self.new_test("start_delay", "Verify start delay works as expected.")
 		test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-c exec -r test_rsc -s 6000 -a start -w -t 6000")
 		test.add_expected_fail_cmd("-l "
 			"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 2000")
 		test.add_cmd("-l "
 			"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 6000")
 		test.add_cmd("-c exec -r test_rsc -a stop -t 3000"
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
 		test.add_cmd("-c unregister_rsc -r test_rsc -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### start delay, but cancel before it gets a chance to start.  ###
 		test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.")
 		test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-c exec -r test_rsc -s 5000 -a start -w -t 4000")
 		test.add_cmd("-c cancel -r test_rsc -a start -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ")
 		test.add_expected_fail_cmd("-l "
 			"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 5000")
 		test.add_cmd("-c unregister_rsc -r test_rsc -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### Register a bunch of resources, verify we can get info on them ###
 		test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.")
 		if "systemd" in self.rsc_classes:
 			test.add_cmd("-c register_rsc -r rsc1 -C systemd -T lrmd_dummy_daemon -t 3000 ")
 			test.add_cmd("-c get_rsc_info -r rsc1 ")
 			test.add_cmd("-c unregister_rsc -r rsc1 -t 3000 ")
 			test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ")
 
 		if "upstart" in self.rsc_classes:
 			test.add_cmd("-c register_rsc -r rsc1 -C upstart -T lrmd_dummy_daemon -t 3000 ")
 			test.add_cmd("-c get_rsc_info -r rsc1 ")
 			test.add_cmd("-c unregister_rsc -r rsc1 -t 3000 ")
 			test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ")
 
 		test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker -t 3000 ")
 		test.add_cmd("-c get_rsc_info -r rsc2 ")
 		test.add_cmd("-c unregister_rsc -r rsc2 -t 3000 ")
 		test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ")
 
 		### Register duplicate, verify only one entry exists and can still be removed.
 		test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.")
 		test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker -t 3000 ")
 		test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy")
 		test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker -t 3000 ")
 		test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy")
 		test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker -t 3000 ")
 		test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Stateful")
 		test.add_cmd("-c unregister_rsc -r rsc2 -t 3000 ")
 		test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ")
 
 		### verify the option to only send notification to the original client. ###
 		test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.")
 		test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 3000 "
 			"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 3000 "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
 		test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 3000 -n "
 			"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
 		# this will fail because the monitor notifications should only go to the original caller, which no longer exists.
 		test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 3000")
 		test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" ")
 		test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 3000 "
 			"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
 
 		### get metadata ###
 		test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource")
 		test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\""
 			,"resource-agent name=\"Dummy\"")
 		test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"")
 		test.add_expected_fail_cmd("-c metadata -P \"pacemaker\" -T \"Stateful\"")
 		test.add_expected_fail_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"")
 
 		### get metadata ###
 		test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource")
 		test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"lrmd_dummy_daemon\""
 			,"resource-agent name='lrmd_dummy_daemon'")
 
 		### get stonith metadata ###
 		test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource")
 		test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_monitor\"",
 			"resource-agent name=\"fence_dummy_monitor\"")
 
 		### get metadata ###
 		if "systemd" in self.rsc_classes:
 			test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource")
 			test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"lrmd_dummy_daemon\""
 				,"resource-agent name=\"lrmd_dummy_daemon\"")
 
 		### get metadata ###
 		if "upstart" in self.rsc_classes:
 			test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource")
 			test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"lrmd_dummy_daemon\""
 				,"resource-agent name=\"lrmd_dummy_daemon\"")
 
 		### get ocf providers  ###
 		test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.")
 		test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker")
 		test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker")
 
 		### Verify agents only exist in their lists ###
 		test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.")
 		test.add_cmd_check_stdout("-c list_agents ", "Stateful")                                  ### ocf ###
 		test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful")
 		test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful")                        ### should not exist
 		test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful")                    ### should not exist
 		test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon")                         ### init.d ###
 		test.add_cmd_check_stdout("-c list_agents -C lsb", "lrmd_dummy_daemon")
 		test.add_cmd_check_stdout("-c list_agents -C service", "lrmd_dummy_daemon")
 		test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon")               ### should not exist
 
 		test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon")               ### should not exist
 		test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy_monitor")             ### should not exist
 		test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy_monitor")         ### should not exist
 		test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy_monitor")             ### should not exist
 
 		if "systemd" in self.rsc_classes:
 			test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon")                 ### systemd ###
 			test.add_cmd_check_stdout("-c list_agents -C service", "lrmd_dummy_daemon")
 			test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful")		  ### should not exist
 			test.add_cmd_check_stdout("-c list_agents -C systemd", "lrmd_dummy_daemon")
 			test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy_monitor") ### should not exist
 
 		if "upstart" in self.rsc_classes:
 			test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon")                 ### upstart ###
 			test.add_cmd_check_stdout("-c list_agents -C service", "lrmd_dummy_daemon")
 			test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful")		  ### should not exist
 			test.add_cmd_check_stdout("-c list_agents -C upstart", "lrmd_dummy_daemon")
 			test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy_monitor") ### should not exist
 
 		if "stonith" in self.rsc_classes:
 			test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy_monitor")     ### stonith ###
 			test.add_cmd_check_stdout("-c list_agents -C stonith", "", "lrmd_dummy_daemon")   ### should not exist
 			test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful")            ### should not exist
 			test.add_cmd_check_stdout("-c list_agents ", "fence_dummy_monitor")
 
 	def print_list(self):
 		print "\n==== %d TESTS FOUND ====" % (len(self.tests))
 		print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")
 		print "%35s - %s" % ("--------------------", "--------------------")
 		for test in self.tests:
 			print "%35s - %s" % (test.name, test.description)
 		print "==== END OF LIST ====\n"
 
 	def run_single(self, name):
 		for test in self.tests:
 			if test.name == name:
 				test.run()
 				break;
 
 	def run_tests_matching(self, pattern):
 		for test in self.tests:
 			if test.name.count(pattern) != 0:
 				test.run()
 
 	def run_tests(self):
 		for test in self.tests:
 			test.run()
 
 	def exit(self):
 		for test in self.tests:
 			if test.executed == 0:
 				continue
 
 			if test.get_exitcode() != 0:
 				sys.exit(-1)
 
 		sys.exit(0);
 
 	def print_results(self):
 		failures = 0;
 		success = 0;
 		print "\n\n======= FINAL RESULTS =========="
 		print "\n--- FAILURE RESULTS:"
 		for test in self.tests:
 			if test.executed == 0:
 				continue
 
 			if test.get_exitcode() != 0:
 				failures = failures + 1
 				test.print_result("    ")
 			else:
 				success = success + 1
 
 		if failures == 0:
 			print "    None"
 
 		print "\n--- TOTALS\n    Pass:%d\n    Fail:%d\n" % (success, failures)
 
 class TestOptions:
 	def __init__(self):
 		self.options = {}
 		self.options['list-tests'] = 0
 		self.options['run-all'] = 1
 		self.options['run-only'] = ""
 		self.options['run-only-pattern'] = ""
 		self.options['verbose'] = 0
 		self.options['invalid-arg'] = ""
 		self.options['show-usage'] = 0
 		self.options['pacemaker-remote'] = 0
 
 	def build_options(self, argv):
 		args = argv[1:]
 		skip = 0
 		for i in range(0, len(args)):
 			if skip:
 				skip = 0
 				continue
 			elif args[i] == "-h" or args[i] == "--help":
 				self.options['show-usage'] = 1
 			elif args[i] == "-l" or args[i] == "--list-tests":
 				self.options['list-tests'] = 1
 			elif args[i] == "-V" or args[i] == "--verbose":
 				self.options['verbose'] = 1
 			elif args[i] == "-R" or args[i] == "--pacemaker-remote":
 				self.options['pacemaker-remote'] = 1
 			elif args[i] == "-r" or args[i] == "--run-only":
 				self.options['run-only'] = args[i+1]
 				skip = 1
 			elif args[i] == "-p" or args[i] == "--run-only-pattern":
 				self.options['run-only-pattern'] = args[i+1]
 				skip = 1
 
 	def show_usage(self):
 		print "usage: " + sys.argv[0] + " [options]"
 		print "If no options are provided, all tests will run"
 		print "Options:"
 		print "\t [--help | -h]                        Show usage"
 		print "\t [--list-tests | -l]                  Print out all registered tests."
 		print "\t [--run-only | -r 'testname']         Run a specific test"
 		print "\t [--verbose | -V]                     Verbose output"
 		print "\t [--pacemaker-remote | -R             Test pacemaker-remote binary instead of lrmd."
 		print "\t [--run-only-pattern | -p 'string']   Run only tests containing the string value"
 		print "\n\tExample: Run only the test 'start_top'"
 		print "\t\t python ./regression.py --run-only start_stop"
 		print "\n\tExample: Run only the tests with the string 'systemd' present in them"
 		print "\t\t python ./regression.py --run-only-pattern systemd"
 
 
 def main(argv):
 	o = TestOptions()
 	o.build_options(argv)
 
 	tests = Tests(o.options['verbose'], o.options['pacemaker-remote'])
 
 	tests.build_generic_tests()
 	tests.build_multi_rsc_tests()
 	tests.build_negative_tests()
 	tests.build_custom_tests()
 
 	tests.setup_test_environment()
 
 	print "Starting ..."
 
 	if o.options['list-tests']:
 		tests.print_list()
 	elif o.options['show-usage']:
 		o.show_usage()
 	elif o.options['run-only-pattern'] != "":
 		tests.run_tests_matching(o.options['run-only-pattern'])
 		tests.print_results()
 	elif o.options['run-only'] != "":
 		tests.run_single(o.options['run-only'])
 		tests.print_results()
 	else:
 		tests.run_tests()
 		tests.print_results()
 
 	tests.cleanup_test_environment()
 	tests.exit()
 
# Script entry point: forward the full argv (including the program name) to main().
if __name__=="__main__":
	main(sys.argv)