diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in
index 21efb587f9..a3397d07a8 100755
--- a/lrmd/regression.py.in
+++ b/lrmd/regression.py.in
@@ -1,1121 +1,1121 @@
#!/usr/bin/python
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import subprocess
import shlex
import time
# Where to find test binaries
# Prefer the source tree if available
build_dir="@abs_top_builddir@"
test_dir=sys.path[0]
new_path=os.environ['PATH']
if os.path.exists("%s/regression.py.in" % test_dir):
print "Running tests from the source tree: %s (%s)" % (build_dir, test_dir)
new_path = "%s/lrmd:%s" % (build_dir, new_path) # For lrmd, lrmd_test and pacemaker_remoted
new_path = "%s/tools:%s" % (build_dir, new_path) # For crm_resource
new_path = "%s/fencing:%s" % (build_dir, new_path) # For stonithd
else:
print "Running tests from the install tree: @CRM_DAEMON_DIR@ (not %s)" % test_dir
new_path = "@CRM_DAEMON_DIR@:%s" % (new_path) # For stonithd, lrmd, lrmd_test and pacemaker_remoted
print new_path
os.environ['PATH']=new_path
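### Run a command and return its stdout split into lines (or 0 immediately when no_wait is set) ###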
def output_from_command(command, no_wait=0):
test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
if no_wait == 0:
test.wait()
else:
return 0
return test.communicate()[0].split("\n")
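### A single regression test: a named sequence of commands with expected exit codes and stdout checks ###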
class Test:
def __init__(self, name, description, verbose = 0, tls = 0):
self.name = name
self.description = description
self.cmds = []
if tls:
self.daemon_location = "pacemaker_remoted"
else:
self.daemon_location = "lrmd"
self.test_tool_location = "lrmd_test"
self.verbose = verbose
self.tls = tls
self.result_txt = ""
self.cmd_tool_output = ""
self.result_exitcode = 0
self.lrmd_process = None
self.stonith_process = None
self.executed = 0
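### Append a command entry to the test; lrmd_test invocations get -V in verbose mode and -S when testing the TLS backend ###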
def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None):
if self.verbose and cmd == self.test_tool_location:
args = args + " -V "
if (cmd == self.test_tool_location) and self.tls:
args = args + " -S "
self.cmds.append(
{
"cmd" : cmd,
"kill" : kill,
"args" : args,
"expected_exitcode" : exitcode,
"stdout_match" : stdout_match,
"stdout_negative_match" : stdout_negative_match,
"no_wait" : no_wait,
"cmd_output" : "",
}
)
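### Kill any leftover daemons, then start stonithd (unless testing TLS) and the lrmd or pacemaker_remoted ###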
def start_environment(self):
### make sure we are in full control here ###
cmd = shlex.split("killall -q -9 stonithd lt-stonithd lrmd lt-lrmd lrmd_test lt-lrmd_test pacemaker_remoted")
test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
test.wait()
additional_args = ""
if self.tls == 0:
self.stonith_process = subprocess.Popen(shlex.split("stonithd -s"))
if self.verbose:
additional_args = additional_args + " -V"
self.lrmd_process = subprocess.Popen(shlex.split("%s %s -l /tmp/lrmd-regression.log" % (self.daemon_location, additional_args)))
time.sleep(1)
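### Stop the daemons started by start_environment, dumping the lrmd log when verbose ###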
def clean_environment(self):
if self.lrmd_process:
self.lrmd_process.terminate()
self.lrmd_process.wait()
if self.verbose:
print "Daemon output"
f = open('/tmp/lrmd-regression.log', 'r')
for line in f.readlines():
print line.strip()
os.remove('/tmp/lrmd-regression.log')
if self.stonith_process:
self.stonith_process.terminate()
self.stonith_process.wait()
self.lrmd_process = None
self.stonith_process = None
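### Helpers for queueing commands: plain system commands, lrmd_test invocations, expected failures, and commands paired with a kill ###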
def add_sys_cmd(self, cmd, args):
self.__new_cmd(cmd, args, 0, "")
def add_sys_cmd_no_wait(self, cmd, args):
self.__new_cmd(cmd, args, 0, "", 1)
def add_cmd_check_stdout(self, args, match, no_match = ""):
self.__new_cmd(self.test_tool_location, args, 0, match, 0, no_match)
def add_cmd(self, args):
self.__new_cmd(self.test_tool_location, args, 0, "")
def add_cmd_and_kill(self, killProc, args):
self.__new_cmd(self.test_tool_location, args, 0, "", kill=killProc)
def add_expected_fail_cmd(self, args):
self.__new_cmd(self.test_tool_location, args, 1, "")
def get_exitcode(self):
return self.result_exitcode
def print_result(self, filler):
print "%s%s" % (filler, self.result_txt)
def run_cmd(self, args):
cmd = shlex.split(args['args'])
cmd.insert(0, args['cmd'])
if self.verbose:
print "\n\nRunning: "+" ".join(cmd)
test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
if args['kill']:
if self.verbose:
print "Also running: "+args['kill']
### Typically the kill argument is used to detect some sort of
### failure. Without yielding for a few seconds here, the process
### launched earlier that is listening for the failure may not have time
### to connect to the lrmd.
time.sleep(2)
subprocess.Popen(shlex.split(args['kill']))
if args['no_wait'] == 0:
test.wait()
else:
return 0
output = test.communicate()[0]
if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0:
test.returncode = -2
print "STDOUT string '%s' was not found in cmd output" % (args['stdout_match'])
if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0:
test.returncode = -2
print "STDOUT string '%s' was found in cmd output" % (args['stdout_negative_match'])
args['cmd_output'] = output
return test.returncode
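### Start the environment, run each queued command in order (stopping at the first unexpected exit code), then clean up ###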
def run(self):
res = 0
i = 1
if self.tls and self.name.count("stonith") != 0:
self.result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name)
print self.result_txt
return res
self.start_environment()
if self.verbose:
print "\n--- START TEST - %s" % self.name
self.result_txt = "SUCCESS - '%s'" % (self.name)
self.result_exitcode = 0
for cmd in self.cmds:
res = self.run_cmd(cmd)
if res != cmd['expected_exitcode']:
print cmd['cmd_output']
print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode'])
self.result_txt = "FAILURE - '%s' failed at step %d. Command: lrmd_test %s" % (self.name, i, cmd['args'])
self.result_exitcode = -1
break
else:
if self.verbose:
print cmd['cmd_output'].strip()
print "Step %d SUCCESS" % (i)
i = i + 1
self.clean_environment()
print self.result_txt
if self.verbose:
print "--- END TEST - %s\n" % self.name
self.executed = 1
return res
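### Container that builds and runs the full set of regression tests ###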
class Tests:
def __init__(self, verbose = 0, tls = 0):
self.tests = []
self.verbose = verbose
self.tls = tls
self.rsc_classes = output_from_command("crm_resource --list-standards")
self.rsc_classes = self.rsc_classes[:-1] # Strip trailing empty line
self.need_authkey = 0
self.action_timeout = " -t 5000 "
if self.tls:
self.rsc_classes.remove("stonith")
if "systemd" in self.rsc_classes:
# lrmd_dummy_daemon requires this module; we import it here just
# to guarantee it is installed before allowing this script to run.
# Otherwise, running without it would make all the systemd tests
# look like they fail, which is misleading. Better to have the
# import itself fail up front.
import systemd.daemon
print "Testing "+repr(self.rsc_classes)
self.common_cmds = {
"ocf_reg_line" : "-c register_rsc -r ocf_test_rsc "+self.action_timeout+" -C ocf -P pacemaker -T Dummy",
"ocf_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"",
"ocf_unreg_line" : "-c unregister_rsc -r \"ocf_test_rsc\" "+self.action_timeout,
"ocf_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"",
"ocf_start_line" : "-c exec -r \"ocf_test_rsc\" -a \"start\" "+self.action_timeout,
"ocf_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ",
"ocf_stop_line" : "-c exec -r \"ocf_test_rsc\" -a \"stop\" "+self.action_timeout,
"ocf_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ",
"ocf_monitor_line" : "-c exec -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout,
"ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout,
- "ocf_cancel_line" : "-c cancel -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
+ "ocf_cancel_line" : "-c cancel -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ",
"ocf_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
"systemd_reg_line" : "-c register_rsc -r systemd_test_rsc "+self.action_timeout+" -C systemd -T lrmd_dummy_daemon",
"systemd_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"",
"systemd_unreg_line" : "-c unregister_rsc -r \"systemd_test_rsc\" "+self.action_timeout,
"systemd_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"",
"systemd_start_line" : "-c exec -r \"systemd_test_rsc\" -a \"start\" "+self.action_timeout,
"systemd_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ",
"systemd_stop_line" : "-c exec -r \"systemd_test_rsc\" -a \"stop\" "+self.action_timeout,
"systemd_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ",
"systemd_monitor_line" : "-c exec -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout,
"systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout,
- "systemd_cancel_line" : "-c cancel -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
+ "systemd_cancel_line" : "-c cancel -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ",
"systemd_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
"upstart_reg_line" : "-c register_rsc -r upstart_test_rsc "+self.action_timeout+" -C upstart -T lrmd_dummy_daemon",
"upstart_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"",
"upstart_unreg_line" : "-c unregister_rsc -r \"upstart_test_rsc\" "+self.action_timeout,
"upstart_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"",
"upstart_start_line" : "-c exec -r \"upstart_test_rsc\" -a \"start\" "+self.action_timeout,
"upstart_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ",
"upstart_stop_line" : "-c exec -r \"upstart_test_rsc\" -a \"stop\" "+self.action_timeout,
"upstart_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ",
"upstart_monitor_line" : "-c exec -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout,
"upstart_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout,
- "upstart_cancel_line" : "-c cancel -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
+ "upstart_cancel_line" : "-c cancel -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ",
"upstart_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
"service_reg_line" : "-c register_rsc -r service_test_rsc "+self.action_timeout+" -C service -T LSBDummy",
"service_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"",
"service_unreg_line" : "-c unregister_rsc -r \"service_test_rsc\" "+self.action_timeout,
"service_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"",
"service_start_line" : "-c exec -r \"service_test_rsc\" -a \"start\" "+self.action_timeout,
"service_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ",
"service_stop_line" : "-c exec -r \"service_test_rsc\" -a \"stop\" "+self.action_timeout,
"service_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ",
"service_monitor_line" : "-c exec -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout,
"service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout,
- "service_cancel_line" : "-c cancel -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
+ "service_cancel_line" : "-c cancel -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ",
"service_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
"lsb_reg_line" : "-c register_rsc -r lsb_test_rsc "+self.action_timeout+" -C lsb -T LSBDummy",
"lsb_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ",
"lsb_unreg_line" : "-c unregister_rsc -r \"lsb_test_rsc\" "+self.action_timeout,
"lsb_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"",
"lsb_start_line" : "-c exec -r \"lsb_test_rsc\" -a \"start\" "+self.action_timeout,
"lsb_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ",
"lsb_stop_line" : "-c exec -r \"lsb_test_rsc\" -a \"stop\" "+self.action_timeout,
"lsb_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ",
"lsb_monitor_line" : "-c exec -r \"lsb_test_rsc\" -a status -i \"2000\" "+self.action_timeout,
"lsb_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" "+self.action_timeout,
- "lsb_cancel_line" : "-c cancel -r \"lsb_test_rsc\" -a \"status\" -i \"2000\" -t \"3000\" ",
+ "lsb_cancel_line" : "-c cancel -r \"lsb_test_rsc\" -a \"status\" -i \"2000\" -t \"6000\" ",
"lsb_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ",
"heartbeat_reg_line" : "-c register_rsc -r hb_test_rsc "+self.action_timeout+" -C heartbeat -T HBDummy",
"heartbeat_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:hb_test_rsc action:none rc:ok op_status:complete\" ",
"heartbeat_unreg_line" : "-c unregister_rsc -r \"hb_test_rsc\" "+self.action_timeout,
"heartbeat_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:hb_test_rsc action:none rc:ok op_status:complete\"",
"heartbeat_start_line" : "-c exec -r \"hb_test_rsc\" -a \"start\" -k 1 -v a -k 2 -v b "+self.action_timeout,
"heartbeat_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:hb_test_rsc action:start rc:ok op_status:complete\" ",
"heartbeat_stop_line" : "-c exec -r \"hb_test_rsc\" -a \"stop\" -k 1 -v a -k 2 -v b "+self.action_timeout,
"heartbeat_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:hb_test_rsc action:stop rc:ok op_status:complete\" ",
"heartbeat_monitor_line" : "-c exec -r \"hb_test_rsc\" -a status -k 1 -v a -k 2 -v b -i \"2000\" "+self.action_timeout,
"heartbeat_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:hb_test_rsc action:status rc:ok op_status:complete\" "+self.action_timeout,
- "heartbeat_cancel_line" : "-c cancel -r \"hb_test_rsc\" -a \"status\" -k 1 -v a -k 2 -v b -i \"2000\" -t \"3000\" ",
+ "heartbeat_cancel_line" : "-c cancel -r \"hb_test_rsc\" -a \"status\" -k 1 -v a -k 2 -v b -i \"2000\" -t \"6000\" ",
"heartbeat_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:hb_test_rsc action:status rc:ok op_status:Cancelled\" ",
"stonith_reg_line" : "-c register_rsc -r stonith_test_rsc "+self.action_timeout+" -C stonith -P pacemaker -T fence_dummy_monitor",
"stonith_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ",
"stonith_unreg_line" : "-c unregister_rsc -r \"stonith_test_rsc\" "+self.action_timeout,
"stonith_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"",
"stonith_start_line" : "-c exec -r \"stonith_test_rsc\" -a \"start\" -t 8000 ",
"stonith_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ",
"stonith_stop_line" : "-c exec -r \"stonith_test_rsc\" -a \"stop\" "+self.action_timeout,
"stonith_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ",
"stonith_monitor_line" : "-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout,
"stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout,
- "stonith_cancel_line" : "-c cancel -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" -t \"3000\" ",
+ "stonith_cancel_line" : "-c cancel -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ",
"stonith_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ",
}
def new_test(self, name, description):
test = Test(name, description, self.verbose, self.tls)
self.tests.append(test)
return test
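### Install the dummy daemons, agents, and authkey the tests rely on: lrmd_dummy_daemon (systemd/upstart), LSBDummy, HBDummy, the fence_dummy agents, and the OCF helpers ###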
def setup_test_environment(self):
os.system("service pacemaker_remote stop")
self.cleanup_test_environment()
if self.tls and not os.path.isfile("/etc/pacemaker/authkey"):
self.need_authkey = 1
os.system("mkdir -p /etc/pacemaker")
os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1")
### Make fake systemd daemon and unit file ###
dummy_daemon = """#!/bin/python
import time, systemd.daemon
time.sleep(3)
systemd.daemon.notify("READY=1")
while True: time.sleep(5)
"""
dummy_service_file = """
[Unit]
Description=Dummy resource that takes a while to start
[Service]
Type=notify
ExecStart=/usr/sbin/lrmd_dummy_daemon
"""
dummy_upstart_job = ("""
description "Dummy service for regression tests"
exec dd if=/dev/random of=/dev/null
""")
dummy_fence_sleep_agent = ("""#!/usr/bin/python
import sys
import time
def main():
for line in sys.stdin.readlines():
if line.count("monitor") > 0:
time.sleep(30000)
sys.exit(0)
sys.exit(-1)
if __name__ == "__main__":
main()
""")
dummy_fence_agent = ("""#!/usr/bin/python
import sys
def main():
for line in sys.stdin.readlines():
if line.count("monitor") > 0:
sys.exit(0)
if line.count("metadata") > 0:
print '<resource-agent name="fence_dummy_monitor" shortdesc="Dummy fence agent for testing">'
print ' <longdesc>dummy description.</longdesc>'
print ' <vendor-url>http://www.example.com</vendor-url>'
print ' <parameters>'
print ' <parameter name="action" unique="0" required="1">'
print ' <getopt mixed="-o, --action=[action]"/>'
print ' <content type="string" default="reboot"/>'
print ' <shortdesc lang="en">Fencing Action</shortdesc>'
print ' </parameter>'
print ' <parameter name="port" unique="0" required="0">'
print ' <getopt mixed="-n, --plug=[id]"/>'
print ' <content type="string"/>'
print ' <shortdesc lang="en">Physical plug number or name of virtual machine</shortdesc>'
print ' </parameter>'
print ' </parameters>'
print ' <actions>'
print ' <action name="on"/>'
print ' <action name="off"/>'
print ' <action name="monitor"/>'
print ' <action name="metadata"/>'
print ' </actions>'
print '</resource-agent>'
sys.exit(0)
sys.exit(-1)
if __name__ == "__main__":
main()
""")
os.system("cat <<-END >>/etc/init/lrmd_dummy_daemon.conf\n%s\nEND" % (dummy_upstart_job))
os.system("cat <<-END >>/usr/sbin/lrmd_dummy_daemon\n%s\nEND" % (dummy_daemon))
os.system("cat <<-END >>/lib/systemd/system/lrmd_dummy_daemon.service\n%s\nEND" % (dummy_service_file))
os.system("chmod a+x /usr/sbin/lrmd_dummy_daemon")
os.system("cat <<-END >>/usr/sbin/fence_dummy_sleep\n%s\nEND" % (dummy_fence_sleep_agent))
os.system("chmod 711 /usr/sbin/fence_dummy_sleep")
os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor\n%s\nEND" % (dummy_fence_agent))
os.system("chmod 711 /usr/sbin/fence_dummy_monitor")
if os.path.exists("%s/cts/LSBDummy" % build_dir):
print "Using %s/cts/LSBDummy" % build_dir
os.system("cp %s/cts/LSBDummy /etc/init.d/LSBDummy" % build_dir)
if not os.path.exists("@OCF_RA_DIR@/pacemaker"):
os.system("mkdir -p @OCF_RA_DIR@/pacemaker/")
# Install helper OCF agents
for ra in [ "Dummy", "Stateful", "ping" ]:
os.system("cp %s/extra/resources/%s @OCF_RA_DIR@/pacemaker/%s" % (build_dir, ra, ra))
os.system("chmod a+x @OCF_RA_DIR@/pacemaker/%s" % (ra))
else:
# Assume it's installed
print "Using @datadir@/@PACKAGE@/tests/cts/LSBDummy"
os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy")
os.system("chmod a+x /etc/init.d/LSBDummy")
os.system("ls -al /etc/init.d/LSBDummy")
os.system("mkdir -p @CRM_CORE_DIR@/root")
os.system("mkdir -p /etc/ha.d/resource.d")
if os.path.exists("%s/cts/HBDummy" % build_dir):
print "Using %s/cts/HBDummy" % build_dir
os.system("cp %s/cts/HBDummy /etc/ha.d/resource.d/HBDummy" % build_dir)
else:
# Assume it's installed
print "Using @datadir@/@PACKAGE@/tests/cts/HBDummy"
os.system("cp @datadir@/@PACKAGE@/tests/cts/HBDummy /etc/ha.d/resource.d/HBDummy")
os.system("chmod a+x /etc/ha.d/resource.d/HBDummy")
os.system("ls -al /etc/ha.d/resource.d/HBDummy")
if os.path.exists("/bin/systemctl"):
os.system("systemctl daemon-reload")
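### Remove everything installed by setup_test_environment ###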
def cleanup_test_environment(self):
if self.need_authkey:
os.system("rm -f /etc/pacemaker/authkey")
os.system("rm -f /etc/init.d/LSBDummy")
os.system("rm -f /etc/ha.d/resource.d/HBDummy")
os.system("rm -f /lib/systemd/system/lrmd_dummy_daemon.service")
os.system("rm -f /usr/sbin/lrmd_dummy_daemon")
os.system("rm -f /usr/sbin/fence_dummy_monitor")
os.system("rm -f /usr/sbin/fence_dummy_sleep")
if os.path.exists("/bin/systemctl"):
os.system("systemctl daemon-reload")
### These are tests that should apply to all resource classes ###
def build_generic_tests(self):
common_cmds = self.common_cmds
### register/unregister tests ###
for rsc in self.rsc_classes:
test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc))
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### start/stop tests ###
for rsc in self.rsc_classes:
test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc))
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### monitor cancel test ###
for rsc in self.rsc_classes:
test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc))
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)])
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### monitor duplicate test ###
for rsc in self.rsc_classes:
test = self.new_test("generic_monitor_duplicate_%s" % (rsc), "Test creation and canceling of duplicate monitors for %s standard" % (rsc))
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
# Add the duplicate monitors.
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
# verify we still get update events
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
# cancel the monitor; if the duplicate merged with the original, we should no longer see monitor updates
test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)])
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### stop implies cancel test ###
for rsc in self.rsc_classes:
test = self.new_test("generic_stop_implies_cancel_%s" % (rsc), "Verify stopping a resource implies cancel of recurring ops for %s standard" % (rsc))
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled ####
test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ###
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### These are complex tests that involve managing multiple resources of different types ###
def build_multi_rsc_tests(self):
common_cmds = self.common_cmds
# do not use service and systemd at the same time; they point at the same resource.
### register start monitor stop unregister resources of each type at the same time. ###
test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes")
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor is not being rescheduled ####
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)])
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
for rsc in self.rsc_classes:
test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])
### These are tests related to how the lrmd handles failures. ###
def build_negative_tests(self):
### ocf start timeout test ###
test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"5\" -t 1000 -w") # -t must be less than self.action_timeout
test.add_cmd("-l "
"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### stonith start timeout test ###
test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_sleep\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 -w") # -t must be less than self.action_timeout
test.add_cmd("-l "
"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### stonith component fail ###
common_cmds = self.common_cmds
test = self.new_test("stonith_component_fail", "Kill stonith component after lrmd connects")
test.add_cmd(common_cmds["stonith_reg_line"] + " " + common_cmds["stonith_reg_event"])
test.add_cmd(common_cmds["stonith_start_line"] + " " + common_cmds["stonith_start_event"])
test.add_cmd("-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"600000\" "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd_and_kill("killall -9 -q stonithd lt-stonithd" ,"-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:unknown error op_status:error\" -t 15000")
test.add_cmd(common_cmds["stonith_unreg_line"] + " " + common_cmds["stonith_unreg_event"])
### monitor fail for ocf resources ###
test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
- test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
+ test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout)
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### verify notify changes only for monitor operation. ###
test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+" -o "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+" -o "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000")
- test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
+ test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout)
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### monitor fail for systemd resource ###
if "systemd" in self.rsc_classes:
test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T lrmd_dummy_daemon "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd_and_kill("killall -9 -q lrmd_dummy_daemon", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000")
- test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
+ test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout)
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### monitor fail for upstart resource ###
if "upstart" in self.rsc_classes:
test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T lrmd_dummy_daemon "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd_and_kill("killall -9 -q dd", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000")
- test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" "
+ test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout)
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Cancel non-existent operation on a resource ###
test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
- test.add_expected_fail_cmd("-c cancel -r test_rsc -a \"monitor\" -i 1234 -t \"3000\" " ### interval is wrong, should fail
+ test.add_expected_fail_cmd("-c cancel -r test_rsc -a \"monitor\" -i 1234 -t \"6000\" " ### interval is wrong, should fail
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
- test.add_expected_fail_cmd("-c cancel -r test_rsc -a stop -i 100 -t \"3000\" " ### action name is wrong, should fail
+ test.add_expected_fail_cmd("-c cancel -r test_rsc -a stop -i 100 -t \"6000\" " ### action name is wrong, should fail
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Attempt to invoke non-existent rsc id ###
test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.")
test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:complete\" ")
test.add_expected_fail_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
- test.add_expected_fail_cmd("-c exec -r test_rsc -a monitor -i 3000 "+self.action_timeout+
+ test.add_expected_fail_cmd("-c exec -r test_rsc -a monitor -i 6000 "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
test.add_expected_fail_cmd("-c cancel -r test_rsc -a start "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Register and start a resource that doesn't exist, systemd ###
if "systemd" in self.rsc_classes:
test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
if "upstart" in self.rsc_classes:
test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Register and start a resource that doesn't exist, ocf ###
test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Register ocf with non-existent provider ###
test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Register ocf with empty provider field ###
test = self.new_test("start_ocf_no_provider", "Register ocf agent with no provider, verify expected failure.")
test.add_expected_fail_cmd("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### stress tests ###
def build_stress_tests(self):
timeout = "-t 20000"
iterations = 25
test = self.new_test("ocf_stress", "Verify OCF agent handling works under load")
for i in range(iterations):
test.add_cmd("-c register_rsc -r rsc_%s %s -C ocf -P heartbeat -T Dummy -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c exec -r rsc_%s -a monitor %s -i 1000 -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete\"" % (i, timeout, i))
for i in range(iterations):
test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i))
if "systemd" in self.rsc_classes:
test = self.new_test("systemd_stress", "Verify systemd dbus connection works under load")
for i in range(iterations):
test.add_cmd("-c register_rsc -r rsc_%s %s -C systemd -T lrmd_dummy_daemon -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c exec -r rsc_%s -a monitor %s -i 1000 -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete\"" % (i, timeout, i))
for i in range(iterations):
test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i))
test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i))
iterations = 9
timeout = "-t 30000"
### Verify recurring op in-flight collision is handled in series properly
test = self.new_test("rsc_inflight_collision", "Verify recurring ops do not collide with other operations for the same rsc.")
test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy "
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -a start %s -k op_sleep -v 1 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\"" % (timeout))
for i in range(iterations):
test.add_cmd("-c exec -r test_rsc -a monitor %s -i 100%d -k op_sleep -v 2 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\"" % (timeout, i))
# test.add_sys_cmd("sleep", "-al @CRM_RSCTMP_DIR@")
test.add_cmd("-c exec -r test_rsc -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\"" % (timeout))
test.add_cmd("-c unregister_rsc -r test_rsc %s -l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\"" % (timeout))
### These are tests that target specific cases ###
def build_custom_tests(self):
### verify resource temporary folder is created and used by heartbeat agents. ###
test = self.new_test("rsc_tmp_dir", "Verify creation and use of rsc temporary state directory")
test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@")
test.add_cmd("-c register_rsc -r test_rsc -P heartbeat -C ocf -T Dummy "
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -a start -t 4000")
test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@")
test.add_sys_cmd("ls", "@CRM_RSCTMP_DIR@/Dummy-test_rsc.state")
test.add_cmd("-c exec -r test_rsc -a stop -t 4000")
test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### start delay then stop test ###
test = self.new_test("start_delay", "Verify start delay works as expected.")
test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy "
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -s 6000 -a start -w -t 6000")
test.add_expected_fail_cmd("-l "
"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 2000")
test.add_cmd("-l "
"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 6000")
test.add_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ")
test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### start delay, but cancel before it gets a chance to start. ###
test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.")
test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy "
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout)
test.add_cmd("-c exec -r test_rsc -s 5000 -a start -w -t 4000")
test.add_cmd("-c cancel -r test_rsc -a start "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ")
test.add_expected_fail_cmd("-l "
"\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 5000")
test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### Register a bunch of resources, verify we can get info on them ###
test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.")
if "systemd" in self.rsc_classes:
test.add_cmd("-c register_rsc -r rsc1 -C systemd -T lrmd_dummy_daemon "+self.action_timeout)
test.add_cmd("-c get_rsc_info -r rsc1 ")
test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout)
test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ")
if "upstart" in self.rsc_classes:
test.add_cmd("-c register_rsc -r rsc1 -C upstart -T lrmd_dummy_daemon "+self.action_timeout)
test.add_cmd("-c get_rsc_info -r rsc1 ")
test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout)
test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ")
test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout)
test.add_cmd("-c get_rsc_info -r rsc2 ")
test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout)
test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ")
### Register duplicate, verify only one entry exists and can still be removed.
test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.")
test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout)
test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy")
test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout)
test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy")
test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker "+self.action_timeout)
test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Stateful")
test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout)
test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ")
### verify the option to only send notification to the original client. ###
test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.")
test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ")
test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+" -n "
"-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ")
# this will fail because the monitor notifications should only go to the original caller, which no longer exists.
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout)
- test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"3000\" ")
+ test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" ")
test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+
"-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ")
### get metadata ###
test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\""
,"resource-agent name=\"Dummy\"")
test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"")
test.add_expected_fail_cmd("-c metadata -P \"pacemaker\" -T \"Stateful\"")
test.add_expected_fail_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"")
### get metadata ###
test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"LSBDummy\""
,"resource-agent name='LSBDummy'")
### get stonith metadata ###
test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_monitor\"",
"resource-agent name=\"fence_dummy_monitor\"")
### get metadata ###
if "systemd" in self.rsc_classes:
test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"lrmd_dummy_daemon\""
,"resource-agent name=\"lrmd_dummy_daemon\"")
### get metadata ###
if "upstart" in self.rsc_classes:
test = self.new_test("get_upstart_metadata", "Retrieve metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"upstart\" -T \"lrmd_dummy_daemon\""
,"resource-agent name=\"lrmd_dummy_daemon\"")
if "heartbeat" in self.rsc_classes:
test = self.new_test("get_heartbeat_metadata", "Retrieve metadata for a resource")
test.add_cmd_check_stdout("-c metadata -C \"heartbeat\" -T \"HBDummy\""
,"resource-agent name='HBDummy'")
### get ocf providers ###
test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.")
test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker")
test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker")
### Verify agents only exist in their lists ###
test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.")
test.add_cmd_check_stdout("-c list_agents ", "Stateful") ### ocf ###
test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful")
test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful") ### should not exist
test.add_cmd_check_stdout("-c list_agents ", "LSBDummy") ### init.d ###
test.add_cmd_check_stdout("-c list_agents -C lsb", "LSBDummy")
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy")
test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy_monitor") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy_monitor") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy_monitor") ### should not exist
if "systemd" in self.rsc_classes:
test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### systemd ###
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy")
test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C systemd", "lrmd_dummy_daemon")
test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy_monitor") ### should not exist
if "upstart" in self.rsc_classes:
test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### upstart ###
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy")
test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C upstart", "lrmd_dummy_daemon")
test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy_monitor") ### should not exist
if "stonith" in self.rsc_classes:
test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy_monitor") ### stonith ###
test.add_cmd_check_stdout("-c list_agents -C stonith", "", "lrmd_dummy_daemon") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful") ### should not exist
test.add_cmd_check_stdout("-c list_agents ", "fence_dummy_monitor")
if "heartbeat" in self.rsc_classes:
test.add_cmd_check_stdout("-c list_agents -C heartbeat", "HBDummy")
test.add_cmd_check_stdout("-c list_agents -C heartbeat", "", "LSBDummy") ### should not exist
test.add_cmd_check_stdout("-c list_agents -C service", "", "HBDummy") ### should not exist
def print_list(self):
print "\n==== %d TESTS FOUND ====" % (len(self.tests))
print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")
print "%35s - %s" % ("--------------------", "--------------------")
for test in self.tests:
print "%35s - %s" % (test.name, test.description)
print "==== END OF LIST ====\n"
def run_single(self, name):
for test in self.tests:
if test.name == name:
test.run()
break
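# Run every test whose name contains the given pattern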
def run_tests_matching(self, pattern):
for test in self.tests:
if test.name.count(pattern) != 0:
test.run()
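# Run all registered tests in order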
def run_tests(self):
for test in self.tests:
test.run()
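# Exit with -1 if any executed test failed, otherwise exit 0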
def exit(self):
for test in self.tests:
if test.executed == 0:
continue
if test.get_exitcode() != 0:
sys.exit(-1)
sys.exit(0)
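# Summarize pass/fail counts and print details for every failed test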
def print_results(self):
failures = 0
success = 0
print "\n\n======= FINAL RESULTS =========="
print "\n--- FAILURE RESULTS:"
for test in self.tests:
if test.executed == 0:
continue
if test.get_exitcode() != 0:
failures = failures + 1
test.print_result(" ")
else:
success = success + 1
if failures == 0:
print " None"
print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)
class TestOptions:
def __init__(self):
self.options = {}
self.options['list-tests'] = 0
self.options['run-all'] = 1
self.options['run-only'] = ""
self.options['run-only-pattern'] = ""
self.options['verbose'] = 0
self.options['invalid-arg'] = ""
self.options['show-usage'] = 0
self.options['pacemaker-remote'] = 0
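# Walk argv, recognizing both short and long forms; options that take a value consume the next argument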
def build_options(self, argv):
args = argv[1:]
skip = 0
for i in range(0, len(args)):
if skip:
skip = 0
continue
elif args[i] == "-h" or args[i] == "--help":
self.options['show-usage'] = 1
elif args[i] == "-l" or args[i] == "--list-tests":
self.options['list-tests'] = 1
elif args[i] == "-V" or args[i] == "--verbose":
self.options['verbose'] = 1
elif args[i] == "-R" or args[i] == "--pacemaker-remote":
self.options['pacemaker-remote'] = 1
elif args[i] == "-r" or args[i] == "--run-only":
self.options['run-only'] = args[i+1]
skip = 1
elif args[i] == "-p" or args[i] == "--run-only-pattern":
self.options['run-only-pattern'] = args[i+1]
skip = 1
def show_usage(self):
print "usage: " + sys.argv[0] + " [options]"
print "If no options are provided, all tests will run"
print "Options:"
print "\t [--help | -h] Show usage"
print "\t [--list-tests | -l] Print out all registered tests."
print "\t [--run-only | -r 'testname'] Run a specific test"
print "\t [--verbose | -V] Verbose output"
print "\t [--pacemaker-remote | -R Test pacemaker-remote binary instead of lrmd."
print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value"
print "\n\tExample: Run only the test 'start_top'"
print "\t\t python ./regression.py --run-only start_stop"
print "\n\tExample: Run only the tests with the string 'systemd' present in them"
print "\t\t python ./regression.py --run-only-pattern systemd"
def main(argv):
o = TestOptions()
o.build_options(argv)
tests = Tests(o.options['verbose'], o.options['pacemaker-remote'])
tests.build_generic_tests()
tests.build_multi_rsc_tests()
tests.build_negative_tests()
tests.build_custom_tests()
tests.build_stress_tests()
tests.setup_test_environment()
print "Starting ..."
if o.options['list-tests']:
tests.print_list()
elif o.options['show-usage']:
o.show_usage()
elif o.options['run-only-pattern'] != "":
tests.run_tests_matching(o.options['run-only-pattern'])
tests.print_results()
elif o.options['run-only'] != "":
tests.run_single(o.options['run-only'])
tests.print_results()
else:
tests.run_tests()
tests.print_results()
tests.cleanup_test_environment()
tests.exit()
if __name__=="__main__":
main(sys.argv)
diff --git a/pacemaker.spec.in b/pacemaker.spec.in
index 4b43d9d2be..8b1041a6a3 100644
--- a/pacemaker.spec.in
+++ b/pacemaker.spec.in
@@ -1,602 +1,612 @@
%global gname haclient
%global uname hacluster
%global pcmk_docdir %{_docdir}/%{name}
%global specversion 1
%global commit HEAD
%global shortcommit %(c=%{commit}; echo ${c:0:7})
%global github_owner ClusterLabs
# Turn off the auto compilation of python files not in the site-packages directory
# Needed so that the -devel package is multilib compliant
%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
%global rawhide %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?)
%global cs_version %(pkg-config corosync --modversion | awk -F . '{print $1}')
# It has to be eventually decided whether to use Python2 or Python3
%global py_site %{?python_sitearch}%{!?python_sitearch:%(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
# https://fedoraproject.org/wiki/EPEL:Packaging?rd=Packaging:EPEL#The_.25license_tag
%{!?_licensedir:%global license %doc}
# Conditionals
# Invoke "rpmbuild --without " or "rpmbuild --with "
# to disable or enable specific features
# Legacy stonithd fencing agents
%bcond_with stonithd
# Build with/without support for profiling tools
%bcond_with profiling
# Build with/without support for performing coverage analysis
%bcond_with coverage
# We generate docs using Publican, Asciidoc and Inkscape, but they're not available everywhere
%bcond_without doc
# Use a different versioning scheme
%bcond_with pre_release
# Ship an Upstart job file
%bcond_with upstart_job
# Turn off cman support on platforms that normally ship with it
%bcond_without cman
%if %{with profiling}
# This disables -debuginfo package creation and also the stripping of binaries/libraries
# Useful if you want sane profiling data
%global debug_package %{nil}
%endif
%if %{with pre_release}
%global pcmk_release 0.%{specversion}.%{shortcommit}.git
%else
%global pcmk_release %{specversion}
%endif
Name: pacemaker
Summary: Scalable High-Availability cluster resource manager
Version: 1.1.14
Release: %{pcmk_release}%{?dist}
%if %{defined _unitdir}
License: GPLv2+ and LGPLv2+
%else
# initscript is Revised BSD
License: GPLv2+ and LGPLv2+ and BSD
%endif
Url: http://www.clusterlabs.org
Group: System Environment/Daemons
Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{commit}.tar.gz
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
AutoReqProv: on
Requires: python
Requires: resource-agents
Requires: %{name}-libs = %{version}-%{release}
Requires: %{name}-cluster-libs = %{version}-%{release}
Requires: %{name}-cli = %{version}-%{release}
%if %{defined systemd_requires}
%systemd_requires
%endif
%if 0%{?rhel} > 0
ExclusiveArch: i386 i686 x86_64
%endif
# Required for core functionality (python-devel depends on python)
BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel
BuildRequires: pkgconfig(glib-2.0) libxml2-devel libxslt-devel libuuid-devel
BuildRequires: python-devel bzip2-devel pam-devel
# Required for agent_config.h which specifies the correct scratch directory
BuildRequires: resource-agents
# We need reasonably recent versions of libqb
BuildRequires: libqb-devel > 0.11.0
Requires: libqb > 0.11.0
# Enables optional functionality
BuildRequires: ncurses-devel openssl-devel libselinux-devel docbook-style-xsl
BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1)
%if %{defined _unitdir}
BuildRequires: systemd-devel
%endif
%if %{with cman}
%if 0%{?fedora} > 0
%if 0%{?fedora} < 17
BuildRequires: clusterlib-devel
%endif
%endif
%if 0%{?rhel} > 0
%if 0%{?rhel} < 7
BuildRequires: clusterlib-devel
%endif
%endif
%endif
Requires: corosync
BuildRequires: corosynclib-devel
%if %{with stonithd}
BuildRequires: cluster-glue-libs-devel
%endif
%if !%{rawhide}
# More often than not, inkscape is busted on rawhide, don't even bother
%if %{with doc}
%ifarch %{ix86} x86_64
BuildRequires: publican inkscape asciidoc
%endif
%endif
%endif
%description
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
It supports more than 16 node clusters with significant capabilities
for managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
Available rpmbuild rebuild options:
--with(out) : cman stonithd doc coverage profiling pre_release upstart_job
%package cli
License: GPLv2+ and LGPLv2+
Summary: Command line tools for controlling Pacemaker clusters
Group: System Environment/Daemons
Requires: %{name}-libs = %{version}-%{release}
Requires: perl-TimeDate
%description cli
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
The %{name}-cli package contains command line tools that can be used
to query and control the cluster from machines that may, or may not,
be part of the cluster.
%package -n %{name}-libs
License: GPLv2+ and LGPLv2+
Summary: Core Pacemaker libraries
Group: System Environment/Daemons
%description -n %{name}-libs
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
The %{name}-libs package contains shared libraries needed for cluster
nodes and those just running the CLI tools.
%package -n %{name}-cluster-libs
License: GPLv2+ and LGPLv2+
Summary: Cluster Libraries used by Pacemaker
Group: System Environment/Daemons
Requires: %{name}-libs = %{version}-%{release}
%description -n %{name}-cluster-libs
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
The %{name}-cluster-libs package contains cluster-aware shared
libraries needed for nodes that will form part of the cluster.
%package remote
%if %{defined _unitdir}
License: GPLv2+ and LGPLv2+
%else
# initscript is Revised BSD
License: GPLv2+ and LGPLv2+ and BSD
%endif
Summary: Pacemaker remote daemon for non-cluster nodes
Group: System Environment/Daemons
Requires: %{name}-libs = %{version}-%{release}
Requires: %{name}-cli = %{version}-%{release}
Requires: resource-agents
%if %{defined systemd_requires}
%systemd_requires
%endif
%description remote
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
The %{name}-remote package contains the Pacemaker Remote daemon
which is capable of extending pacemaker functionality to remote
nodes not running the full corosync/cluster stack.
%package -n %{name}-libs-devel
License: GPLv2+ and LGPLv2+
Summary: Pacemaker development package
Group: Development/Libraries
Requires: %{name}-cts = %{version}-%{release}
Requires: %{name}-libs = %{version}-%{release}
Requires: %{name}-cluster-libs = %{version}-%{release}
Requires: libtool-ltdl-devel libqb-devel libuuid-devel
Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel
Requires: corosynclib-devel
%description -n %{name}-libs-devel
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
The %{name}-libs-devel package contains headers and shared libraries
for developing tools for Pacemaker.
%package cts
License: GPLv2+ and LGPLv2+
Summary: Test framework for cluster-related technologies like Pacemaker
Group: System Environment/Daemons
Requires: python
Requires: %{name}-libs = %{version}-%{release}
+
+# systemd python bindings are separate package in some distros
%if %{defined systemd_requires}
+
+%if 0%{?fedora} > 20
Requires: systemd-python
%endif
+%if 0%{?rhel} > 6
+Requires: systemd-python
+%endif
+
+%endif
+
%description cts
Test framework for cluster-related technologies like Pacemaker
%package doc
License: GPLv2+ and LGPLv2+
Summary: Documentation for Pacemaker
Group: Documentation
%description doc
Documentation for Pacemaker.
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Corosync, CMAN and/or Linux-HA.
%prep
%setup -q -n %{name}-%{commit}
# Force the local time
#
# 'git' sets the file date to the date of the last commit.
# This can result in files having been created in the future
# when building on machines in timezones 'behind' the one the
# commit occurred in - which seriously confuses 'make'
find . -exec touch \{\} \;
%build
./autogen.sh
# RHEL <= 5 does not support --docdir
docdir=%{pcmk_docdir} %{configure} \
%{?with_profiling: --with-profiling} \
%{?with_coverage: --with-coverage} \
%{!?with_cman: --without-cman} \
--without-heartbeat \
--with-initdir=%{_initrddir} \
--localstatedir=%{_var} \
--with-version=%{version}-%{release}
%if 0%{?suse_version} >= 1200
# Fedora handles rpath removal automagically
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
%endif
make %{_smp_mflags} V=1 docdir=%{pcmk_docdir} all
%install
rm -rf %{buildroot}
make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig
mkdir -p ${RPM_BUILD_ROOT}%{_var}/lib/pacemaker/cores
install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker
install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon
%if %{with upstart_job}
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init
install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf
install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf
install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf
%endif
# Scripts that should be executable
chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py
# These are not actually scripts
find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x
# Don't package static libs
find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
# Do not package these either
rm -f %{buildroot}/%{_libdir}/service_crm.so
# Don't ship init scripts for systemd based platforms
%if %{defined _unitdir}
rm -f %{buildroot}/%{_initrddir}/pacemaker
rm -f %{buildroot}/%{_initrddir}/pacemaker_remote
%endif
%if %{with coverage}
GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov
mkdir -p $GCOV_BASE
find . -name '*.gcno' -type f | while read F ; do
D=`dirname $F`
mkdir -p ${GCOV_BASE}/$D
cp $F ${GCOV_BASE}/$D
done
%endif
%clean
rm -rf %{buildroot}
%if %{defined _unitdir}
%post
%systemd_post pacemaker.service
%preun
%systemd_preun pacemaker.service
%postun
%systemd_postun_with_restart pacemaker.service
%post remote
%systemd_post pacemaker_remote.service
%preun remote
%systemd_preun pacemaker_remote.service
%postun remote
%systemd_postun_with_restart pacemaker_remote.service
%post cli
%systemd_post crm_mon.service
%preun cli
%systemd_preun crm_mon.service
%postun cli
%systemd_postun_with_restart crm_mon.service
%else
%post
/sbin/chkconfig --add pacemaker || :
%preun
/sbin/service pacemaker stop || :
if [ $1 -eq 0 ]; then
# Package removal, not upgrade
/sbin/chkconfig --del pacemaker || :
fi
%post remote
/sbin/chkconfig --add pacemaker_remote || :
%preun remote
/sbin/service pacemaker_remote stop &>/dev/null || :
if [ $1 -eq 0 ]; then
# Package removal, not upgrade
/sbin/chkconfig --del pacemaker_remote || :
fi
%endif
%pre -n %{name}-libs
getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189
getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname}
exit 0
%post -n %{name}-libs -p /sbin/ldconfig
%postun -n %{name}-libs -p /sbin/ldconfig
%post -n %{name}-cluster-libs -p /sbin/ldconfig
%postun -n %{name}-cluster-libs -p /sbin/ldconfig
%files
###########################################################
%defattr(-,root,root)
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%{_sbindir}/pacemakerd
%if %{defined _unitdir}
%{_unitdir}/pacemaker.service
%else
%{_initrddir}/pacemaker
%endif
%exclude %{_libexecdir}/pacemaker/lrmd_test
%exclude %{_sbindir}/pacemaker_remoted
%{_libexecdir}/pacemaker/*
%{_sbindir}/crm_attribute
%{_sbindir}/crm_master
%{_sbindir}/crm_node
%{_sbindir}/fence_legacy
%{_sbindir}/fence_pcmk
%{_sbindir}/stonith_admin
%doc %{_mandir}/man7/crmd.*
%doc %{_mandir}/man7/pengine.*
%doc %{_mandir}/man7/stonithd.*
%doc %{_mandir}/man7/ocf_pacemaker_controld.*
%doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
%doc %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/crm_attribute.*
%doc %{_mandir}/man8/crm_node.*
%doc %{_mandir}/man8/crm_master.*
%doc %{_mandir}/man8/fence_pcmk.*
%doc %{_mandir}/man8/fence_legacy.*
%doc %{_mandir}/man8/pacemakerd.*
%doc %{_mandir}/man8/stonith_admin.*
%license COPYING
%doc AUTHORS
%doc ChangeLog
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
/usr/lib/ocf/resource.d/pacemaker/controld
/usr/lib/ocf/resource.d/pacemaker/o2cb
/usr/lib/ocf/resource.d/pacemaker/remote
/usr/lib/ocf/resource.d/.isolation
%if "%{?cs_version}" != "UNKNOWN"
%if 0%{?cs_version} < 2
%{_libexecdir}/lcrso/pacemaker.lcrso
%endif
%endif
%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/pacemaker.conf
%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
%endif
%files cli
%defattr(-,root,root)
%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon
%if %{defined _unitdir}
%{_unitdir}/crm_mon.service
%endif
%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/crm_mon.conf
%endif
%{_sbindir}/attrd_updater
%{_sbindir}/cibadmin
%{_sbindir}/crm_diff
%{_sbindir}/crm_error
%{_sbindir}/crm_failcount
%{_sbindir}/crm_mon
%{_sbindir}/crm_resource
%{_sbindir}/crm_standby
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
%{_sbindir}/crm_shadow
%{_sbindir}/crm_simulate
%{_sbindir}/crm_report
%{_sbindir}/crm_ticket
%exclude %{_datadir}/pacemaker/tests
%{_datadir}/pacemaker
%{_datadir}/snmp/mibs/PCMK-MIB.txt
%exclude /usr/lib/ocf/resource.d/pacemaker/controld
%exclude /usr/lib/ocf/resource.d/pacemaker/o2cb
%exclude /usr/lib/ocf/resource.d/pacemaker/remote
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
/usr/lib/ocf/resource.d/pacemaker
%doc %{_mandir}/man7/*
%exclude %{_mandir}/man7/crmd.*
%exclude %{_mandir}/man7/pengine.*
%exclude %{_mandir}/man7/stonithd.*
%exclude %{_mandir}/man7/ocf_pacemaker_controld.*
%exclude %{_mandir}/man7/ocf_pacemaker_o2cb.*
%exclude %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/*
%exclude %{_mandir}/man8/crm_attribute.*
%exclude %{_mandir}/man8/crm_node.*
%exclude %{_mandir}/man8/crm_master.*
%exclude %{_mandir}/man8/fence_pcmk.*
%exclude %{_mandir}/man8/fence_legacy.*
%exclude %{_mandir}/man8/pacemakerd.*
%exclude %{_mandir}/man8/pacemaker_remoted.*
%exclude %{_mandir}/man8/stonith_admin.*
%license COPYING
%doc AUTHORS
%doc ChangeLog
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
%files -n %{name}-libs
%defattr(-,root,root)
%{_libdir}/libcib.so.*
%{_libdir}/liblrmd.so.*
%{_libdir}/libcrmservice.so.*
%{_libdir}/libcrmcommon.so.*
%{_libdir}/libpe_status.so.*
%{_libdir}/libpe_rules.so.*
%{_libdir}/libpengine.so.*
%{_libdir}/libstonithd.so.*
%{_libdir}/libtransitioner.so.*
%license COPYING.LIB
%doc AUTHORS
%files -n %{name}-cluster-libs
%defattr(-,root,root)
%{_libdir}/libcrmcluster.so.*
%license COPYING.LIB
%doc AUTHORS
%files remote
%defattr(-,root,root)
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%if %{defined _unitdir}
%{_unitdir}/pacemaker_remote.service
%else
%{_initrddir}/pacemaker_remote
%endif
%{_sbindir}/pacemaker_remoted
%{_mandir}/man8/pacemaker_remoted.*
%license COPYING
%doc AUTHORS
%files doc
%defattr(-,root,root)
%doc %{pcmk_docdir}
%files cts
%defattr(-,root,root)
%{py_site}/cts
%{_datadir}/pacemaker/tests/cts
%{_libexecdir}/pacemaker/lrmd_test
%license COPYING
%doc AUTHORS
%files -n %{name}-libs-devel
%defattr(-,root,root)
%exclude %{_datadir}/pacemaker/tests/cts
%{_datadir}/pacemaker/tests
%{_includedir}/pacemaker
%{_libdir}/*.so
%if %{with coverage}
%{_var}/lib/pacemaker/gcov
%endif
%{_libdir}/pkgconfig/*.pc
%license COPYING.LIB
%doc AUTHORS
%changelog
diff --git a/rpmlintrc b/rpmlintrc
index eec11e4ed7..e762c6d498 100644
--- a/rpmlintrc
+++ b/rpmlintrc
@@ -1,35 +1,38 @@
addFilter("E: non-standard-dir-perm .* 0750")
addFilter("W: shared-lib-calls-exit")
# Really, really do not care
addFilter("spelling-error")
addFilter("mixed-use-of-spaces-and-tabs")
# Mandated location for OCF directory
addFilter("W: only-non-binary-in-usr-lib")
addFilter("E: hardcoded-library-path in /usr/lib/ocf")
# It's there, but rpmlint can't find it for some reason
addFilter("no-status-entry /etc/rc.d/init.d/")
# We don't want the cluster to start by default
addFilter("no-chkconfig-line /etc/rc.d/init.d/")
# Not interested in lock files
addFilter("subsys-not-used /etc/rc.d/init.d/")
# When building developer packages
addFilter("W: invalid-url Source0:")
addFilter("W: unstripped-binary-or-object")
addFilter("W: hidden-file-or-dir /var/lib/pacemaker/gcov")
# Build artifacts
addFilter("E: changelog-time-in-future")
addFilter("pacemaker.src: W: strange-permission .* 0600")
addFilter("enchant-dictionary-not-found en_US")
# Isolation agents are hidden so they don't show up in agent list
addFilter("W: hidden-file-or-dir /usr/lib/ocf/resource.d/.isolation")
# rpmlint doesn't like logrotate script being in pacemaker-cli package
addFilter("E: incoherent-logrotate-file /etc/logrotate.d/pacemaker")
+
+# buildbot builds the not-yet-released version
+addFilter("W: incoherent-version-in-changelog")