diff --git a/.gitignore b/.gitignore
index a1bbe1eab8..f58698b6f0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,243 +1,245 @@
# Common
\#*
.\#*
GPATH
GRTAGS
GTAGS
TAGS
Makefile
Makefile.in
.deps
.dirstamp
.libs
*.pc
*.pyc
*.bz2
*.tar.gz
*.tgz
*.la
*.lo
*.o
*~
*.gcda
*.gcno
# Autobuild
aclocal.m4
autoconf
autoheader
autom4te.cache/
automake
build.counter
compile
/confdefs.h
config.guess
config.log
config.status
config.sub
configure
/conftest*
depcomp
install-sh
include/stamp-*
libtool
libtool.m4
ltdl.m4
libltdl
ltmain.sh
missing
py-compile
/m4/argz.m4
/m4/ltargz.m4
/m4/ltoptions.m4
/m4/ltsugar.m4
/m4/ltversion.m4
/m4/lt~obsolete.m4
test-driver
ylwrap
# Configure targets
/cts/CTS.py
/cts/CTSlab.py
/cts/CTSvars.py
/cts/LSBDummy
/cts/OCFIPraTest.py
/cts/benchmark/clubench
/cts/cluster_test
/cts/cts
/cts/cts-cli
/cts/cts-coverage
/cts/cts-exec
/cts/cts-fencing
/cts/cts-log-watcher
/cts/cts-regression
/cts/cts-scheduler
/cts/cts-support
/cts/fence_dummy
/cts/lxc_autogen.sh
/cts/pacemaker-cts-dummyd
/cts/pacemaker-cts-dummyd@.service
/daemons/execd/pacemaker_remote
/daemons/execd/pacemaker_remote.service
/daemons/fenced/fence_legacy
/daemons/pacemakerd/pacemaker
/daemons/pacemakerd/pacemaker.combined.upstart
/daemons/pacemakerd/pacemaker.service
/daemons/pacemakerd/pacemaker.upstart
/doc/Doxyfile
/extra/logrotate/pacemaker
/extra/resources/ClusterMon
/extra/resources/HealthSMART
/extra/resources/SysInfo
/extra/resources/ifspeed
/extra/resources/o2cb
include/config.h
include/config.h.in
include/crm_config.h
publican.cfg
/tools/cibsecret
/tools/crm_error
/tools/crm_failcount
/tools/crm_master
/tools/crm_mon.service
/tools/crm_mon.upstart
/tools/crm_report
/tools/crm_rule
/tools/crm_standby
/tools/pcmk_simtimes
/tools/report.collector
/tools/report.common
# Build targets
*.7
*.7.xml
*.7.html
*.8
*.8.xml
*.8.html
/daemons/attrd/pacemaker-attrd
/daemons/based/pacemaker-based
/daemons/based/cibmon
/daemons/controld/pacemaker-controld
/daemons/execd/cts-exec-helper
/daemons/execd/pacemaker-execd
/daemons/execd/pacemaker-remoted
/daemons/fenced/cts-fence-helper
/daemons/fenced/pacemaker-fenced
/daemons/fenced/pacemaker-fenced.xml
/daemons/pacemakerd/pacemakerd
/daemons/schedulerd/pacemaker-schedulerd
/daemons/schedulerd/pacemaker-schedulerd.xml
/doc/*/tmp/**
/doc/*/publish
/doc/*.build
/doc/*/en-US/Ap-*.xml
/doc/*/en-US/Ch-*.xml
/doc/.ABI-build
/doc/HTML
/doc/abi_dumps
/doc/abi-check
/doc/api/*
/doc/compat_reports
/doc/crm_fencing.html
/doc/publican-catalog*
/doc/shared/en-US/*.xml
/doc/shared/en-US/images/pcmk-*.png
/doc/shared/en-US/images/Policy-Engine-*.png
/doc/sphinx/*/_build
/doc/sphinx/*/conf.py
/lib/common/md5.c
/maint/testcc_helper.cc
/maint/testcc_*_h
/maint/mocked/based
scratch
/tools/attrd_updater
/tools/cibadmin
/tools/crmadmin
/tools/crm_attribute
/tools/crm_diff
/tools/crm_mon
/tools/crm_node
/tools/crm_resource
/tools/crm_shadow
/tools/crm_simulate
/tools/crm_ticket
/tools/crm_verify
/tools/iso8601
/tools/stonith_admin
xml/crm.dtd
xml/pacemaker*.rng
xml/versions.rng
xml/api/api-result*.rng
lib/gnu/libgnu.a
lib/gnu/stdalign.h
*.coverity
# Packager artifacts
*.rpm
/mock
/pacemaker.spec
/rpm/[A-Z]*
# make dist/export working directory
pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9]
# Test detritus
/cts/.regression.failed.diff
/cts/scheduler/*.ref
/cts/scheduler/*.up
/cts/scheduler/*.up.err
/cts/scheduler/bug-rh-1097457.log
/cts/scheduler/bug-rh-1097457.trs
/cts/scheduler/shadow.*
/cts/test-suite.log
/xml/test-*/*.up
/xml/test-*/*.up.err
/xml/assets/*.rng
/xml/assets/diffview.js
/xml/assets/xmlcatalog
# Test results
*.log
*.trs
+/lib/common/tests/strings/pcmk__scan_double
/lib/common/tests/strings/pcmk__parse_ll_range
/lib/common/tests/strings/pcmk__str_any_of
/lib/common/tests/strings/pcmk__strcmp
+/lib/common/tests/strings/pcmk__char_in_any_str
/lib/common/tests/utils/pcmk_str_is_infinity
/lib/common/tests/utils/pcmk_str_is_minus_infinity
/lib/pengine/tests/rules/pe_cron_range_satisfied
# Release maintenance detritus
/maint/gnulib
# Formerly built files (helps when jumping back and forth in checkout)
/.ABI-build
/Doxyfile
/HTML
/abi_dumps
/abi-check
/compat_reports
/attrd
/cib
/coverage.sh
/crmd
/cts/HBDummy
/doc/Clusters_from_Scratch.txt
/doc/Pacemaker_Explained.txt
/doc/acls.html
/fencing
/lrmd
/mcp
/pacemaker-*.spec
/pengine
#Other
coverity-*
logs
*.patch
*.diff
*.sed
*.orig
*.rej
*.swp
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 9a26c2c707..6f3ea781a8 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,3473 +1,3473 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Validate CIB =#=#=#=
<cib epoch="0" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Current cib after: Validate CIB =#=#=#=
<cib epoch="0" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= Current cib after: Allow CIB erasure with --force =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
=#=#=#= Begin test: Query CIB =#=#=#=
=#=#=#= Current cib after: Query CIB =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query new cluster option =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Query cluster options =#=#=#=
=#=#=#= Current cib after: Query cluster options =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query cluster options - OK (0) =#=#=#=
* Passed: cibadmin - Query cluster options
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
<failed>
<failed_update id="cib-bootstrap-options" object_type="cluster_property_set" operation="cib_create" reason="File exists">
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</failed_update>
</failed>
=#=#=#= Current cib after: Create operation should fail =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
Please choose from one of the matches above and supply the 'id' with --attr-id
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
<cib epoch="9" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
Current cluster status:
Performing requested modifications
+ Bringing node node1 online
Transition Summary:
Executing cluster transition:
Revised cluster status:
Online: [ node1 ]
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
<cib epoch="10" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1"/>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
=#=#=#= Current cib after: Query new node attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
<cib epoch="11" num_updates="1" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
<cib epoch="11" num_updates="1" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
=#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#=
<cib epoch="11" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a transient (fail-count) node attribute
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
<cib epoch="11" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= Current cib after: Replace operation should fail =#=#=#=
<cib epoch="11" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Default standby value =#=#=#=
scope=status name=standby value=off
=#=#=#= Current cib after: Default standby value =#=#=#=
<cib epoch="11" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Default standby value - OK (0) =#=#=#=
* Passed: crm_standby - Default standby value
=#=#=#= Begin test: Set standby status =#=#=#=
=#=#=#= Current cib after: Set standby status =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set standby status - OK (0) =#=#=#=
* Passed: crm_standby - Set standby status
=#=#=#= Begin test: Query standby value =#=#=#=
scope=nodes name=standby value=true
=#=#=#= Current cib after: Query standby value =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query standby value - OK (0) =#=#=#=
* Passed: crm_standby - Query standby value
=#=#=#= Begin test: Delete standby value =#=#=#=
Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= Current cib after: Delete standby value =#=#=#=
<cib epoch="13" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete standby value - OK (0) =#=#=#=
* Passed: crm_standby - Delete standby value
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
<cib epoch="16" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create another resource meta attribute =#=#=#=
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
=#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute
=#=#=#= Begin test: Show why a resource is not running =#=#=#=
Resource dummy is not running
* Configuration specifies 'dummy' should remain stopped
=#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running
=#=#=#= Begin test: Remove another resource meta attribute =#=#=#=
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute
=#=#=#= Begin test: Create a resource attribute =#=#=#=
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
dummy (ocf::pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
dummy (ocf::pacemaker:Dummy): Stopped
xml:
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
Error performing operation: Invalid argument
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
Error performing operation: Node not found
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
<cib epoch="20" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Stopped
- Fence (stonith:fence_true): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing cluster transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node1
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
<cib epoch="20" num_updates="4" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
Error performing operation: Already in requested state
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
<cib epoch="20" num_updates="4" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
<cib epoch="21" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
<cib epoch="22" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
=#=#=#= Begin test: Default ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Default ticket granted state =#=#=#=
<cib epoch="22" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Default ticket granted state
=#=#=#= Begin test: Set ticket granted state =#=#=#=
=#=#=#= Current cib after: Set ticket granted state =#=#=#=
<cib epoch="22" num_updates="1" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set ticket granted state
=#=#=#= Begin test: Query ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Query ticket granted state =#=#=#=
<cib epoch="22" num_updates="1" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state
=#=#=#= Begin test: Delete ticket granted state =#=#=#=
=#=#=#= Current cib after: Delete ticket granted state =#=#=#=
<cib epoch="22" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket granted state
=#=#=#= Begin test: Make a ticket standby =#=#=#=
=#=#=#= Current cib after: Make a ticket standby =#=#=#=
<cib epoch="22" num_updates="3" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
* Passed: crm_ticket - Make a ticket standby
=#=#=#= Begin test: Query ticket standby state =#=#=#=
true
=#=#=#= Current cib after: Query ticket standby state =#=#=#=
<cib epoch="22" num_updates="3" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket standby state
=#=#=#= Begin test: Activate a ticket =#=#=#=
=#=#=#= Current cib after: Activate a ticket =#=#=#=
<cib epoch="22" num_updates="4" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Activate a ticket
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
<cib epoch="22" num_updates="5" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket standby state
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
Error performing operation: Node not found
=#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
<cib epoch="22" num_updates="5" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
Online: [ node1 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node1
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node1
Performing requested modifications
+ Bringing node node2 online
+ Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing cluster transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
<cib epoch="24" num_updates="8" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
<cib epoch="25" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
* dummy
: Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
=#=#=#= Begin test: Ban dummy from node2 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2.
This will prevent dummy from running on node2 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node2 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
<cib epoch="26" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node1
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node1
+ Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing cluster transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised cluster status:
Online: [ node1 node2 node3 ]
- dummy (ocf::pacemaker:Dummy): Started node3
- Fence (stonith:fence_true): Started node2
+ dummy (ocf::pacemaker:Dummy): Started node3
+ Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
<cib epoch="26" num_updates="2" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 =#=#=#=
<cib epoch="28" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
<cib epoch="29" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="32" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
<cib epoch="33" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
<cib epoch="34" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
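In the "(force clone)" variant the attribute is updated on the clone itself instead of being redirected to the child that already carries a value; a sketch, assuming the test reaches that behaviour through crm_resource's --force option:

# Sketch only: --force keeps the update on test-clone rather than the child
# primitive, so test-clone-meta_attributes-is-managed becomes true.
CIB_shadow=cts-cli crm_resource --resource test-clone --force \
    --meta --set-parameter is-managed --parameter-value true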
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
<cib epoch="35" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
<cib epoch="36" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
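The delete path mirrors the update path: with duplicates present, the request against the clone is redirected to the child's copy, as the "performing delete on that instead of 'test-clone'" message shows. A sketch, assuming crm_resource's --delete-parameter flag and the clone as the target:

# Sketch only: remove the is-managed meta-attribute; the child's copy
# (test-primitive-meta_attributes-is-managed) is the one actually deleted.
CIB_shadow=cts-cli crm_resource --resource test-clone \
    --meta --delete-parameter is-managed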
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
<cib epoch="37" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="38" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
<cib epoch="39" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
<cib epoch="40" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Copy resources =#=#=#=
=#=#=#= End test: Copy resources - OK (0) =#=#=#=
* Passed: cibadmin - Copy resources
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
<cib epoch="41" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Restore duplicates =#=#=#=
=#=#=#= Current cib after: Restore duplicates =#=#=#=
<cib epoch="42" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
* Passed: cibadmin - Restore duplicates
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
<cib epoch="43" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
<cib epoch="45" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
<cib epoch="47" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="dummy-group-meta_attributes">
<nvpair id="dummy-group-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
<cib epoch="50" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
<rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
<expression id="cli-prefer-expr-dummy" attribute="#uname" operation="eq" value="node2" type="string"/>
<date_expression id="cli-prefer-lifetime-end-dummy" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
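The empty "Migration will take effect until:" line and the date_expression with end="" come from moving the resource with a lifetime; the real timestamp varies per run, so it is presumably scrubbed by the test harness. A sketch, assuming the --lifetime flag with an ISO 8601 duration (PT1M is a guess) and the --node spelling of the destination option:

# Sketch only: move dummy to node2 for a limited time; this creates the
# cli-prefer-dummy rule with the date_expression shown above.
CIB_shadow=cts-cli crm_resource --resource dummy --move \
    --node node2 --lifetime PT1M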
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
<cib epoch="52" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
Migration will take effect until:
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
<cib epoch="53" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started">
<rule id="cli-ban-dummy-on-node1-rule" score="-INFINITY" boolean-op="and">
<expression id="cli-ban-dummy-on-node1-expr" attribute="#uname" operation="eq" value="node1" type="string"/>
<date_expression id="cli-ban-dummy-on-node1-lifetime" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
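Banning with a lifetime works the same way but produces a -INFINITY rule; a sketch, assuming a one-second lifetime to match "for a short time" (the exact duration is a guess):

# Sketch only: ban dummy from node1 briefly; this creates the
# cli-ban-dummy-on-node1 rule and its lifetime date_expression.
CIB_shadow=cts-cli crm_resource --resource dummy --ban \
    --node node1 --lifetime PT1S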
=#=#=#= Begin test: Remove expired constraints =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
<cib epoch="54" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: crm_resource - Remove expired constraints
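Once that short lifetime has elapsed, the ban rule is expired and can be removed without touching the still-valid cli-prefer-dummy constraint; a sketch, assuming this build's --clear accepts an --expired option:

# Sketch only: drop only constraints whose lifetime has already passed.
CIB_shadow=cts-cli crm_resource --resource dummy --clear --expired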
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
<cib epoch="55" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
<cib epoch="56" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Create an XML patchset =#=#=#=
<diff format="2">
<version>
<source admin_epoch="0" epoch="1" num_updates="0"/>
<target admin_epoch="0" epoch="1" num_updates="0"/>
</version>
<change operation="delete" path="/cib/configuration/comment" position="0"/>
<change operation="delete" path="/cib/configuration/comment" position="1"/>
<change operation="delete" path="/cib/configuration/resources/comment" position="0"/>
<change operation="delete" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-start-0&apos;]"/>
<change operation="modify" path="/cib/configuration/crm_config/cluster_property_set[@id=&apos;cib-bootstrap-options&apos;]/nvpair[@id=&apos;cib-bootstrap-options-cluster-name&apos;]">
<change-list>
<change-attr name="value" operation="set" value="mycluster"/>
<change-attr name="name" operation="set" value="cluster-name"/>
</change-list>
<change-result>
<nvpair id="cib-bootstrap-options-cluster-name" value="mycluster" name="cluster-name"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration/nodes" position="4">
<node id="4" uname="node4"/>
</change>
<change operation="create" path="/cib/configuration" position="3">
<!-- hello world -->
</change>
<change operation="create" path="/cib/configuration/resources" position="0">
<!-- test: modify this comment to say something different -->
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/instance_attributes[@id=&apos;Fencing-params&apos;]/nvpair[@id=&apos;Fencing-pcmk_host_list&apos;]">
<change-list>
<change-attr name="value" operation="set" value="node1 node2 node3 node4"/>
</change-list>
<change-result>
<nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4"/>
</change-result>
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-monitor-120s&apos;]">
<change-list>
<change-attr name="timeout" operation="set" value="120s"/>
<change-attr name="name" operation="set" value="monitor"/>
</change-list>
<change-result>
<op id="Fencing-monitor-120s" interval="120s" timeout="120s" name="monitor"/>
</change-result>
</change>
<change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-op_sleep&apos;]" position="1"/>
<change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-fake&apos;]" position="2"/>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/operations/op[@id=&apos;dummy-monitor-5s&apos;]">
<change-list>
<change-attr name="name" operation="set" value="monitor"/>
<change-attr name="timeout" operation="unset"/>
</change-list>
<change-result>
<op id="dummy-monitor-5s" interval="5s" name="monitor"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration" position="6">
<!-- test: move this comment to end of configuration -->
</change>
</diff>
=#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#=
* Passed: crm_diff - Create an XML patchset
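The format="2" patchset above is the kind of output crm_diff produces when comparing two CIB snapshots, and the "Error occurred (1)" end marker still counts as a pass because crm_diff uses a nonzero exit status to signal that differences were found. A sketch, with hypothetical file names:

# Sketch only: diff two saved CIBs; exit status 1 means changes were found.
crm_diff --original cib-before.xml --new cib-after.xml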
diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp
index 50b22dfbdf..10f630968c 100644
--- a/cts/cli/regression.upgrade.exp
+++ b/cts/cli/regression.upgrade.exp
@@ -1,150 +1,153 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Set stonith-enabled=false =#=#=#=
=#=#=#= Current cib after: Set stonith-enabled=false =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set stonith-enabled=false - OK (0) =#=#=#=
* Passed: crm_attribute - Set stonith-enabled=false
=#=#=#= Begin test: Configure the initial resource =#=#=#=
=#=#=#= Current cib after: Configure the initial resource =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
<operations>
<op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
<op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s">
<instance_attributes id="mySmartFuse-inputpower-instanceparams">
<nvpair id="mySmartFuse-inputpower-requires" name="requires" value="inputpower"/>
</instance_attributes>
</op>
<op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s">
<instance_attributes id="mySmartFuse-outputpower-instanceparams">
<nvpair id="mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
</instance_attributes>
</op>
</operations>
<instance_attributes id="mySmartFuse-params">
<nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
</instance_attributes>
<!-- a bit hairy but valid -->
<instance_attributes id-ref="mySmartFuse-outputpower-instanceparams"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Configure the initial resource - OK (0) =#=#=#=
* Passed: cibadmin - Configure the initial resource
=#=#=#= Begin test: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
update_validation debug: Testing 'pacemaker-2.10' validation (13 of X)
update_validation debug: Upgrading pacemaker-2.10-style configuration to pacemaker-3.0 with upgrade-2.10.xsl
apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, pre-upgrade phase with upgrade-2.10-enter.xsl
apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, main phase with upgrade-2.10.xsl
INFO: Resources-operation instance_attributes: mySmartFuse-monitor-inputpower (rsc=mySmartFuse, meta=mySmartFuse-inputpower-instanceparams): dropping requires
INFO: Resources-operation instance_attributes: ... only start/promote operation taken into account
INFO: Resources-operation instance_attributes: mySmartFuse-monitor-outputpower (rsc=mySmartFuse, meta=mySmartFuse-outputpower-instanceparams): dropping requires
INFO: Resources-operation instance_attributes: ... only start/promote operation taken into account
apply_upgrade debug: Upgrading pacemaker-2.10-style configuration, post-upgrade phase with upgrade-2.10-leave.xsl
DEBUG: instance_attributes: original element pointed to with @id-ref (mySmartFuse-outputpower-instanceparams) disappeared during upgrade
update_validation info: Transformation upgrade-2.10.xsl successful
update_validation debug: Testing 'pacemaker-3.0' validation (14 of X)
update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1
update_validation debug: Testing 'pacemaker-3.1' validation (15 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.1
update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2
update_validation debug: Testing 'pacemaker-3.2' validation (16 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.2
update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.3
update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.4
-update_validation trace: Stopping at pacemaker-3.4
-update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.4
+update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5
+update_validation debug: Testing 'pacemaker-3.5' validation (19 of X)
+update_validation debug: Configuration valid for schema: pacemaker-3.5
+update_validation trace: Stopping at pacemaker-3.5
+update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.5
=#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
<operations>
<op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
<op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s"/>
<op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s"/>
</operations>
<instance_attributes id="mySmartFuse-params">
<nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
</instance_attributes>
<!-- a bit hairy but valid -->
<instance_attributes id="_cibtr-2_mySmartFuse-outputpower-instanceparams">
<nvpair id="__cibtr-2_mySmartFuse-outputpower-instanceparams__mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) - OK (0) =#=#=#=
* Passed: cibadmin - Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)
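The update_validation messages above show the validator stepping through each schema in turn until it reaches the newest one, which after this change is pacemaker-3.5. A sketch of the upgrade call against the shadow CIB, assuming cibadmin's --upgrade and --force options:

# Sketch only: rewrite the configuration to the latest supported schema,
# applying the upgrade-2.10*.xsl transforms along the way.
CIB_shadow=cts-cli cibadmin --upgrade --force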
=#=#=#= Begin test: Query a resource instance attribute (shall survive) =#=#=#=
outputpower
=#=#=#= Current cib after: Query a resource instance attribute (shall survive) =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
<operations>
<op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
<op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s"/>
<op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s"/>
</operations>
<instance_attributes id="mySmartFuse-params">
<nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
</instance_attributes>
<!-- a bit hairy but valid -->
<instance_attributes id="_cibtr-2_mySmartFuse-outputpower-instanceparams">
<nvpair id="__cibtr-2_mySmartFuse-outputpower-instanceparams__mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query a resource instance attribute (shall survive) - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource instance attribute (shall survive)
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index 4407074176..88ca98e798 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -1,490 +1,504 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Try to make resulting CIB invalid (enum violation) =#=#=#=
1 <cib epoch="4" num_updates="0" admin_epoch="0">
2 <configuration>
3 <crm_config/>
4 <nodes/>
5 <resources>
6 <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
7 <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
8 </resources>
9 <constraints>
10 <rsc_order id="ord_1-2" first="dummy1" first-action="break" then="dummy2"/>
11 </constraints>
12 </configuration>
13 <status/>
14 </cib>
15
Call failed: Update does not conform to the configured schema
=#=#=#= Current cib after: Try to make resulting CIB invalid (enum violation) =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources>
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints>
<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to make resulting CIB invalid (enum violation) - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to make resulting CIB invalid (enum violation)
=#=#=#= Begin test: Run crm_simulate with invalid CIB (enum violation) =#=#=#=
update_validation debug: Testing 'pacemaker-1.2' validation (1 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-1.2 validation failed
update_validation debug: Testing 'pacemaker-1.3' validation (2 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-1.3 validation failed
update_validation debug: Testing 'pacemaker-2.0' validation (3 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.0 validation failed
update_validation debug: Testing 'pacemaker-2.1' validation (4 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.1 validation failed
update_validation debug: Testing 'pacemaker-2.2' validation (5 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.2 validation failed
update_validation debug: Testing 'pacemaker-2.3' validation (6 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.3 validation failed
update_validation debug: Testing 'pacemaker-2.4' validation (7 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.4 validation failed
update_validation debug: Testing 'pacemaker-2.5' validation (8 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.5 validation failed
update_validation debug: Testing 'pacemaker-2.6' validation (9 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.6 validation failed
update_validation debug: Testing 'pacemaker-2.7' validation (10 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.7 validation failed
update_validation debug: Testing 'pacemaker-2.8' validation (11 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.8 validation failed
update_validation debug: Testing 'pacemaker-2.9' validation (12 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.9 validation failed
update_validation debug: Testing 'pacemaker-2.10' validation (13 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-2.10 validation failed
update_validation debug: Testing 'pacemaker-3.0' validation (14 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.0 validation failed
update_validation debug: Testing 'pacemaker-3.1' validation (15 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.1 validation failed
update_validation debug: Testing 'pacemaker-3.2' validation (16 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.2 validation failed
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.3 validation failed
update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
update_validation trace: pacemaker-3.4 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.4
+update_validation debug: Testing 'pacemaker-3.5' validation (19 of X)
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+update_validation trace: pacemaker-3.5 validation failed
+Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.5
=#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation)
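(Editorial note, not part of the patch: the enum violation exercised above comes down to the rsc_order first-action value. To my understanding the Pacemaker schema restricts first-action to the action names start, stop, promote and demote, so a value outside that set fails Relax-NG validation against every schema version tried. Illustrative only; both lines are taken from the CIBs shown in these tests:

  <rsc_order id="ord_1-2" first="dummy1" first-action="break" then="dummy2"/>  <!-- rejected: "break" is not a permitted action name -->
  <rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>  <!-- accepted -->
)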
=#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
1 <cib epoch="3" num_updates="1" admin_epoch="0">
2 <configuration>
3 <crm_config/>
4 <nodes/>
5 <resources>
6 <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
7 <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
8 </resources>
9 <constraints>
10 <rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
11 </constraints>
12 </configuration>
13 <status/>
14 </cib>
15
Call failed: Update does not conform to the configured schema
=#=#=#= Current cib after: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources>
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints>
<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to make resulting CIB invalid (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to make resulting CIB invalid (unrecognized validate-with)
=#=#=#= Begin test: Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#=
update_validation debug: Unknown validation schema
update_validation debug: Testing 'pacemaker-1.0' validation (0 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-1.0 validation failed
update_validation debug: Testing 'pacemaker-1.2' validation (1 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-1.2 validation failed
update_validation debug: Testing 'pacemaker-1.3' validation (2 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-1.3 validation failed
update_validation debug: Testing 'pacemaker-2.0' validation (3 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.0 validation failed
update_validation debug: Testing 'pacemaker-2.1' validation (4 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.1 validation failed
update_validation debug: Testing 'pacemaker-2.2' validation (5 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.2 validation failed
update_validation debug: Testing 'pacemaker-2.3' validation (6 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.3 validation failed
update_validation debug: Testing 'pacemaker-2.4' validation (7 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.4 validation failed
update_validation debug: Testing 'pacemaker-2.5' validation (8 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.5 validation failed
update_validation debug: Testing 'pacemaker-2.6' validation (9 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.6 validation failed
update_validation debug: Testing 'pacemaker-2.7' validation (10 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.7 validation failed
update_validation debug: Testing 'pacemaker-2.8' validation (11 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.8 validation failed
update_validation debug: Testing 'pacemaker-2.9' validation (12 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.9 validation failed
update_validation debug: Testing 'pacemaker-2.10' validation (13 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-2.10 validation failed
update_validation debug: Testing 'pacemaker-3.0' validation (14 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.0 validation failed
update_validation debug: Testing 'pacemaker-3.1' validation (15 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.1 validation failed
update_validation debug: Testing 'pacemaker-3.2' validation (16 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.2 validation failed
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.3 validation failed
update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
update_validation trace: pacemaker-3.4 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.4
+update_validation debug: Testing 'pacemaker-3.5' validation (19 of X)
+element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
+update_validation trace: pacemaker-3.5 validation failed
+Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.5
=#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with)
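(Editorial note, not part of the patch: the unrecognized-validate-with case hinges on the cib element claiming a schema name that the installed schema set does not provide, which is why the upgrade attempt reports "from unknown" and every known schema rejects the attribute. Illustrative only, using the value reported in the messages above:

  <cib validate-with="pacemaker-9999.0" epoch="3" num_updates="1" admin_epoch="0">  <!-- no shipped schema matches this name, so validation cannot find a starting point -->
)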
=#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
1 <cib epoch="3" num_updates="0" admin_epoch="0">
2 <configuration>
3 <crm_config/>
4 <nodes/>
5 <resources>
6 <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
7 <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
8 </resources>
9 <constraints>
10 <rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
11 </constraints>
12 <tags/>
13 </configuration>
14 <status/>
15 </cib>
16
Call failed: Update does not conform to the configured schema
=#=#=#= Current cib after: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources>
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints>
<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)
=#=#=#= Begin test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#=
update_validation debug: Testing 'pacemaker-1.2' validation (1 of X)
element tags: Relax-NG validity error : Element configuration has extra content: tags
update_validation trace: pacemaker-1.2 validation failed
update_validation debug: Testing 'pacemaker-1.3' validation (2 of X)
update_validation debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0
update_validation debug: Testing 'pacemaker-2.0' validation (3 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.0
update_validation debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1
update_validation debug: Testing 'pacemaker-2.1' validation (4 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.1
update_validation debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2
update_validation debug: Testing 'pacemaker-2.2' validation (5 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.2
update_validation debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3
update_validation debug: Testing 'pacemaker-2.3' validation (6 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.3
update_validation debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4
update_validation debug: Testing 'pacemaker-2.4' validation (7 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.4
update_validation debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5
update_validation debug: Testing 'pacemaker-2.5' validation (8 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.5
update_validation debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6
update_validation debug: Testing 'pacemaker-2.6' validation (9 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.6
update_validation debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7
update_validation debug: Testing 'pacemaker-2.7' validation (10 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.7
update_validation debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8
update_validation debug: Testing 'pacemaker-2.8' validation (11 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.8
update_validation debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9
update_validation debug: Testing 'pacemaker-2.9' validation (12 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.9
update_validation debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10
update_validation debug: Testing 'pacemaker-2.10' validation (13 of X)
update_validation debug: Configuration valid for schema: pacemaker-2.10
update_validation debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0
update_validation debug: Testing 'pacemaker-3.0' validation (14 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.0
update_validation debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1
update_validation debug: Testing 'pacemaker-3.1' validation (15 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.1
update_validation debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2
update_validation debug: Testing 'pacemaker-3.2' validation (16 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.2
update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.3
update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
update_validation debug: Configuration valid for schema: pacemaker-3.4
-update_validation trace: Stopping at pacemaker-3.4
-update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.4
+update_validation debug: pacemaker-3.4-style configuration is also valid for pacemaker-3.5
+update_validation debug: Testing 'pacemaker-3.5' validation (19 of X)
+update_validation debug: Configuration valid for schema: pacemaker-3.5
+update_validation trace: Stopping at pacemaker-3.5
+update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.5
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
Transition Summary:
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Executing cluster transition:
Revised cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)
=#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#=
=#=#=#= Current cib after: Make resulting CIB valid, although without validate-with attribute =#=#=#=
<cib epoch="3" num_updates="1" admin_epoch="0" validate-with="none">
<configuration>
<crm_config/>
<nodes/>
<resources>
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints>
<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Make resulting CIB valid, although without validate-with attribute - OK (0) =#=#=#=
* Passed: cibadmin - Make resulting CIB valid, although without validate-with attribute
=#=#=#= Begin test: Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#=
Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations)
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
Transition Summary:
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Executing cluster transition:
Revised cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with valid CIB, but without validate-with attribute
=#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
=#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="0" validate-with="none">
<configuration>
<crm_config/>
<nodes/>
<resources>
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints>
<rsc_order id="ord_1-2" first="dummy1" first-action="break" then="dummy2"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Make resulting CIB invalid, and without validate-with attribute - OK (0) =#=#=#=
* Passed: cibadmin - Make resulting CIB invalid, and without validate-with attribute
=#=#=#= Begin test: Run crm_simulate with invalid CIB, also without validate-with attribute =#=#=#=
Schema validation of configuration is disabled (enabling is encouraged and prevents common misconfigurations)
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_simple_rsc_order error: Cannot invert constraint 'ord_1-2' (please specify inverse manually)
Current cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
Transition Summary:
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Executing cluster transition:
Revised cluster status:
- dummy1 (ocf::pacemaker:Dummy): Stopped
- dummy2 (ocf::pacemaker:Dummy): Stopped
+ dummy1 (ocf::pacemaker:Dummy): Stopped
+ dummy2 (ocf::pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid CIB, also without validate-with attribute
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 9e343795b6..ea9c660f53 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1477 +1,1514 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
DESC = """Regression tests for Pacemaker's scheduler"""
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "bug-lf-2613", "Move group on failure" ],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-3", "Params: Restart instead of reload if start pending" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : master" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
+ [ "rule-dbl-as-auto-number-match",
+ "Floating-point rule values default to number comparison: match" ],
+ [ "rule-dbl-as-auto-number-no-match",
+ "Floating-point rule values default to number comparison: no "
+ "match" ],
+ [ "rule-dbl-as-integer-match",
+ "Floating-point rule values set to integer comparison: match" ],
+ [ "rule-dbl-as-integer-no-match",
+ "Floating-point rule values set to integer comparison: no match" ],
+ [ "rule-dbl-as-number-match",
+ "Floating-point rule values set to number comparison: match" ],
+ [ "rule-dbl-as-number-no-match",
+ "Floating-point rule values set to number comparison: no match" ],
+ [ "rule-dbl-parse-fail-default-str-match",
+ "Floating-point rule values fail to parse, default to string "
+ "comparison: match" ],
+ [ "rule-dbl-parse-fail-default-str-no-match",
+ "Floating-point rule values fail to parse, default to string "
+ "comparison: no match" ],
+ [ "rule-int-as-auto-integer-match",
+ "Integer rule values default to integer comparison: match" ],
+ [ "rule-int-as-auto-integer-no-match",
+ "Integer rule values default to integer comparison: no match" ],
+ [ "rule-int-as-integer-match",
+ "Integer rule values set to integer comparison: match" ],
+ [ "rule-int-as-integer-no-match",
+ "Integer rule values set to integer comparison: no match" ],
+ [ "rule-int-as-number-match",
+ "Integer rule values set to number comparison: match" ],
+ [ "rule-int-as-number-no-match",
+ "Integer rule values set to number comparison: no match" ],
+ [ "rule-int-parse-fail-default-str-match",
+ "Integer rule values fail to parse, default to string "
+ "comparison: match" ],
+ [ "rule-int-parse-fail-default-str-no-match",
+ "Integer rule values fail to parse, default to string "
+ "comparison: no match" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_master_w_native",
"cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
[ "colo_slave_w_native",
"cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
[ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
[ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
[ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-master", "Resource Sets - Master" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
# @TODO: If pacemaker implements versioned attributes, uncomment this test
#[ "migrate-versioned", "Disable migration for versioned resources" ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
[ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
[ "clone-order-instance", "Ordering with specific clone instances" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "master-0", "Stopped -> Slave" ],
[ "master-1", "Stopped -> Promote" ],
[ "master-2", "Stopped -> Promote : notify" ],
[ "master-3", "Stopped -> Promote : master location" ],
[ "master-4", "Started -> Promote : master location" ],
[ "master-5", "Promoted -> Promoted" ],
[ "master-6", "Promoted -> Promoted (2)" ],
[ "master-7", "Promoted -> Fenced" ],
[ "master-8", "Promoted -> Fenced -> Moved" ],
[ "master-9", "Stopped + Promotable + No quorum" ],
[ "master-10", "Stopped -> Promotable : notify with monitor" ],
[ "master-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable master placement" ],
[ "master-12", "Promotion based solely on rsc_location constraints" ],
[ "master-13", "Include preferences of colocated resources when placing master" ],
[ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
[ "master-ordering", "Prevent resources from starting that need a master" ],
[ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
[ "master-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
[ "master-failed-demote", "Don't retry failed demote actions" ],
[ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "master-depend",
"Ensure resources that depend on the master don't get allocated until the master does" ],
[ "master-reattach", "Re-attach to a running master" ],
[ "master-allow-start", "Don't include master score if it would prevent allocation" ],
[ "master-colocation",
"Allow master instances placemaker to be influenced by colocation constraints" ],
[ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "master-role", "Prevent target-role from promoting more than master-max instances" ],
[ "bug-lf-2358", "Master-Master anti-colocation" ],
[ "master-promotion-constraint", "Mandatory master colocation constraints" ],
[ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
[ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
[ "master-demote-2", "Demote does not clear past failure" ],
[ "master-move", "Move master based on failure of colocated group" ],
[ "master-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by order constraint" ],
[ "order_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
[ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
[ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "master-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "master-stop", "Stop instances due to location constraint with role=Started" ],
[ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "master-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "master-notify", "Master promotion with notifies" ],
[ "master-score-startup", "Use permanent master scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
[ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
[ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
[ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
[ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
[ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
[ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on master/slave" ],
[ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
[ "bug-1685", "Depends-on-master ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "bug-5007-masterslave_colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
[ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
[ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
[ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
[ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
[ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
[ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
[ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
[ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
[ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
[ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
[ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
[ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
[ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
[ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
[ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
[ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
[ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
[ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
[ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
[ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
[ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
[ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
[ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
[ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
# [ "versioned-resources", "Start resources with #ra-version rules" ],
# [ "restart-versioned", "Restart resources on #ra-version change" ],
# [ "reload-versioned", "Reload resources on #ra-version change" ],
#],
#[
# [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
# [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
# [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
#],
]
TESTS_64BIT = [
[
[ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
],
]
# Constants substituted in the build process
class BuildVars(object):
SBINDIR = "@sbindir@"
BUILDDIR = "@abs_top_builddir@"
CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"
# These values must be kept in sync with include/crm/crm.h
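# (NOINPUT corresponds to EX_NOINPUT in sysexits.h)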
class CrmExit(object):
OK = 0
ERROR = 1
NOT_INSTALLED = 5
NOINPUT = 66
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
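# crm_feature_set and batch-limit vary by build and configuration, so strip
# them before comparing generated output against the stored expected output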
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so grab that
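# (e.g. "cts-scheduler --run intervals -VVV" forwards "-VVV" to the test command)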
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
self.args = parser.parse_args(argv[1:])
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
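# Make GLib use plain malloc so valgrind can track its allocations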
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(CrmExit.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
BuildVars.CRM_SCHEMA_DIRECTORY ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
def __init__(self, argv=sys.argv):
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
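# (e.g. "--run intervals" and "--run scheduler/intervals.xml" select the same test)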
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
with io.open("/dev/null", "wt") as dev_null:
if diff(filename1, filename2, stdout=dev_null) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
self.failed_file.write("\n")
return True
return False
def run_one(self, test_name, test_desc, test_args=[]):
""" Run one scheduler test """
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
# (Intermediate) test outputs
output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return CrmExit.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return CrmExit.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
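# -D saves the transition graph as a dot file and -G saves the graph XML;
# the scores are captured from standard output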
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != CrmExit.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return CrmExit.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return CrmExit.ERROR
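# Uniquely sort the dot body (between the opening and closing braces)
# so ordering differences alone don't cause spurious diffs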
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return CrmExit.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return CrmExit.ERROR
return CrmExit.OK
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group:
try:
args = test[2]
except IndexError:
args = []
self.run_one(test[0], test[1], args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args != []:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
return CrmExit.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
os.remove(self.failed_filename)
return CrmExit.ERROR
def run(self):
""" Run test(s) as specified """
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
cat(self.failed_filename)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/rule-dbl-as-auto-number-match.dot b/cts/scheduler/rule-dbl-as-auto-number-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-dbl-as-auto-number-match.exp b/cts/scheduler/rule-dbl-as-auto-number-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-dbl-as-auto-number-match.scores b/cts/scheduler/rule-dbl-as-auto-number-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-auto-number-match.summary b/cts/scheduler/rule-dbl-as-auto-number-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-dbl-as-auto-number-match.xml b/cts/scheduler/rule-dbl-as-auto-number-match.xml
new file mode 100644
index 0000000000..b86918d607
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="10.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" value="3.4"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-as-auto-number-no-match.dot b/cts/scheduler/rule-dbl-as-auto-number-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-dbl-as-auto-number-no-match.exp b/cts/scheduler/rule-dbl-as-auto-number-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-dbl-as-auto-number-no-match.scores b/cts/scheduler/rule-dbl-as-auto-number-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-auto-number-no-match.summary b/cts/scheduler/rule-dbl-as-auto-number-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-dbl-as-auto-number-no-match.xml b/cts/scheduler/rule-dbl-as-auto-number-no-match.xml
new file mode 100644
index 0000000000..37e203252f
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-auto-number-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" value="10.6"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-as-integer-match.dot b/cts/scheduler/rule-dbl-as-integer-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-dbl-as-integer-match.exp b/cts/scheduler/rule-dbl-as-integer-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-dbl-as-integer-match.scores b/cts/scheduler/rule-dbl-as-integer-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-integer-match.summary b/cts/scheduler/rule-dbl-as-integer-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-dbl-as-integer-match.xml b/cts/scheduler/rule-dbl-as-integer-match.xml
new file mode 100644
index 0000000000..4be7f34490
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="integer" value="3.6"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-as-integer-no-match.dot b/cts/scheduler/rule-dbl-as-integer-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-dbl-as-integer-no-match.exp b/cts/scheduler/rule-dbl-as-integer-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-dbl-as-integer-no-match.scores b/cts/scheduler/rule-dbl-as-integer-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-integer-no-match.summary b/cts/scheduler/rule-dbl-as-integer-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-dbl-as-integer-no-match.xml b/cts/scheduler/rule-dbl-as-integer-no-match.xml
new file mode 100644
index 0000000000..574273f3a3
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-integer-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="integer" value="4.6"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-as-number-match.dot b/cts/scheduler/rule-dbl-as-number-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-dbl-as-number-match.exp b/cts/scheduler/rule-dbl-as-number-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-dbl-as-number-match.scores b/cts/scheduler/rule-dbl-as-number-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-number-match.summary b/cts/scheduler/rule-dbl-as-number-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-dbl-as-number-match.xml b/cts/scheduler/rule-dbl-as-number-match.xml
new file mode 100644
index 0000000000..af87055d18
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="number" value="3.4"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-as-number-no-match.dot b/cts/scheduler/rule-dbl-as-number-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-dbl-as-number-no-match.exp b/cts/scheduler/rule-dbl-as-number-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-dbl-as-number-no-match.scores b/cts/scheduler/rule-dbl-as-number-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-as-number-no-match.summary b/cts/scheduler/rule-dbl-as-number-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-dbl-as-number-no-match.xml b/cts/scheduler/rule-dbl-as-number-no-match.xml
new file mode 100644
index 0000000000..56ecf77e34
--- /dev/null
+++ b/cts/scheduler/rule-dbl-as-number-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3.4"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="number" value="3.6"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-match.dot b/cts/scheduler/rule-dbl-parse-fail-default-str-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-match.exp b/cts/scheduler/rule-dbl-parse-fail-default-str-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-match.scores b/cts/scheduler/rule-dbl-parse-fail-default-str-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-match.summary b/cts/scheduler/rule-dbl-parse-fail-default-str-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-match.xml b/cts/scheduler/rule-dbl-parse-fail-default-str-match.xml
new file mode 100644
index 0000000000..ab712656f9
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="as2.0"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" type="number" value="as10.0"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.dot b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.exp b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.scores b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.summary b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.xml b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.xml
new file mode 100644
index 0000000000..363b38fd68
--- /dev/null
+++ b/cts/scheduler/rule-dbl-parse-fail-default-str-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="as10.0"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" type="number" value="as2.0"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-auto-integer-match.dot b/cts/scheduler/rule-int-as-auto-integer-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-int-as-auto-integer-match.exp b/cts/scheduler/rule-int-as-auto-integer-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-int-as-auto-integer-match.scores b/cts/scheduler/rule-int-as-auto-integer-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-auto-integer-match.summary b/cts/scheduler/rule-int-as-auto-integer-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-int-as-auto-integer-match.xml b/cts/scheduler/rule-int-as-auto-integer-match.xml
new file mode 100644
index 0000000000..5161700511
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="10"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" value="3"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-auto-integer-no-match.dot b/cts/scheduler/rule-int-as-auto-integer-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-int-as-auto-integer-no-match.exp b/cts/scheduler/rule-int-as-auto-integer-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-int-as-auto-integer-no-match.scores b/cts/scheduler/rule-int-as-auto-integer-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-auto-integer-no-match.summary b/cts/scheduler/rule-int-as-auto-integer-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-int-as-auto-integer-no-match.xml b/cts/scheduler/rule-int-as-auto-integer-no-match.xml
new file mode 100644
index 0000000000..cd69c90ef6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-auto-integer-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" value="10"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-integer-match.dot b/cts/scheduler/rule-int-as-integer-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-int-as-integer-match.exp b/cts/scheduler/rule-int-as-integer-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-int-as-integer-match.scores b/cts/scheduler/rule-int-as-integer-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-integer-match.summary b/cts/scheduler/rule-int-as-integer-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-int-as-integer-match.xml b/cts/scheduler/rule-int-as-integer-match.xml
new file mode 100644
index 0000000000..8547cfce61
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="integer" value="3"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-integer-no-match.dot b/cts/scheduler/rule-int-as-integer-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-int-as-integer-no-match.exp b/cts/scheduler/rule-int-as-integer-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-int-as-integer-no-match.scores b/cts/scheduler/rule-int-as-integer-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-integer-no-match.summary b/cts/scheduler/rule-int-as-integer-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-int-as-integer-no-match.xml b/cts/scheduler/rule-int-as-integer-no-match.xml
new file mode 100644
index 0000000000..222ee30fa8
--- /dev/null
+++ b/cts/scheduler/rule-int-as-integer-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="integer" value="4"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-number-match.dot b/cts/scheduler/rule-int-as-number-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-int-as-number-match.exp b/cts/scheduler/rule-int-as-number-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-int-as-number-match.scores b/cts/scheduler/rule-int-as-number-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-number-match.summary b/cts/scheduler/rule-int-as-number-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-int-as-number-match.xml b/cts/scheduler/rule-int-as-number-match.xml
new file mode 100644
index 0000000000..9393e5e6ea
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="number" value="3"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-as-number-no-match.dot b/cts/scheduler/rule-int-as-number-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-int-as-number-no-match.exp b/cts/scheduler/rule-int-as-number-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-int-as-number-no-match.scores b/cts/scheduler/rule-int-as-number-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-as-number-no-match.summary b/cts/scheduler/rule-int-as-number-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-int-as-number-no-match.xml b/cts/scheduler/rule-int-as-number-no-match.xml
new file mode 100644
index 0000000000..8ca62c5e4f
--- /dev/null
+++ b/cts/scheduler/rule-int-as-number-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="3"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="eq" type="number" value="3.6"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-match.dot b/cts/scheduler/rule-int-parse-fail-default-str-match.dot
new file mode 100644
index 0000000000..d300b99530
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-match.dot
@@ -0,0 +1,3 @@
+ digraph "g" {
+"dummy_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-match.exp b/cts/scheduler/rule-int-parse-fail-default-str-match.exp
new file mode 100644
index 0000000000..002df23ce6
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-match.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="2" operation="stop" operation_key="dummy_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-match.scores b/cts/scheduler/rule-int-parse-fail-default-str-match.scores
new file mode 100644
index 0000000000..08024752e9
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: -INFINITY
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-match.summary b/cts/scheduler/rule-int-parse-fail-default-str-match.summary
new file mode 100644
index 0000000000..090e574119
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-match.summary
@@ -0,0 +1,19 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+ * Stop dummy ( node1 ) due to node availability
+
+Executing cluster transition:
+ * Resource action: dummy stop on node1
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Stopped
+
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-match.xml b/cts/scheduler/rule-int-parse-fail-default-str-match.xml
new file mode 100644
index 0000000000..6fb8755415
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="as2"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" type="integer" value="as10"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-no-match.dot b/cts/scheduler/rule-int-parse-fail-default-str-no-match.dot
new file mode 100644
index 0000000000..d8f1c9f22b
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-no-match.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-no-match.exp b/cts/scheduler/rule-int-parse-fail-default-str-no-match.exp
new file mode 100644
index 0000000000..56e315ff01
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-no-match.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-no-match.scores b/cts/scheduler/rule-int-parse-fail-default-str-no-match.scores
new file mode 100644
index 0000000000..f473fc348e
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-no-match.scores
@@ -0,0 +1,3 @@
+Allocation scores:
+pcmk__native_allocate: dummy allocation score on node1: 0
+pcmk__native_allocate: dummy allocation score on node2: 0
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-no-match.summary b/cts/scheduler/rule-int-parse-fail-default-str-no-match.summary
new file mode 100644
index 0000000000..8f1cd180c6
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-no-match.summary
@@ -0,0 +1,17 @@
+
+Current cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
+Transition Summary:
+
+Executing cluster transition:
+
+Revised cluster status:
+Node node2 (2): standby
+Online: [ node1 ]
+
+ dummy (ocf::heartbeat:Dummy): Started node1
+
diff --git a/cts/scheduler/rule-int-parse-fail-default-str-no-match.xml b/cts/scheduler/rule-int-parse-fail-default-str-no-match.xml
new file mode 100644
index 0000000000..32b6e8157d
--- /dev/null
+++ b/cts/scheduler/rule-int-parse-fail-default-str-no-match.xml
@@ -0,0 +1,67 @@
+<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.5" epoch="9" num_updates="10" admin_epoch="1" cib-last-written="Sat Aug 8 15:47:36 2020" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-80b75c64a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="testcluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-test_val" name="test_val" value="as10"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-standby" name="standby" value="on"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_location id="ban-dummy-by_test_val" rsc="dummy">
+ <rule id="ban-dummy-rule" score="-INFINITY">
+ <expression attribute="test_val" id="ban-dummy-expr" operation="gt" type="integer" value="as2"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <acls/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;1:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1596926857" last-run="1596926857" exec-time="48" queue-time="4" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.4.1" transition-key="2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:156:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node1" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1596926857" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.4.1" transition-key="2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" transition-magic="0:0;2:152:0:7da33dac-2387-4da8-99fa-177eb69326d7" exit-reason="" on_node="node2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1596926778" last-run="1596926778" exec-time="13" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
index 5df5f82117..3a37772921 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt
@@ -1,928 +1,935 @@
:compat-mode: legacy
= Rules =
////
We prefer [[ch-rules]], but older versions of asciidoc don't deal well
with that construct for chapter headings
////
anchor:ch-rules[Chapter 8, Rules]
indexterm:[Constraint,Rule]
Rules can be used to make your configuration more dynamic, allowing values to
change depending on the time or the value of a node attribute. Examples of
things rules are useful for:
* Set a higher value for <<s-resource-options,+resource-stickiness+>> during
working hours, to minimize downtime, and a lower value on weekends, to allow
resources to move to their most preferred locations when people aren't around
to notice.
* Automatically place the cluster into maintenance mode during a scheduled
maintenance window.
* Assign certain nodes and resources to a particular department via custom
node attributes and meta-attributes, and add a single location constraint
that restricts the department's resources to run only on those nodes.
Each constraint type or property set that supports rules may contain one or more
+rule+ elements specifying conditions under which the constraint or properties
take effect. Examples later in this chapter will make this clearer.
== Rule Properties ==
indexterm:[XML element,rule element]
.Attributes of a rule Element
[width="95%",cols="2m,1,<5",options="header",align="center"]
|=========================================================
|Attribute
|Default
|Description
|id
|
|A unique name for the rule (required)
indexterm:[XML attribute,id attribute,rule element]
indexterm:[XML element,rule element,id attribute]
|role
|+Started+
|The rule is in effect only when the resource is in the specified
role. Allowed values are +Started+, +Slave+, and +Master+. A rule
with +role="Master"+ cannot determine the initial location of a
clone instance and will only affect which of the active instances
will be promoted.
indexterm:[XML attribute,role attribute,rule element]
indexterm:[XML element,rule element,role attribute]
|score
|
|If this rule is used in a location constraint and evaluates to true, apply
this score to the constraint. Only one of +score+ and +score-attribute+ may be
used.
indexterm:[XML attribute,score attribute,rule element]
indexterm:[XML element,rule element,score attribute]
|score-attribute
|
|If this rule is used in a location constraint and evaluates to true, use the
value of this node attribute as the score to apply to the constraint. Only one
of +score+ and +score-attribute+ may be used.
indexterm:[XML attribute,score-attribute attribute,rule element]
indexterm:[XML element,rule element,score-attribute attribute]
|boolean-op
|+and+
|If this rule contains more than one condition, a value of +and+ specifies that
the rule evaluates to true only if all conditions are true, and a value of
+or+ specifies that the rule evaluates to true if any condition is true.
indexterm:[XML attribute,boolean-op attribute,rule element]
indexterm:[XML element,rule element,boolean-op attribute]
|=========================================================
A +rule+ element must contain one or more conditions. A condition may be an
+expression+ element, a +date_expression+ element, or another +rule+ element.
== Node Attribute Expressions ==
[[node-attribute-expressions]]
indexterm:[Rule,Node Attribute Expression]
indexterm:[XML element,expression element]
Expressions are rule conditions based on the values of node attributes.
.Attributes of an expression Element
[width="95%",cols="2m,1,<5",options="header",align="center"]
|=========================================================
|Field
|Default
|Description
|id
|
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,expression element]
indexterm:[XML element,expression element,id attribute]
|attribute
|
|The node attribute to test (required)
indexterm:[XML attribute,attribute attribute,expression element]
indexterm:[XML element,expression element,attribute attribute]
|type
|+string+
+|The default type for +lt+, +gt+, +lte+, and +gte+ operations is
+ +number+ if either value contains a decimal point character, or
+ +integer+ otherwise. The default type for all other operations is
+ +string+. If a numeric parse fails for either value, then the values
+ are compared as type +string+.
|How the node attributes should be compared. Allowed values are
- +string+, +integer+, and +version+.
+ +string+, +integer+, +number+, and +version+. +integer+ truncates
+ floating-point values if necessary before performing an integer
+ comparison. +number+ performs a floating-point comparison.
indexterm:[XML attribute,type attribute,expression element]
indexterm:[XML element,expression element,type attribute]
|operation
|
a|The comparison to perform (required). Allowed values:
* +lt:+ True if the node attribute value is less than the comparison value
* +gt:+ True if the node attribute value is greater than the comparison value
* +lte:+ True if the node attribute value is less than or equal to the comparison value
* +gte:+ True if the node attribute value is greater than or equal to the comparison value
* +eq:+ True if the node attribute value is equal to the comparison value
* +ne:+ True if the node attribute value is not equal to the comparison value
* +defined:+ True if the node has the named attribute
* +not_defined:+ True if the node does not have the named attribute
indexterm:[XML attribute,operation attribute,expression element]
indexterm:[XML element,expression element,operation attribute]
|value
|
|User-supplied value for comparison (required for operations other than
+defined+ and +not_defined+)
indexterm:[XML attribute,value attribute,expression element]
indexterm:[XML element,expression element,value attribute]
|value-source
|+literal+
a|How the +value+ is derived. Allowed values:
* +literal+: +value+ is a literal string to compare against
* +param+: +value+ is the name of a resource parameter to compare against (only
valid in location constraints)
* +meta+: +value+ is the name of a resource meta-attribute to compare against
(only valid in location constraints)
indexterm:[XML attribute,value-source attribute,expression element]
indexterm:[XML element,expression element,value-source attribute]
|=========================================================
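As a simple illustration of the numeric types (a minimal sketch mirroring the
regression tests added in this change; the +test_val+ attribute name and the ids
are only examples), the following location rule bans +dummy+ from any node whose
+test_val+ attribute equals 3 when compared as a floating-point +number+:
.Ban a resource using a floating-point comparison
====
[source,XML]
----
<rsc_location id="ban-dummy-by_test_val" rsc="dummy">
  <rule id="ban-dummy-rule" score="-INFINITY">
    <expression id="ban-dummy-expr" attribute="test_val"
                operation="eq" type="number" value="3"/>
  </rule>
</rsc_location>
----
====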
[[node-attribute-expressions-special]]
In addition to custom node attributes defined by the administrator, the cluster
defines special, built-in node attributes for each node that can also be used
in rule expressions.
.Built-in Node Attributes
[width="95%",cols="1m,<5",options="header",align="center"]
|=========================================================
|Name
|Value
|#uname
|Node <<s-node-name,name>>
|#id
|Node ID
|#kind
|Node type. Possible values are +cluster+, +remote+, and +container+. Kind is
+remote+ for Pacemaker Remote nodes created with the +ocf:pacemaker:remote+
resource, and +container+ for Pacemaker Remote guest nodes and bundle nodes
|#is_dc
|"true" if this node is a Designated Controller (DC), "false" otherwise
|#cluster-name
|The value of the +cluster-name+ cluster property, if set
|#site-name
|The value of the +site-name+ node attribute, if set, otherwise identical to
+#cluster-name+
|#role
a|The role the relevant promotable clone resource has on this node. Valid only within
a rule for a location constraint for a promotable clone resource.
////
// if uncommenting, put a pipe in front of first two lines
#ra-version
The installed version of the resource agent on the node, as defined
by the +version+ attribute of the +resource-agent+ tag in the agent's
metadata. Valid only within rules controlling resource options. This can be
useful during rolling upgrades of a backward-incompatible resource agent.
'(coming in x.x.x)'
////
|=========================================================
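For instance (a minimal sketch; the ids are illustrative), the built-in +#kind+
attribute can be used inside a location constraint to keep a resource off
Pacemaker Remote and guest nodes:
[source,XML]
----
<rule id="only-cluster-nodes-rule" score="-INFINITY">
  <expression id="only-cluster-nodes-expr" attribute="#kind"
              operation="ne" value="cluster"/>
</rule>
----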
== Date/Time Expressions ==
indexterm:[Rule,Date/Time Expression]
indexterm:[XML element,date_expression element]
Date/time expressions are rule conditions based (as the name suggests) on the
current date and time.
A +date_expression+ element may optionally contain a +date_spec+ or +duration+
element depending on the context.
.Attributes of a date_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,date_expression element]
indexterm:[XML element,date_expression element,id attribute]
|start
|A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601]
specification. May be used when +operation+ is +in_range+ (in which case at
least one of +start+ or +end+ must be specified) or +gt+ (in which case
+start+ is required).
indexterm:[XML attribute,start attribute,date_expression element]
indexterm:[XML element,date_expression element,start attribute]
|end
|A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601]
specification. May be used when +operation+ is +in_range+ (in which case at
least one of +start+ or +end+ must be specified) or +lt+ (in which case
+end+ is required).
indexterm:[XML attribute,end attribute,date_expression element]
indexterm:[XML element,date_expression element,end attribute]
|operation
a|Compares the current date/time with the start and/or end date,
depending on the context. Allowed values:
* +gt:+ True if the current date/time is after +start+
* +lt:+ True if the current date/time is before +end+
* +in_range:+ True if the current date/time is after +start+ (if specified)
and before either +end+ (if specified) or +start+ plus the value of the
+duration+ element (if one is contained in the +date_expression+)
* +date_spec:+ True if the current date/time matches the specification
given in the contained +date_spec+ element (described below)
indexterm:[XML attribute,operation attribute,date_expression element]
indexterm:[XML element,date_expression element,operation attribute]
|=========================================================
[NOTE]
======
There is no +eq+, +neq+, +gte+, or +lte+ operation, since they would be valid
only for a single second.
======
=== Date Specifications ===
indexterm:[Rule,Date/Time Expression,Date Specification]
indexterm:[XML element,date_spec element]
A +date_spec+ element is used to create a cron-like expression relating
to time. Each field can contain a single number or range. Any field not
supplied is ignored.
.Attributes of a date_spec Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the object (required)
indexterm:[XML attribute,id attribute,date_spec element]
indexterm:[XML element,date_spec element,id attribute]
|hours
|Allowed values: 0-23 (where 0 is midnight and 23 is 11 p.m.)
indexterm:[XML attribute,hours attribute,date_spec element]
indexterm:[XML element,date_spec element,hours attribute]
|monthdays
|Allowed values: 1-31 (depending on month and year)
indexterm:[XML attribute,monthdays attribute,date_spec element]
indexterm:[XML element,date_spec element,monthdays attribute]
|weekdays
|Allowed values: 1-7 (where 1 is Monday and 7 is Sunday)
indexterm:[XML attribute,weekdays attribute,date_spec element]
indexterm:[XML element,date_spec element,weekdays attribute]
|yeardays
|Allowed values: 1-366 (depending on the year)
indexterm:[XML attribute,yeardays attribute,date_spec element]
indexterm:[XML element,date_spec element,yeardays attribute]
|months
|Allowed values: 1-12
indexterm:[XML attribute,months attribute,date_spec element]
indexterm:[XML element,date_spec element,months attribute]
|weeks
|Allowed values: 1-53 (depending on weekyear)
indexterm:[XML attribute,weeks attribute,date_spec element]
indexterm:[XML element,date_spec element,weeks attribute]
|years
|Year according to the Gregorian calendar
indexterm:[XML attribute,years attribute,date_spec element]
indexterm:[XML element,date_spec element,years attribute]
|weekyears
|Year in which the week started; for example, 1 January 2005 can be specified
in ISO 8601 as '2005-001 Ordinal', '2005-01-01 Gregorian' or
'2004-W53-6 Weekly' and thus would match +years="2005"+ or +weekyears="2004"+
indexterm:[XML attribute,weekyears attribute,date_spec element]
indexterm:[XML element,date_spec element,weekyears attribute]
|moon
|Allowed values are 0-7 (where 0 is the new moon and 4 is full moon).
Seriously, you can use this. This was implemented to demonstrate the ease with
which new comparisons could be added.
indexterm:[XML attribute,moon attribute,date_spec element]
indexterm:[XML element,date_spec element,moon attribute]
|=========================================================
For example, +monthdays="1"+ matches the first day of every month, and
+hours="09-17"+ matches the hours between 9 a.m. and 5 p.m. (inclusive).
At this time, multiple ranges (e.g. +weekdays="1,2"+ or +weekdays="1-2,5-6"+)
are not supported.
[NOTE]
====
Pacemaker can calculate when evaluation of a +date_expression+ with an
+operation+ of +gt+, +lt+, or +in_range+ will next change, and schedule a
cluster re-check for that time. However, it does not do this for +date_spec+.
Instead, it evaluates the +date_spec+ whenever a cluster re-check naturally
happens via a cluster event or the +cluster-recheck-interval+ cluster option.
For example, if you have a +date_spec+ enabling a resource from 9 a.m. to 5 p.m.,
and +cluster-recheck-interval+ has been set to 5 minutes, then sometime between
9 a.m. and 9:05 a.m. the cluster would notice that it needs to start the
resource, and sometime between 5 p.m. and 5:05 p.m. it would realize that it
needs to stop the resource. The timing of the actual start and stop actions
will further depend on factors such as any other actions the cluster may need
to perform first, and the load of the machine.
====
=== Durations ===
indexterm:[Rule,Date/Time Expression,Duration]
indexterm:[XML element,duration element]
A +duration+ is used to calculate a value for +end+ when one is not supplied to
+in_range+ operations. It contains one or more attributes each containing a
single number. Any attribute not supplied is ignored.
.Attributes of a duration Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for this duration element (required)
indexterm:[XML attribute,id attribute,duration element]
indexterm:[XML element,duration element,id attribute]
|seconds
|This many seconds will be added to the total duration
indexterm:[XML attribute,seconds attribute,duration element]
indexterm:[XML element,duration element,seconds attribute]
|minutes
|This many minutes will be added to the total duration
indexterm:[XML attribute,minutes attribute,duration element]
indexterm:[XML element,duration element,minutes attribute]
|hours
|This many hours will be added to the total duration
indexterm:[XML attribute,hours attribute,duration element]
indexterm:[XML element,duration element,hours attribute]
|weeks
|This many weeks will be added to the total duration
indexterm:[XML attribute,weeks attribute,duration element]
indexterm:[XML element,duration element,weeks attribute]
|months
|This many months will be added to the total duration
indexterm:[XML attribute,months attribute,duration element]
indexterm:[XML element,duration element,months attribute]
|years
|This many years will be added to the total duration
indexterm:[XML attribute,years attribute,duration element]
indexterm:[XML element,duration element,years attribute]
|=========================================================
=== Example Time-Based Expressions ===
A small sample of how time-based expressions can be used:
.True if now is any time in the year 2005
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<date_expression id="date_expr1" start="2005-001" operation="in_range">
<duration id="duration1" years="1"/>
</date_expression>
</rule>
----
====
.Equivalent expression
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<date_expression id="date_expr2" operation="date_spec">
<date_spec id="date_spec2" years="2005"/>
</date_expression>
</rule>
----
====
.9am-5pm Monday-Friday
====
[source,XML]
-------
<rule id="rule3" score="INFINITY">
<date_expression id="date_expr3" operation="date_spec">
<date_spec id="date_spec3" hours="9-16" weekdays="1-5"/>
</date_expression>
</rule>
-------
====
Please note that the +16+ matches up to +16:59:59+, as the numeric
value (hour) still matches!
.9am-6pm Monday through Friday or anytime Saturday
====
[source,XML]
-------
<rule id="rule4" score="INFINITY" boolean-op="or">
<date_expression id="date_expr4-1" operation="date_spec">
<date_spec id="date_spec4-1" hours="9-16" weekdays="1-5"/>
</date_expression>
<date_expression id="date_expr4-2" operation="date_spec">
<date_spec id="date_spec4-2" weekdays="6"/>
</date_expression>
</rule>
-------
====
.9am-5pm or 9pm-12am Monday through Friday
====
[source,XML]
-------
<rule id="rule5" score="INFINITY" boolean-op="and">
<rule id="rule5-nested1" score="INFINITY" boolean-op="or">
<date_expression id="date_expr5-1" operation="date_spec">
<date_spec id="date_spec5-1" hours="9-16"/>
</date_expression>
<date_expression id="date_expr5-2" operation="date_spec">
<date_spec id="date_spec5-2" hours="21-23"/>
</date_expression>
</rule>
<date_expression id="date_expr5-3" operation="date_spec">
<date_spec id="date_spec5-3" weekdays="1-5"/>
</date_expression>
</rule>
-------
====
.Mondays in March 2005
====
[source,XML]
-------
<rule id="rule6" score="INFINITY" boolean-op="and">
<date_expression id="date_expr6-1" operation="date_spec">
<date_spec id="date_spec6" weekdays="1"/>
</date_expression>
<date_expression id="date_expr6-2" operation="in_range"
start="2005-03-01" end="2005-04-01"/>
</rule>
-------
====
[NOTE]
======
Because no time is specified with the above dates, 00:00:00 is implied. This
means that the range includes all of 2005-03-01 but none of 2005-04-01.
You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion.
======
.A full moon on Friday the 13th
=====
[source,XML]
-------
<rule id="rule7" score="INFINITY" boolean-op="and">
<date_expression id="date_expr7" operation="date_spec">
<date_spec id="date_spec7" weekdays="5" monthdays="13" moon="4"/>
</date_expression>
</rule>
-------
=====
== Resource Expressions ==
An +rsc_expression+ is a rule condition based on a resource agent's properties.
This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None
of the matching attributes of +class+, +provider+, and +type+ are required. If
one is omitted, all values of that attribute will match. For instance, omitting
+type+ means every type will match.
.Attributes of an rsc_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,id attribute]
|class
|The standard name to be matched against resource agents
indexterm:[XML attribute,class attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,class attribute]
|provider
|If given, the vendor to be matched against resource agents. This
only makes sense for agents using the OCF spec.
indexterm:[XML attribute,provider attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,provider attribute]
|type
|The name of the resource agent to be matched
indexterm:[XML attribute,type attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,type attribute]
|=========================================================
=== Example Resource-Based Expressions ===
A small sample of how resource-based expressions can be used:
.True for all ocf:heartbeat:IPaddr2 resources
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<rsc_expression id="rule_expr1" class="ocf" provider="heartbeat" type="IPaddr2"/>
</rule>
----
====
.Provider doesn't apply to non-OCF resources
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<rsc_expression id="rule_expr2" class="stonith" type="fence_xvm"/>
</rule>
----
====
== Operation Expressions ==
An +op_expression+ is a rule condition based on an action of some resource
agent. This rule is only valid within an +op_defaults+ context.
.Attributes of an op_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,op_expression element]
indexterm:[XML element,op_expression element,id attribute]
|name
|The action name to match against. This can be any action supported by
the resource agent; common values include +monitor+, +start+, and +stop+
(required).
indexterm:[XML attribute,name attribute,op_expression element]
indexterm:[XML element,op_expression element,name attribute]
|interval
|The interval of the action to match against. If not given, only
the name attribute will be used to match.
indexterm:[XML attribute,interval attribute,op_expression element]
indexterm:[XML element,op_expression element,interval attribute]
|=========================================================
=== Example Operation-Based Expressions ===
A small sample of how operation-based expressions can be used:
.True for all monitor actions
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<op_expression id="rule_expr1" name="monitor"/>
</rule>
----
====
.True for all monitor actions with a 10 second interval
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<op_expression id="rule_expr2" name="monitor" interval="10s"/>
</rule>
----
====
== Using Rules to Determine Resource Location ==
indexterm:[Rule,Determine Resource Location]
indexterm:[Resource,Location,Determine by Rules]
A location constraint may contain one or more top-level rules. The cluster
will act as if there is a separate location constraint for each rule that
evaluates as true.
Consider the following simple location constraint:
.Prevent resource "webserver" from running on node3
=====
[source,XML]
-------
<rsc_location id="ban-apache-on-node3" rsc="webserver"
score="-INFINITY" node="node3"/>
-------
=====
The constraint can be more verbosely written using a rule:
.Prevent resource "webserver" from running on node3 using rule
=====
[source,XML]
-------
<rsc_location id="ban-apache-on-node3" rsc="webserver">
<rule id="ban-apache-rule" score="-INFINITY">
<expression id="ban-apache-expr" attribute="#uname"
operation="eq" value="node3"/>
</rule>
</rsc_location>
-------
=====
The advantage of using the expanded form is that one could add more expressions
(for example, limiting the constraint to certain days of the week), or activate
the constraint by some node attribute other than node name.
=== Location Rules Based on Other Node Properties ===
The expanded form allows us to match on node properties other than its name.
If we rated each machine's CPU power such that the cluster had the
following nodes section:
.A sample nodes section for use with score-attribute
=====
[source,XML]
-------
<nodes>
<node id="uuid1" uname="c001n01" type="normal">
<instance_attributes id="uuid1-custom_attrs">
<nvpair id="uuid1-cpu_mips" name="cpu_mips" value="1234"/>
</instance_attributes>
</node>
<node id="uuid2" uname="c001n02" type="normal">
<instance_attributes id="uuid2-custom_attrs">
<nvpair id="uuid2-cpu_mips" name="cpu_mips" value="5678"/>
</instance_attributes>
</node>
</nodes>
-------
=====
then we could prevent resources from running on underpowered machines with this rule:
[source,XML]
-------
<rule id="need-more-power-rule" score="-INFINITY">
<expression id="need-more-power-expr" attribute="cpu_mips"
operation="lt" value="3000"/>
</rule>
-------
=== Using +score-attribute+ Instead of +score+ ===
When using +score-attribute+ instead of +score+, each node matched by
the rule has its score adjusted differently, according to its value
for the named node attribute. Thus, in the previous example, if a
rule used +score-attribute="cpu_mips"+, +c001n01+ would have its
preference to run the resource increased by +1234+ whereas +c001n02+
would have its preference increased by +5678+.
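A hedged sketch of such a constraint (the ids are illustrative) that prefers
nodes in proportion to their +cpu_mips+ value could look like:
.Prefer nodes proportionally to their CPU power using +score-attribute+
=====
[source,XML]
-------
<rsc_location id="prefer-fast-nodes" rsc="webserver">
   <rule id="prefer-fast-nodes-rule" score-attribute="cpu_mips">
      <expression id="prefer-fast-nodes-expr" attribute="cpu_mips"
         operation="defined"/>
   </rule>
</rsc_location>
-------
=====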
== Using Rules to Define Options ==
Rules may be used to control a variety of options:
* <<s-cluster-options,Cluster options>> (+cluster_property_set+ elements)
* <<s-node-attributes,Node attributes>> (as +instance_attributes+ or
+utilization+ elements inside a +node+ element)
* <<s-resource-options,Resource options>> (as +utilization+, +meta_attributes+,
or +instance_attributes+ elements inside a resource definition element or
+op+, +rsc_defaults+, +op_defaults+, or +template+ element)
* <<s-operation-properties,Operation properties>> (+meta_attributes+
inside an +op+ or +op_defaults+ element)
=== Using Rules to Control Resource Options ===
Often some cluster nodes will be different from their peers. Sometimes,
these differences -- e.g. the location of a binary or the names of network
interfaces -- require resources to be configured differently depending
on the machine they're hosted on.
By defining multiple +instance_attributes+ objects for the resource
and adding a rule to each, we can easily handle these special cases.
In the example below, +mySpecialRsc+ will use eth1 and port 9999 when
run on +node1+, eth2 and port 8888 on +node2+, and default to eth0 and
port 9999 on all other nodes.
.Defining different resource options based on the node name
=====
[source,XML]
-------
<primitive id="mySpecialRsc" class="ocf" type="Special" provider="me">
<instance_attributes id="special-node1" score="3">
<rule id="node1-special-case" score="INFINITY" >
<expression id="node1-special-case-expr" attribute="#uname"
operation="eq" value="node1"/>
</rule>
<nvpair id="node1-interface" name="interface" value="eth1"/>
</instance_attributes>
<instance_attributes id="special-node2" score="2" >
<rule id="node2-special-case" score="INFINITY">
<expression id="node2-special-case-expr" attribute="#uname"
operation="eq" value="node2"/>
</rule>
<nvpair id="node2-interface" name="interface" value="eth2"/>
<nvpair id="node2-port" name="port" value="8888"/>
</instance_attributes>
<instance_attributes id="defaults" score="1" >
<nvpair id="default-interface" name="interface" value="eth0"/>
<nvpair id="default-port" name="port" value="9999"/>
</instance_attributes>
</primitive>
-------
=====
The order in which +instance_attributes+ objects are evaluated is
determined by their score (highest to lowest). If not supplied, score
defaults to zero, and objects with an equal score are processed in
listed order. If the +instance_attributes+ object has no rule
or a +rule+ that evaluates to +true+, then for any parameter the resource does
not yet have a value for, the resource will use the parameter values defined by
the +instance_attributes+.
For example, given the configuration above, if the resource is placed on node1:
. +special-node1+ has the highest score (3) and so is evaluated first;
its rule evaluates to +true+, so +interface+ is set to +eth1+.
. +special-node2+ is evaluated next with score 2, but its rule evaluates to +false+,
so it is ignored.
. +defaults+ is evaluated last with score 1, and has no rule, so its values
are examined; +interface+ is already defined, so the value here is not used,
but +port+ is not yet defined, so +port+ is set to +9999+.
=== Using Rules to Control Resource Defaults ===
Rules can be used for resource and operation defaults. The following example
illustrates how to set a different +resource-stickiness+ value during and
outside work hours. This allows resources to automatically move back to their
most preferred hosts, but at a time that (in theory) does not interfere with
business activities.
.Change +resource-stickiness+ during working hours
=====
[source,XML]
-------
<rsc_defaults>
<meta_attributes id="core-hours" score="2">
<rule id="core-hour-rule" score="0">
<date_expression id="nine-to-five-Mon-to-Fri" operation="date_spec">
<date_spec id="nine-to-five-Mon-to-Fri-spec" hours="9-16" weekdays="1-5"/>
</date_expression>
</rule>
<nvpair id="core-stickiness" name="resource-stickiness" value="INFINITY"/>
</meta_attributes>
<meta_attributes id="after-hours" score="1" >
<nvpair id="after-stickiness" name="resource-stickiness" value="0"/>
</meta_attributes>
</rsc_defaults>
-------
=====
Rules may be used similarly in +instance_attributes+ or +utilization+ blocks.
Any single block may directly contain only a single rule, but that rule may
itself contain any number of rules.
+rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults
either on a single resource or across an entire class of resources with a single
rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+
and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If
multiple rules succeed for a given resource agent, the last one specified will be
the one that takes effect. As with any other rule, boolean operations may be used
to make more complicated expressions.
.Set all IPaddr2 resources to stopped
=====
[source,XML]
-------
<rsc_defaults>
<meta_attributes id="op-target-role">
<rule id="op-target-role-rule" score="INFINITY">
<rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat"
type="IPaddr2"/>
</rule>
<nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/>
</meta_attributes>
</rsc_defaults>
-------
=====
.Set all monitor action timeouts to 7 seconds
=====
[source,XML]
-------
<op_defaults>
<meta_attributes id="op-monitor-defaults">
<rule id="op-monitor-default-rule" score="INFINITY">
<op_expression id="op-monitor-default-expr" name="monitor"/>
</rule>
<nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
</meta_attributes>
</op_defaults>
-------
=====
.Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds
=====
[source,XML]
-------
<op_defaults>
<meta_attributes id="op-monitor-and">
<rule id="op-monitor-and-rule" score="INFINITY">
<rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat"
type="IPaddr2"/>
<op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/>
</rule>
<nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/>
</meta_attributes>
</op_defaults>
-------
=====
=== Using Rules to Control Cluster Options ===
indexterm:[Rule,Controlling Cluster Options]
indexterm:[Cluster,Setting Options with Rules]
Controlling cluster options is achieved in much the same manner as
specifying different resource options on different nodes.
The difference is that because they are cluster options, one cannot (or should
not, because they won't work) use attribute-based expressions. The following
example illustrates how to set +maintenance-mode+ during a scheduled
maintenance window. This will keep the cluster running but not monitor, start,
or stop resources during this time.
.Schedule a maintenance window for 9 to 11 p.m. CDT Sept. 20, 2019
=====
[source,XML]
-------
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="bootstrap-stonith-enabled" name="stonith-enabled" value="1"/>
</cluster_property_set>
<cluster_property_set id="normal-set" score="10">
<nvpair id="normal-maintenance-mode" name="maintenance-mode" value="false"/>
</cluster_property_set>
<cluster_property_set id="maintenance-window-set" score="1000">
<nvpair id="maintenance-nvpair1" name="maintenance-mode" value="true"/>
<rule id="maintenance-rule1" score="INFINITY">
<date_expression id="maintenance-date1" operation="in_range"
start="2019-09-20 21:00:00 -05:00" end="2019-09-20 23:00:00 -05:00"/>
</rule>
</cluster_property_set>
</crm_config>
-------
=====
[IMPORTANT]
====
The +cluster_property_set+ with an +id+ set to "cib-bootstrap-options" will
'always' have the highest priority, regardless of any scores. Therefore,
rules in another +cluster_property_set+ can never take effect for any
properties listed in the bootstrap set.
====
diff --git a/doc/sphinx/Pacemaker_Explained/rules.rst b/doc/sphinx/Pacemaker_Explained/rules.rst
index 49bf4df0dc..365e04c073 100644
--- a/doc/sphinx/Pacemaker_Explained/rules.rst
+++ b/doc/sphinx/Pacemaker_Explained/rules.rst
@@ -1,925 +1,932 @@
Rules
-----
.. Convert_to_RST:
anchor:ch-rules[Chapter 8, Rules]
indexterm:[Constraint,Rule]
Rules can be used to make your configuration more dynamic, allowing values to
change depending on the time or the value of a node attribute. Examples of
things rules are useful for:
* Set a higher value for <<s-resource-options,+resource-stickiness+>> during
working hours, to minimize downtime, and a lower value on weekends, to allow
resources to move to their most preferred locations when people aren't around
to notice.
* Automatically place the cluster into maintenance mode during a scheduled
maintenance window.
* Assign certain nodes and resources to a particular department via custom
node attributes and meta-attributes, and add a single location constraint
that restricts the department's resources to run only on those nodes.
Each constraint type or property set that supports rules may contain one or more
+rule+ elements specifying conditions under which the constraint or properties
take effect. Examples later in this chapter will make this clearer.
== Rule Properties ==
indexterm:[XML element,rule element]
.Attributes of a rule Element
[width="95%",cols="2m,1,<5",options="header",align="center"]
|=========================================================
|Attribute
|Default
|Description
|id
|
|A unique name for the rule (required)
indexterm:[XML attribute,id attribute,rule element]
indexterm:[XML element,rule element,id attribute]
|role
|+Started+
|The rule is in effect only when the resource is in the specified
role. Allowed values are +Started+, +Slave+, and +Master+. A rule
with +role="Master"+ cannot determine the initial location of a
clone instance and will only affect which of the active instances
will be promoted.
indexterm:[XML attribute,role attribute,rule element]
indexterm:[XML element,rule element,role attribute]
|score
|
|If this rule is used in a location constraint and evaluates to true, apply
this score to the constraint. Only one of +score+ and +score-attribute+ may be
used.
indexterm:[XML attribute,score attribute,rule element]
indexterm:[XML element,rule element,score attribute]
|score-attribute
|
|If this rule is used in a location constraint and evaluates to true, use the
value of this node attribute as the score to apply to the constraint. Only one
of +score+ and +score-attribute+ may be used.
indexterm:[XML attribute,score-attribute attribute,rule element]
indexterm:[XML element,rule element,score-attribute attribute]
|boolean-op
|+and+
|If this rule contains more than one condition, a value of +and+ specifies that
the rule evaluates to true only if all conditions are true, and a value of
+or+ specifies that the rule evaluates to true if any condition is true.
indexterm:[XML attribute,boolean-op attribute,rule element]
indexterm:[XML element,rule element,boolean-op attribute]
|=========================================================
A +rule+ element must contain one or more conditions. A condition may be an
+expression+ element, a +date_expression+ element, or another +rule+ element.
== Node Attribute Expressions ==
[[node-attribute-expressions]]
indexterm:[Rule,Node Attribute Expression]
indexterm:[XML element,expression element]
Expressions are rule conditions based on the values of node attributes.
.Attributes of an expression Element
[width="95%",cols="2m,1,<5",options="header",align="center"]
|=========================================================
|Field
|Default
|Description
|id
|
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,expression element]
indexterm:[XML element,expression element,id attribute]
|attribute
|
|The node attribute to test (required)
indexterm:[XML attribute,attribute attribute,expression element]
indexterm:[XML element,expression element,attribute attribute]
|type
- |+string+
+ |The default type for +lt+, +gt+, +lte+, and +gte+ operations is
+ +number+ if either value contains a decimal point character, or
+ +integer+ otherwise. The default type for all other operations is
+ +string+. If a numeric parse fails for either value, then the values
+ are compared as type +string+.
|How the node attributes should be compared. Allowed values are
- +string+, +integer+, and +version+.
+ +string+, +integer+, +number+, and +version+. +integer+ truncates
+ floating-point values if necessary before performing an integer
+ comparison. +number+ performs a floating-point comparison.
+
indexterm:[XML attribute,type attribute,expression element]
indexterm:[XML element,expression element,type attribute]
|operation
|
a|The comparison to perform (required). Allowed values:
* +lt:+ True if the node attribute value is less than the comparison value
* +gt:+ True if the node attribute value is greater than the comparison value
* +lte:+ True if the node attribute value is less than or equal to the comparison value
* +gte:+ True if the node attribute value is greater than or equal to the comparison value
* +eq:+ True if the node attribute value is equal to the comparison value
* +ne:+ True if the node attribute value is not equal to the comparison value
* +defined:+ True if the node has the named attribute
* +not_defined:+ True if the node does not have the named attribute
indexterm:[XML attribute,operation attribute,expression element]
indexterm:[XML element,expression element,operation attribute]
|value
|
|User-supplied value for comparison (required for operations other than
+defined+ and +not_defined+)
indexterm:[XML attribute,value attribute,expression element]
indexterm:[XML element,expression element,value attribute]
|value-source
|+literal+
a|How the +value+ is derived. Allowed values:
* +literal+: +value+ is a literal string to compare against
* +param+: +value+ is the name of a resource parameter to compare against (only
valid in location constraints)
* +meta+: +value+ is the name of a resource meta-attribute to compare against
(only valid in location constraints)
indexterm:[XML attribute,value-source attribute,expression element]
indexterm:[XML element,expression element,value-source attribute]
|=========================================================
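As a hedged illustration of +value-source+ (the +datacenter+ attribute and
parameter names are hypothetical), the following rule would ban a resource from
any node whose +datacenter+ node attribute differs from the resource's own
+datacenter+ parameter:
.Ban a resource from nodes whose datacenter attribute does not match its datacenter parameter
====
[source,XML]
----
<rsc_location id="ban-wrong-datacenter" rsc="myResource">
   <rule id="ban-wrong-datacenter-rule" score="-INFINITY">
      <expression id="ban-wrong-datacenter-expr" attribute="datacenter"
         operation="ne" value="datacenter" value-source="param"/>
   </rule>
</rsc_location>
----
====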
[[node-attribute-expressions-special]]
In addition to custom node attributes defined by the administrator, the cluster
defines special, built-in node attributes for each node that can also be used
in rule expressions.
.Built-in Node Attributes
[width="95%",cols="1m,<5",options="header",align="center"]
|=========================================================
|Name
|Value
|#uname
|Node <<s-node-name,name>>
|#id
|Node ID
|#kind
|Node type. Possible values are +cluster+, +remote+, and +container+. Kind is
+remote+ for Pacemaker Remote nodes created with the +ocf:pacemaker:remote+
resource, and +container+ for Pacemaker Remote guest nodes and bundle nodes
|#is_dc
|"true" if this node is a Designated Controller (DC), "false" otherwise
|#cluster-name
|The value of the +cluster-name+ cluster property, if set
|#site-name
|The value of the +site-name+ node attribute, if set, otherwise identical to
+#cluster-name+
|#role
a|The role the relevant promotable clone resource has on this node. Valid only within
a rule for a location constraint for a promotable clone resource.
////
// if uncommenting, put a pipe in front of first two lines
#ra-version
The installed version of the resource agent on the node, as defined
by the +version+ attribute of the +resource-agent+ tag in the agent's
metadata. Valid only within rules controlling resource options. This can be
useful during rolling upgrades of a backward-incompatible resource agent.
'(coming in x.x.x)'
////
|=========================================================
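For instance, a hedged sketch (the resource name and ids are illustrative) of a
rule using the built-in +#kind+ attribute to keep a resource off Pacemaker
Remote and guest nodes:
.Ban a resource from all nodes that are not full cluster nodes
====
[source,XML]
----
<rsc_location id="ban-non-cluster-nodes" rsc="myResource">
   <rule id="ban-non-cluster-rule" score="-INFINITY">
      <expression id="ban-non-cluster-expr" attribute="#kind"
         operation="ne" value="cluster"/>
   </rule>
</rsc_location>
----
====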
== Date/Time Expressions ==
indexterm:[Rule,Date/Time Expression]
indexterm:[XML element,date_expression element]
Date/time expressions are rule conditions based (as the name suggests) on the
current date and time.
A +date_expression+ element may optionally contain a +date_spec+ or +duration+
element depending on the context.
.Attributes of a date_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,date_expression element]
indexterm:[XML element,date_expression element,id attribute]
|start
|A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601]
specification. May be used when +operation+ is +in_range+ (in which case at
least one of +start+ or +end+ must be specified) or +gt+ (in which case
+start+ is required).
indexterm:[XML attribute,start attribute,date_expression element]
indexterm:[XML element,date_expression element,start attribute]
|end
|A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601]
specification. May be used when +operation+ is +in_range+ (in which case at
least one of +start+ or +end+ must be specified) or +lt+ (in which case
+end+ is required).
indexterm:[XML attribute,end attribute,date_expression element]
indexterm:[XML element,date_expression element,end attribute]
|operation
a|Compares the current date/time with the start and/or end date,
depending on the context. Allowed values:
* +gt:+ True if the current date/time is after +start+
* +lt:+ True if the current date/time is before +end+
* +in_range:+ True if the current date/time is after +start+ (if specified)
and before either +end+ (if specified) or +start+ plus the value of the
+duration+ element (if one is contained in the +date_expression+)
* +date_spec:+ True if the current date/time matches the specification
given in the contained +date_spec+ element (described below)
indexterm:[XML attribute,operation attribute,date_expression element]
indexterm:[XML element,date_expression element,operation attribute]
|=========================================================
[NOTE]
======
There is no +eq+, +neq+, +gte+, or +lte+ operation, since they would be valid
only for a single second.
======
=== Date Specifications ===
indexterm:[Rule,Date/Time Expression,Date Specification]
indexterm:[XML element,date_spec element]
A +date_spec+ element is used to create a cron-like expression relating
to time. Each field can contain a single number or range. Any field not
supplied is ignored.
.Attributes of a date_spec Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the object (required)
indexterm:[XML attribute,id attribute,date_spec element]
indexterm:[XML element,date_spec element,id attribute]
|hours
|Allowed values: 0-23 (where 0 is midnight and 23 is 11 p.m.)
indexterm:[XML attribute,hours attribute,date_spec element]
indexterm:[XML element,date_spec element,hours attribute]
|monthdays
|Allowed values: 1-31 (depending on month and year)
indexterm:[XML attribute,monthdays attribute,date_spec element]
indexterm:[XML element,date_spec element,monthdays attribute]
|weekdays
|Allowed values: 1-7 (where 1 is Monday and 7 is Sunday)
indexterm:[XML attribute,weekdays attribute,date_spec element]
indexterm:[XML element,date_spec element,weekdays attribute]
|yeardays
|Allowed values: 1-366 (depending on the year)
indexterm:[XML attribute,yeardays attribute,date_spec element]
indexterm:[XML element,date_spec element,yeardays attribute]
|months
|Allowed values: 1-12
indexterm:[XML attribute,months attribute,date_spec element]
indexterm:[XML element,date_spec element,months attribute]
|weeks
|Allowed values: 1-53 (depending on weekyear)
indexterm:[XML attribute,weeks attribute,date_spec element]
indexterm:[XML element,date_spec element,weeks attribute]
|years
|Year according to the Gregorian calendar
indexterm:[XML attribute,years attribute,date_spec element]
indexterm:[XML element,date_spec element,years attribute]
|weekyears
|Year in which the week started; for example, 1 January 2005 can be specified
in ISO 8601 as '2005-001 Ordinal', '2005-01-01 Gregorian' or
'2004-W53-6 Weekly' and thus would match +years="2005"+ or +weekyears="2004"+
indexterm:[XML attribute,weekyears attribute,date_spec element]
indexterm:[XML element,date_spec element,weekyears attribute]
|moon
|Allowed values are 0-7 (where 0 is the new moon and 4 is the full moon).
Seriously, you can use this. This was implemented to demonstrate the ease with
which new comparisons could be added.
indexterm:[XML attribute,moon attribute,date_spec element]
indexterm:[XML element,date_spec element,moon attribute]
|=========================================================
For example, +monthdays="1"+ matches the first day of every month, and
+hours="09-17"+ matches the hours between 9 a.m. and 5 p.m. (inclusive).
At this time, multiple ranges (e.g. +weekdays="1,2"+ or +weekdays="1-2,5-6"+)
are not supported.
[NOTE]
====
Pacemaker can calculate when evaluation of a +date_expression+ with an
+operation+ of +gt+, +lt+, or +in_range+ will next change, and schedule a
cluster re-check for that time. However, it does not do this for +date_spec+.
Instead, it evaluates the +date_spec+ whenever a cluster re-check naturally
happens via a cluster event or the +cluster-recheck-interval+ cluster option.
For example, if you have a +date_spec+ enabling a resource from 9 a.m. to 5 p.m.,
and +cluster-recheck-interval+ has been set to 5 minutes, then sometime between
9 a.m. and 9:05 a.m. the cluster would notice that it needs to start the
resource, and sometime between 5 p.m. and 5:05 p.m. it would realize that it
needs to stop the resource. The timing of the actual start and stop actions
will further depend on factors such as any other actions the cluster may need
to perform first, and the load of the machine.
====
=== Durations ===
indexterm:[Rule,Date/Time Expression,Duration]
indexterm:[XML element,duration element]
A +duration+ is used to calculate a value for +end+ when one is not supplied to
+in_range+ operations. It contains one or more attributes each containing a
single number. Any attribute not supplied is ignored.
.Attributes of a duration Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for this duration element (required)
indexterm:[XML attribute,id attribute,duration element]
indexterm:[XML element,duration element,id attribute]
|seconds
|This many seconds will be added to the total duration
indexterm:[XML attribute,seconds attribute,duration element]
indexterm:[XML element,duration element,seconds attribute]
|minutes
|This many minutes will be added to the total duration
indexterm:[XML attribute,minutes attribute,duration element]
indexterm:[XML element,duration element,minutes attribute]
|hours
|This many hours will be added to the total duration
indexterm:[XML attribute,hours attribute,duration element]
indexterm:[XML element,duration element,hours attribute]
|weeks
|This many weeks will be added to the total duration
indexterm:[XML attribute,weeks attribute,duration element]
indexterm:[XML element,duration element,weeks attribute]
|months
|This many months will be added to the total duration
indexterm:[XML attribute,months attribute,duration element]
indexterm:[XML element,duration element,months attribute]
|years
|This many years will be added to the total duration
indexterm:[XML attribute,years attribute,duration element]
indexterm:[XML element,duration element,years attribute]
|=========================================================
=== Example Time-Based Expressions ===
A small sample of how time-based expressions can be used:
.True if now is any time in the year 2005
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<date_expression id="date_expr1" start="2005-001" operation="in_range">
<duration id="duration1" years="1"/>
</date_expression>
</rule>
----
====
.Equivalent expression
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<date_expression id="date_expr2" operation="date_spec">
<date_spec id="date_spec2" years="2005"/>
</date_expression>
</rule>
----
====
.9am-5pm Monday-Friday
====
[source,XML]
-------
<rule id="rule3" score="INFINITY">
<date_expression id="date_expr3" operation="date_spec">
<date_spec id="date_spec3" hours="9-16" weekdays="1-5"/>
</date_expression>
</rule>
-------
====
Please note that the +16+ matches up to +16:59:59+, as the numeric
value (hour) still matches!
.9am-6pm Monday through Friday or anytime Saturday
====
[source,XML]
-------
<rule id="rule4" score="INFINITY" boolean-op="or">
<date_expression id="date_expr4-1" operation="date_spec">
<date_spec id="date_spec4-1" hours="9-16" weekdays="1-5"/>
</date_expression>
<date_expression id="date_expr4-2" operation="date_spec">
<date_spec id="date_spec4-2" weekdays="6"/>
</date_expression>
</rule>
-------
====
.9am-5pm or 9pm-12am Monday through Friday
====
[source,XML]
-------
<rule id="rule5" score="INFINITY" boolean-op="and">
<rule id="rule5-nested1" score="INFINITY" boolean-op="or">
<date_expression id="date_expr5-1" operation="date_spec">
<date_spec id="date_spec5-1" hours="9-16"/>
</date_expression>
<date_expression id="date_expr5-2" operation="date_spec">
<date_spec id="date_spec5-2" hours="21-23"/>
</date_expression>
</rule>
<date_expression id="date_expr5-3" operation="date_spec">
<date_spec id="date_spec5-3" weekdays="1-5"/>
</date_expression>
</rule>
-------
====
.Mondays in March 2005
====
[source,XML]
-------
<rule id="rule6" score="INFINITY" boolean-op="and">
<date_expression id="date_expr6-1" operation="date_spec">
<date_spec id="date_spec6" weekdays="1"/>
</date_expression>
<date_expression id="date_expr6-2" operation="in_range"
start="2005-03-01" end="2005-04-01"/>
</rule>
-------
====
[NOTE]
======
Because no time is specified with the above dates, 00:00:00 is implied. This
means that the range includes all of 2005-03-01 but none of 2005-04-01.
You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion.
======
.A full moon on Friday the 13th
=====
[source,XML]
-------
<rule id="rule7" score="INFINITY" boolean-op="and">
<date_expression id="date_expr7" operation="date_spec">
<date_spec id="date_spec7" weekdays="5" monthdays="13" moon="4"/>
</date_expression>
</rule>
-------
=====
== Resource Expressions ==
An +rsc_expression+ is a rule condition based on a resource agent's properties.
This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None
of the matching attributes of +class+, +provider+, and +type+ are required. If
one is omitted, all values of that attribute will match. For instance, omitting
+type+ means every type will match.
.Attributes of an rsc_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,id attribute]
|class
|The standard name to be matched against resource agents
indexterm:[XML attribute,class attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,class attribute]
|provider
|If given, the vendor to be matched against resource agents. This
only makes sense for agents using the OCF spec.
indexterm:[XML attribute,provider attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,provider attribute]
|type
|The name of the resource agent to be matched
indexterm:[XML attribute,type attribute,rsc_expression element]
indexterm:[XML element,rsc_expression element,type attribute]
|=========================================================
=== Example Resource-Based Expressions ===
A small sample of how resource-based expressions can be used:
.True for all ocf:heartbeat:IPaddr2 resources
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<rsc_expression id="rule_expr1" class="ocf" provider="heartbeat" type="IPaddr2"/>
</rule>
----
====
.Provider doesn't apply to non-OCF resources
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<rsc_expression id="rule_expr2" class="stonith" type="fence_xvm"/>
</rule>
----
====
== Operation Expressions ==
An +op_expression+ is a rule condition based on an action of some resource
agent. This rule is only valid within an +op_defaults+ context.
.Attributes of an op_expression Element
[width="95%",cols="2m,<5",options="header",align="center"]
|=========================================================
|Field
|Description
|id
|A unique name for the expression (required)
indexterm:[XML attribute,id attribute,op_expression element]
indexterm:[XML element,op_expression element,id attribute]
|name
|The action name to match against (required). This can be any action supported by
the resource agent; common values include +monitor+, +start+, and +stop+.
indexterm:[XML attribute,name attribute,op_expression element]
indexterm:[XML element,op_expression element,name attribute]
|interval
|The interval of the action to match against. If not given, only
the name attribute will be used to match.
indexterm:[XML attribute,interval attribute,op_expression element]
indexterm:[XML element,op_expression element,interval attribute]
|=========================================================
=== Example Operation-Based Expressions ===
A small sample of how operation-based expressions can be used:
.True for all monitor actions
====
[source,XML]
----
<rule id="rule1" score="INFINITY">
<op_expression id="rule_expr1" name="monitor"/>
</rule>
----
====
.True for all monitor actions with a 10 second interval
====
[source,XML]
----
<rule id="rule2" score="INFINITY">
<op_expression id="rule_expr2" name="monitor" interval="10s"/>
</rule>
----
====
== Using Rules to Determine Resource Location ==
indexterm:[Rule,Determine Resource Location]
indexterm:[Resource,Location,Determine by Rules]
A location constraint may contain one or more top-level rules. The cluster
will act as if there is a separate location constraint for each rule that
evaluates to true.
Consider the following simple location constraint:
.Prevent resource "webserver" from running on node3
=====
[source,XML]
-------
<rsc_location id="ban-apache-on-node3" rsc="webserver"
score="-INFINITY" node="node3"/>
-------
=====
The constraint can be more verbosely written using a rule:
.Prevent resource "webserver" from running on node3 using rule
=====
[source,XML]
-------
<rsc_location id="ban-apache-on-node3" rsc="webserver">
<rule id="ban-apache-rule" score="-INFINITY">
<expression id="ban-apache-expr" attribute="#uname"
operation="eq" value="node3"/>
</rule>
</rsc_location>
-------
=====
The advantage of using the expanded form is that one could add more expressions
(for example, limiting the constraint to certain days of the week), or activate
the constraint by some node attribute other than node name.
=== Location Rules Based on Other Node Properties ===
The expanded form allows us to match on node properties other than the node name.
If we rated each machine's CPU power such that the cluster had the
following nodes section:
.A sample nodes section for use with score-attribute
=====
[source,XML]
-------
<nodes>
<node id="uuid1" uname="c001n01" type="normal">
<instance_attributes id="uuid1-custom_attrs">
<nvpair id="uuid1-cpu_mips" name="cpu_mips" value="1234"/>
</instance_attributes>
</node>
<node id="uuid2" uname="c001n02" type="normal">
<instance_attributes id="uuid2-custom_attrs">
<nvpair id="uuid2-cpu_mips" name="cpu_mips" value="5678"/>
</instance_attributes>
</node>
</nodes>
-------
=====
then we could prevent resources from running on underpowered machines with this rule:
[source,XML]
-------
<rule id="need-more-power-rule" score="-INFINITY">
<expression id="need-more-power-expr" attribute="cpu_mips"
operation="lt" value="3000"/>
</rule>
-------
=== Using +score-attribute+ Instead of +score+ ===
When using +score-attribute+ instead of +score+, each node matched by
the rule has its score adjusted differently, according to its value
for the named node attribute. Thus, in the previous example, if a
rule used +score-attribute="cpu_mips"+, +c001n01+ would have its
preference to run the resource increased by +1234+ whereas +c001n02+
would have its preference increased by +5678+.
== Using Rules to Define Options ==
Rules may be used to control a variety of options:
* <<s-cluster-options,Cluster options>> (+cluster_property_set+ elements)
* <<s-node-attributes,Node attributes>> (as +instance_attributes+ or
+utilization+ elements inside a +node+ element)
* <<s-resource-options,Resource options>> (as +utilization+, +meta_attributes+,
or +instance_attributes+ elements inside a resource definition element or
+op+, +rsc_defaults+, +op_defaults+, or +template+ element)
* <<s-operation-properties,Operation properties>> (+meta_attributes+
inside an +op+ or +op_defaults+ element)
=== Using Rules to Control Resource Options ===
Often some cluster nodes will be different from their peers. Sometimes,
these differences -- e.g. the location of a binary or the names of network
interfaces -- require resources to be configured differently depending
on the machine they're hosted on.
By defining multiple +instance_attributes+ objects for the resource
and adding a rule to each, we can easily handle these special cases.
In the example below, +mySpecialRsc+ will use eth1 and port 9999 when
run on +node1+, eth2 and port 8888 on +node2+, and default to eth0 and
port 9999 on all other nodes.
.Defining different resource options based on the node name
=====
[source,XML]
-------
<primitive id="mySpecialRsc" class="ocf" type="Special" provider="me">
<instance_attributes id="special-node1" score="3">
<rule id="node1-special-case" score="INFINITY" >
<expression id="node1-special-case-expr" attribute="#uname"
operation="eq" value="node1"/>
</rule>
<nvpair id="node1-interface" name="interface" value="eth1"/>
</instance_attributes>
<instance_attributes id="special-node2" score="2" >
<rule id="node2-special-case" score="INFINITY">
<expression id="node2-special-case-expr" attribute="#uname"
operation="eq" value="node2"/>
</rule>
<nvpair id="node2-interface" name="interface" value="eth2"/>
<nvpair id="node2-port" name="port" value="8888"/>
</instance_attributes>
<instance_attributes id="defaults" score="1" >
<nvpair id="default-interface" name="interface" value="eth0"/>
<nvpair id="default-port" name="port" value="9999"/>
</instance_attributes>
</primitive>
-------
=====
The order in which +instance_attributes+ objects are evaluated is
determined by their score (highest to lowest). If not supplied, score
defaults to zero, and objects with an equal score are processed in
listed order. If the +instance_attributes+ object has no rule
or a +rule+ that evaluates to +true+, then for any parameter the resource does
not yet have a value for, the resource will use the parameter values defined by
the +instance_attributes+.
For example, given the configuration above, if the resource is placed on node1:
. +special-node1+ has the highest score (3) and so is evaluated first;
its rule evaluates to +true+, so +interface+ is set to +eth1+.
. +special-node2+ is evaluated next with score 2, but its rule evaluates to +false+,
so it is ignored.
. +defaults+ is evaluated last with score 1, and has no rule, so its values
are examined; +interface+ is already defined, so the value here is not used,
but +port+ is not yet defined, so +port+ is set to +9999+.
=== Using Rules to Control Resource Defaults ===
Rules can be used for resource and operation defaults. The following example
illustrates how to set a different +resource-stickiness+ value during and
outside work hours. This allows resources to automatically move back to their
most preferred hosts, but at a time that (in theory) does not interfere with
business activities.
.Change +resource-stickiness+ during working hours
=====
[source,XML]
-------
<rsc_defaults>
<meta_attributes id="core-hours" score="2">
<rule id="core-hour-rule" score="0">
<date_expression id="nine-to-five-Mon-to-Fri" operation="date_spec">
<date_spec id="nine-to-five-Mon-to-Fri-spec" hours="9-16" weekdays="1-5"/>
</date_expression>
</rule>
<nvpair id="core-stickiness" name="resource-stickiness" value="INFINITY"/>
</meta_attributes>
<meta_attributes id="after-hours" score="1" >
<nvpair id="after-stickiness" name="resource-stickiness" value="0"/>
</meta_attributes>
</rsc_defaults>
-------
=====
Rules may be used similarly in +instance_attributes+ or +utilization+ blocks.
Any single block may directly contain only a single rule, but that rule may
itself contain any number of rules.
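A hedged sketch (the ids and values are illustrative) of a +utilization+ block
whose values apply only on one node, following the same pattern as the
+instance_attributes+ example above:
.Advertise a higher cpu utilization for a resource only when it runs on node1
=====
[source,XML]
-------
<primitive id="mySpecialRsc" class="ocf" type="Special" provider="me">
   <utilization id="special-utilization-node1">
      <rule id="special-utilization-node1-rule" score="INFINITY">
         <expression id="special-utilization-node1-expr" attribute="#uname"
            operation="eq" value="node1"/>
      </rule>
      <nvpair id="special-utilization-node1-cpu" name="cpu" value="2"/>
   </utilization>
</primitive>
-------
=====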
+rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults
either on a single resource or across an entire class of resources with a single
rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+
and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If
multiple rules succeed for a given resource agent, the last one specified will be
the one that takes effect. As with any other rule, boolean operations may be used
to make more complicated expressions.
.Set all IPaddr2 resources to stopped
=====
[source,XML]
-------
<rsc_defaults>
<meta_attributes id="op-target-role">
<rule id="op-target-role-rule" score="INFINITY">
<rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat"
type="IPaddr2"/>
</rule>
<nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/>
</meta_attributes>
</rsc_defaults>
-------
=====
.Set all monitor action timeouts to 7 seconds
=====
[source,XML]
-------
<op_defaults>
<meta_attributes id="op-monitor-defaults">
<rule id="op-monitor-default-rule" score="INFINITY">
<op_expression id="op-monitor-default-expr" name="monitor"/>
</rule>
<nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
</meta_attributes>
</op_defaults>
-------
=====
.Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds
=====
[source,XML]
-------
<op_defaults>
<meta_attributes id="op-monitor-and">
<rule id="op-monitor-and-rule" score="INFINITY">
<rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat"
type="IPaddr2"/>
<op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/>
</rule>
<nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/>
</meta_attributes>
</op_defaults>
-------
=====
=== Using Rules to Control Cluster Options ===
indexterm:[Rule,Controlling Cluster Options]
indexterm:[Cluster,Setting Options with Rules]
Controlling cluster options is achieved in much the same manner as
specifying different resource options on different nodes.
The difference is that because they are cluster options, one cannot (or should
not, because they won't work) use attribute-based expressions. The following
example illustrates how to set +maintenance-mode+ during a scheduled
maintenance window. This will keep the cluster running but not monitor, start,
or stop resources during this time.
.Schedule a maintenance window for 9 to 11 p.m. CDT Sept. 20, 2019
=====
[source,XML]
-------
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="bootstrap-stonith-enabled" name="stonith-enabled" value="1"/>
</cluster_property_set>
<cluster_property_set id="normal-set" score="10">
<nvpair id="normal-maintenance-mode" name="maintenance-mode" value="false"/>
</cluster_property_set>
<cluster_property_set id="maintenance-window-set" score="1000">
<nvpair id="maintenance-nvpair1" name="maintenance-mode" value="true"/>
<rule id="maintenance-rule1" score="INFINITY">
<date_expression id="maintenance-date1" operation="in_range"
start="2019-09-20 21:00:00 -05:00" end="2019-09-20 23:00:00 -05:00"/>
</rule>
</cluster_property_set>
</crm_config>
-------
=====
[IMPORTANT]
====
The +cluster_property_set+ with an +id+ set to "cib-bootstrap-options" will
'always' have the highest priority, regardless of any scores. Therefore,
rules in another +cluster_property_set+ can never take effect for any
properties listed in the bootstrap set.
====
diff --git a/include/crm/common/results.h b/include/crm/common/results.h
index b7b2cbcd0a..d9fb68a8aa 100644
--- a/include/crm/common/results.h
+++ b/include/crm/common/results.h
@@ -1,239 +1,240 @@
/*
* Copyright 2012-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_RESULTS__H
# define CRM_RESULTS__H
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Function and executable result codes
* \ingroup core
*/
// Lifted from config.h
/* The _Noreturn keyword of C11. */
#ifndef _Noreturn
# if (defined __cplusplus \
&& ((201103 <= __cplusplus && !(__GNUC__ == 4 && __GNUC_MINOR__ == 7)) \
|| (defined _MSC_VER && 1900 <= _MSC_VER)))
# define _Noreturn [[noreturn]]
# elif ((!defined __cplusplus || defined __clang__) \
&& (201112 <= (defined __STDC_VERSION__ ? __STDC_VERSION__ : 0) \
|| 4 < __GNUC__ + (7 <= __GNUC_MINOR__)))
/* _Noreturn works as-is. */
# elif 2 < __GNUC__ + (8 <= __GNUC_MINOR__) || 0x5110 <= __SUNPRO_C
# define _Noreturn __attribute__ ((__noreturn__))
# elif 1200 <= (defined _MSC_VER ? _MSC_VER : 0)
# define _Noreturn __declspec (noreturn)
# else
# define _Noreturn
# endif
#endif
# define CRM_ASSERT(expr) do { \
if(__unlikely((expr) == FALSE)) { \
crm_abort(__FILE__, __FUNCTION__, __LINE__, #expr, TRUE, FALSE); \
abort(); /* Redundant but it makes static analyzers happy */ \
} \
} while(0)
/*
* Function return codes
*
* Most Pacemaker API functions return an integer return code. There are two
* alternative interpretations. The legacy interpretation is that the absolute
* value of the return code is either a system error number or a custom
* pcmk_err_* number. This is less than ideal because system error numbers are
* constrained only to the positive int range, so there's the possibility
* (though not noticed in the wild) that system errors and custom errors could
* collide. The new interpretation is that negative values are from the pcmk_rc_e
* enum, and positive values are system error numbers. Both use 0 for success.
*
* For system error codes, see:
* - /usr/include/asm-generic/errno.h
* - /usr/include/asm-generic/errno-base.h
*/
// Legacy custom return codes for Pacemaker API functions (deprecated)
# define pcmk_ok 0
# define PCMK_ERROR_OFFSET 190 /* Replacements on non-linux systems, see include/portability.h */
# define PCMK_CUSTOM_OFFSET 200 /* Purely custom codes */
# define pcmk_err_generic 201
# define pcmk_err_no_quorum 202
# define pcmk_err_schema_validation 203
# define pcmk_err_transform_failed 204
# define pcmk_err_old_data 205
# define pcmk_err_diff_failed 206
# define pcmk_err_diff_resync 207
# define pcmk_err_cib_modified 208
# define pcmk_err_cib_backup 209
# define pcmk_err_cib_save 210
# define pcmk_err_schema_unchanged 211
# define pcmk_err_cib_corrupt 212
# define pcmk_err_multiple 213
# define pcmk_err_node_unknown 214
# define pcmk_err_already 215
/* On HPPA 215 is ENOSYM (Unknown error 215), which hopefully never happens. */
#ifdef __hppa__
# define pcmk_err_bad_nvpair 250 /* 216 is ENOTSOCK */
# define pcmk_err_unknown_format 252 /* 217 is EDESTADDRREQ */
#else
# define pcmk_err_bad_nvpair 216
# define pcmk_err_unknown_format 217
#endif
/*!
* \enum pcmk_rc_e
* \brief Return codes for Pacemaker API functions
*
* Any Pacemaker API function documented as returning a "standard Pacemaker
* return code" will return pcmk_rc_ok (0) on success, and one of this
* enumeration's other (negative) values or a (positive) system error number
* otherwise. The custom codes are at -1001 and lower, so that the caller may
* use -1 through -1000 for their own custom values if desired. While generally
* referred to as "errors", nonzero values simply indicate a result, which might
* or might not be an error depending on the calling context.
*/
enum pcmk_rc_e {
/* When adding new values, use consecutively lower numbers, update the array
* in lib/common/results.c, and test with crm_error.
*/
+ pcmk_rc_underflow = -1028,
pcmk_rc_no_input = -1027,
pcmk_rc_no_output = -1026,
pcmk_rc_after_range = -1025,
pcmk_rc_within_range = -1024,
pcmk_rc_before_range = -1023,
pcmk_rc_undetermined = -1022,
pcmk_rc_op_unsatisfied = -1021,
pcmk_rc_ipc_pid_only = -1020,
pcmk_rc_ipc_unresponsive = -1019,
pcmk_rc_ipc_unauthorized = -1018,
pcmk_rc_no_quorum = -1017,
pcmk_rc_schema_validation = -1016,
pcmk_rc_schema_unchanged = -1015,
pcmk_rc_transform_failed = -1014,
pcmk_rc_old_data = -1013,
pcmk_rc_diff_failed = -1012,
pcmk_rc_diff_resync = -1011,
pcmk_rc_cib_modified = -1010,
pcmk_rc_cib_backup = -1009,
pcmk_rc_cib_save = -1008,
pcmk_rc_cib_corrupt = -1007,
pcmk_rc_multiple = -1006,
pcmk_rc_node_unknown = -1005,
pcmk_rc_already = -1004,
pcmk_rc_bad_nvpair = -1003,
pcmk_rc_unknown_format = -1002,
// Developers: Use a more specific code than pcmk_rc_error whenever possible
pcmk_rc_error = -1001,
// Values -1 through -1000 reserved for caller use
pcmk_rc_ok = 0
// Positive values reserved for system error numbers
};
/*
* Exit status codes
*
* We want well-specified (i.e. OS-invariant) exit status codes for our daemons
* and applications so they can be relied on by callers. (Function return codes
* and errno's do not make good exit statuses.)
*
* The only hard rule is that exit statuses must be between 0 and 255; all else
* is convention. Universally, 0 is success, and 1 is generic error (excluding
* OSes we don't support -- for example, OpenVMS considers 1 success!).
*
* For init scripts, the LSB gives meaning to 0-7, and sets aside 150-199 for
* application use. OCF adds 8-9 and 189-199.
*
* sysexits.h was an attempt to give additional meanings, but never really
* caught on. It uses 0 and 64-78.
*
* Bash reserves 2 ("incorrect builtin usage") and 126-255 (126 is "command
* found but not executable", 127 is "command not found", 128 + n is
* "interrupted by signal n").
*
* tldp.org recommends 64-113 for application use.
*
* We try to overlap with the above conventions when practical.
*/
typedef enum crm_exit_e {
// Common convention
CRM_EX_OK = 0,
CRM_EX_ERROR = 1,
// LSB + OCF
CRM_EX_INVALID_PARAM = 2,
CRM_EX_UNIMPLEMENT_FEATURE = 3,
CRM_EX_INSUFFICIENT_PRIV = 4,
CRM_EX_NOT_INSTALLED = 5,
CRM_EX_NOT_CONFIGURED = 6,
CRM_EX_NOT_RUNNING = 7,
// sysexits.h
CRM_EX_USAGE = 64, // command line usage error
CRM_EX_DATAERR = 65, // user-supplied data incorrect
CRM_EX_NOINPUT = 66, // input file not available
CRM_EX_NOUSER = 67, // user does not exist
CRM_EX_NOHOST = 68, // host unknown
CRM_EX_UNAVAILABLE = 69, // needed service unavailable
CRM_EX_SOFTWARE = 70, // internal software bug
CRM_EX_OSERR = 71, // external (OS/environmental) problem
CRM_EX_OSFILE = 72, // system file not usable
CRM_EX_CANTCREAT = 73, // file couldn't be created
CRM_EX_IOERR = 74, // file I/O error
CRM_EX_TEMPFAIL = 75, // try again
CRM_EX_PROTOCOL = 76, // protocol violated
CRM_EX_NOPERM = 77, // non-file permission issue
CRM_EX_CONFIG = 78, // misconfiguration
// Custom
CRM_EX_FATAL = 100, // do not respawn
CRM_EX_PANIC = 101, // panic the local host
CRM_EX_DISCONNECT = 102, // lost connection to something
CRM_EX_OLD = 103, // update older than existing config
CRM_EX_DIGEST = 104, // digest comparison failed
CRM_EX_NOSUCH = 105, // requested item does not exist
CRM_EX_QUORUM = 106, // local partition does not have quorum
CRM_EX_UNSAFE = 107, // requires --force or new conditions
CRM_EX_EXISTS = 108, // requested item already exists
CRM_EX_MULTIPLE = 109, // requested item has multiple matches
CRM_EX_EXPIRED = 110, // requested item has expired
CRM_EX_NOT_YET_IN_EFFECT = 111, // requested item is not in effect
CRM_EX_INDETERMINATE = 112, // could not determine status
CRM_EX_UNSATISFIED = 113, // requested item does not satisfy constraints
// Other
CRM_EX_TIMEOUT = 124, // convention from timeout(1)
CRM_EX_MAX = 255, // ensure crm_exit_t can hold this
} crm_exit_t;
const char *pcmk_rc_name(int rc);
const char *pcmk_rc_str(int rc);
crm_exit_t pcmk_rc2exitc(int rc);
int pcmk_rc2legacy(int rc);
int pcmk_legacy2rc(int legacy_rc);
const char *pcmk_strerror(int rc);
const char *pcmk_errorname(int rc);
const char *bz2_strerror(int rc);
crm_exit_t crm_errno2exit(int rc);
const char *crm_exit_name(crm_exit_t exit_code);
const char *crm_exit_str(crm_exit_t exit_code);
_Noreturn crm_exit_t crm_exit(crm_exit_t rc);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/include/crm/common/strings_internal.h b/include/crm/common/strings_internal.h
index e61d0253d6..faba8758c1 100644
--- a/include/crm/common/strings_internal.h
+++ b/include/crm/common/strings_internal.h
@@ -1,78 +1,86 @@
/*
* Copyright 2015-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__STRINGS_INTERNAL__H
#define PCMK__STRINGS_INTERNAL__H
#include <stdbool.h> // bool
#include <glib.h> // guint, GList, GHashTable
+/* internal constants for generic string functions (from strings.c) */
+
+#define PCMK__PARSE_INT_DEFAULT -1
+#define PCMK__PARSE_DBL_DEFAULT -1.0
+
/* internal generic string functions (from strings.c) */
enum pcmk__str_flags {
pcmk__str_none = 0,
pcmk__str_casei = 1 << 0,
pcmk__str_null_matches = 1 << 1,
pcmk__str_regex = 1 << 2
};
+int pcmk__scan_double(const char *text, double *result,
+ const char *default_text, char **end_text);
int pcmk__guint_from_hash(GHashTable *table, const char *key, guint default_val,
guint *result);
bool pcmk__starts_with(const char *str, const char *prefix);
bool pcmk__ends_with(const char *s, const char *match);
bool pcmk__ends_with_ext(const char *s, const char *match);
char *pcmk__add_word(char *list, const char *word);
int pcmk__compress(const char *data, unsigned int length, unsigned int max,
char **result, unsigned int *result_len);
int pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end);
gboolean pcmk__str_in_list(GList *lst, const gchar *s);
bool pcmk__strcase_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED;
bool pcmk__str_any_of(const char *s, ...) G_GNUC_NULL_TERMINATED;
+bool pcmk__char_in_any_str(int ch, ...) G_GNUC_NULL_TERMINATED;
int pcmk__strcmp(const char *s1, const char *s2, uint32_t flags);
static inline bool
pcmk__str_eq(const char *s1, const char *s2, uint32_t flags)
{
return pcmk__strcmp(s1, s2, flags) == 0;
}
/* Correctly displaying singular or plural is complicated; consider "1 node has"
* vs. "2 nodes have". A flexible solution is to pluralize entire strings, e.g.
*
* if (a == 1) {
* crm_info("singular message"):
* } else {
* crm_info("plural message");
* }
*
* though even that's not sufficient for all languages besides English (if we
* ever desire to do translations of output and log messages). But the following
* convenience macros are "good enough" and more concise for many cases.
*/
/* Example:
* crm_info("Found %d %s", nentries,
* pcmk__plural_alt(nentries, "entry", "entries"));
*/
#define pcmk__plural_alt(i, s1, s2) (((i) == 1)? (s1) : (s2))
// Example: crm_info("Found %d node%s", nnodes, pcmk__plural_s(nnodes));
#define pcmk__plural_s(i) pcmk__plural_alt(i, "", "s")
static inline int
pcmk__str_empty(const char *s)
{
return (s == NULL) || (s[0] == '\0');
}
#endif /* PCMK__STRINGS_INTERNAL__H */
diff --git a/include/crm/crm.h b/include/crm/crm.h
index 9676bd210b..520387a9ae 100644
--- a/include/crm/crm.h
+++ b/include/crm/crm.h
@@ -1,232 +1,232 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM__H
# define CRM__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief A dumping ground
* \ingroup core
*/
# include <crm_config.h>
# include <stdlib.h>
# include <glib.h>
# include <stdbool.h>
# include <string.h>
# include <libxml/tree.h>
/*!
* The CRM feature set assists with compatibility in mixed-version clusters.
* The major version number increases when nodes with different versions
* would not work (rolling upgrades are not allowed). The minor version
* number increases when mixed-version clusters are allowed only during
* rolling upgrades (a node with the oldest feature set will be elected DC). The
* minor-minor version number is ignored, but allows resource agents to detect
* cluster support for various features.
*
* The feature set also affects the processing of old saved CIBs (such as for
* many scheduler regression tests).
*
* Particular feature points currently used by pacemaker:
*
* >2.1: Operation updates include timing data
* >=3.0.5: XML v2 digests are created
* >=3.0.8: Peers do not need acks for cancellations
* >=3.0.9: DC will send its own shutdown request to all peers
* XML v2 patchsets are created by default
* >=3.0.13: Fail counts include operation name and interval
* >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED
*/
-# define CRM_FEATURE_SET "3.4.1"
+# define CRM_FEATURE_SET "3.5.0"
# define EOS '\0'
# define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) )
# ifndef MAX_NAME
# define MAX_NAME 256
# endif
# ifndef __GNUC__
# define __builtin_expect(expr, result) (expr)
# endif
/* Some handy macros used by the Linux kernel */
# define __likely(expr) __builtin_expect(expr, 1)
# define __unlikely(expr) __builtin_expect(expr, 0)
# define CRM_META "CRM_meta"
extern char *crm_system_name;
/* *INDENT-OFF* */
// Used for some internal IPC timeouts (maybe should be configurable option)
# define MAX_IPC_DELAY 120
// How we represent "infinite" scores
# define CRM_SCORE_INFINITY 1000000
# define CRM_INFINITY_S "INFINITY"
# define CRM_PLUS_INFINITY_S "+" CRM_INFINITY_S
# define CRM_MINUS_INFINITY_S "-" CRM_INFINITY_S
/* @COMPAT API < 2.0.0 Deprecated "infinity" aliases
*
* INFINITY might be defined elsewhere (e.g. math.h), so undefine it first.
* This, of course, complicates any attempt to use the other definition in any
* code that includes this header.
*/
# undef INFINITY
# define INFINITY_S "INFINITY"
# define MINUS_INFINITY_S "-INFINITY"
# define INFINITY 1000000
/* Sub-systems */
# define CRM_SYSTEM_DC "dc"
# define CRM_SYSTEM_DCIB "dcib"
/* The master CIB */
# define CRM_SYSTEM_CIB "cib"
# define CRM_SYSTEM_CRMD "crmd"
# define CRM_SYSTEM_LRMD "lrmd"
# define CRM_SYSTEM_PENGINE "pengine"
# define CRM_SYSTEM_TENGINE "tengine"
# define CRM_SYSTEM_STONITHD "stonithd"
# define CRM_SYSTEM_MCP "pacemakerd"
// Names of internally generated node attributes
# define CRM_ATTR_UNAME "#uname"
# define CRM_ATTR_ID "#id"
# define CRM_ATTR_KIND "#kind"
# define CRM_ATTR_ROLE "#role"
# define CRM_ATTR_IS_DC "#is_dc"
# define CRM_ATTR_CLUSTER_NAME "#cluster-name"
# define CRM_ATTR_SITE_NAME "#site-name"
# define CRM_ATTR_UNFENCED "#node-unfenced"
# define CRM_ATTR_DIGESTS_ALL "#digests-all"
# define CRM_ATTR_DIGESTS_SECURE "#digests-secure"
# define CRM_ATTR_RA_VERSION "#ra-version"
# define CRM_ATTR_PROTOCOL "#attrd-protocol"
/* Valid operations */
# define CRM_OP_NOOP "noop"
# define CRM_OP_JOIN_ANNOUNCE "join_announce"
# define CRM_OP_JOIN_OFFER "join_offer"
# define CRM_OP_JOIN_REQUEST "join_request"
# define CRM_OP_JOIN_ACKNAK "join_ack_nack"
# define CRM_OP_JOIN_CONFIRM "join_confirm"
# define CRM_OP_PING "ping"
# define CRM_OP_NODE_INFO "node-info"
# define CRM_OP_THROTTLE "throttle"
# define CRM_OP_VOTE "vote"
# define CRM_OP_NOVOTE "no-vote"
# define CRM_OP_HELLO "hello"
# define CRM_OP_PECALC "pe_calc"
# define CRM_OP_QUIT "quit"
# define CRM_OP_LOCAL_SHUTDOWN "start_shutdown"
# define CRM_OP_SHUTDOWN_REQ "req_shutdown"
# define CRM_OP_SHUTDOWN "do_shutdown"
# define CRM_OP_FENCE "stonith"
# define CRM_OP_REGISTER "register"
# define CRM_OP_IPC_FWD "ipc_fwd"
# define CRM_OP_INVOKE_LRM "lrm_invoke"
# define CRM_OP_LRM_REFRESH "lrm_refresh" /* Deprecated */
# define CRM_OP_LRM_QUERY "lrm_query"
# define CRM_OP_LRM_DELETE "lrm_delete"
# define CRM_OP_LRM_FAIL "lrm_fail"
# define CRM_OP_PROBED "probe_complete"
# define CRM_OP_REPROBE "probe_again"
# define CRM_OP_CLEAR_FAILCOUNT "clear_failcount"
# define CRM_OP_REMOTE_STATE "remote_state"
# define CRM_OP_RELAXED_SET "one-or-more"
# define CRM_OP_RELAXED_CLONE "clone-one-or-more"
# define CRM_OP_RM_NODE_CACHE "rm_node_cache"
# define CRM_OP_MAINTENANCE_NODES "maintenance_nodes"
/* Possible cluster membership states */
# define CRMD_JOINSTATE_DOWN "down"
# define CRMD_JOINSTATE_PENDING "pending"
# define CRMD_JOINSTATE_MEMBER "member"
# define CRMD_JOINSTATE_NACK "banned"
# define CRMD_ACTION_DELETE "delete"
# define CRMD_ACTION_CANCEL "cancel"
# define CRMD_ACTION_RELOAD "reload"
# define CRMD_ACTION_MIGRATE "migrate_to"
# define CRMD_ACTION_MIGRATED "migrate_from"
# define CRMD_ACTION_START "start"
# define CRMD_ACTION_STARTED "running"
# define CRMD_ACTION_STOP "stop"
# define CRMD_ACTION_STOPPED "stopped"
# define CRMD_ACTION_PROMOTE "promote"
# define CRMD_ACTION_PROMOTED "promoted"
# define CRMD_ACTION_DEMOTE "demote"
# define CRMD_ACTION_DEMOTED "demoted"
# define CRMD_ACTION_NOTIFY "notify"
# define CRMD_ACTION_NOTIFIED "notified"
# define CRMD_ACTION_STATUS "monitor"
# define CRMD_ACTION_METADATA "meta-data"
# define CRMD_METADATA_CALL_TIMEOUT 30000
/* short names */
# define RSC_DELETE CRMD_ACTION_DELETE
# define RSC_CANCEL CRMD_ACTION_CANCEL
# define RSC_MIGRATE CRMD_ACTION_MIGRATE
# define RSC_MIGRATED CRMD_ACTION_MIGRATED
# define RSC_START CRMD_ACTION_START
# define RSC_STARTED CRMD_ACTION_STARTED
# define RSC_STOP CRMD_ACTION_STOP
# define RSC_STOPPED CRMD_ACTION_STOPPED
# define RSC_PROMOTE CRMD_ACTION_PROMOTE
# define RSC_PROMOTED CRMD_ACTION_PROMOTED
# define RSC_DEMOTE CRMD_ACTION_DEMOTE
# define RSC_DEMOTED CRMD_ACTION_DEMOTED
# define RSC_NOTIFY CRMD_ACTION_NOTIFY
# define RSC_NOTIFIED CRMD_ACTION_NOTIFIED
# define RSC_STATUS CRMD_ACTION_STATUS
# define RSC_METADATA CRMD_ACTION_METADATA
/* *INDENT-ON* */
typedef GList *GListPtr;
# include <crm/common/logging.h>
# include <crm/common/util.h>
static inline const char *
crm_action_str(const char *task, guint interval_ms) {
if ((task != NULL) && (interval_ms == 0)
&& (strcasecmp(task, RSC_STATUS) == 0)) {
return "probe";
}
return task;
}
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/common/results.c b/lib/common/results.c
index d4baef0019..ea0cf91d94 100644
--- a/lib/common/results.c
+++ b/lib/common/results.c
@@ -1,777 +1,782 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <bzlib.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <qb/qbdefs.h>
#include <crm/common/mainloop.h>
#include <crm/common/xml.h>
G_DEFINE_QUARK(pcmk-rc-error-quark, pcmk__rc_error)
G_DEFINE_QUARK(pcmk-exitc-error-quark, pcmk__exitc_error)
// @COMPAT Legacy function return codes
//! \deprecated Use standard return codes and pcmk_rc_name() instead
const char *
pcmk_errorname(int rc)
{
rc = abs(rc);
switch (rc) {
case pcmk_err_generic: return "pcmk_err_generic";
case pcmk_err_no_quorum: return "pcmk_err_no_quorum";
case pcmk_err_schema_validation: return "pcmk_err_schema_validation";
case pcmk_err_transform_failed: return "pcmk_err_transform_failed";
case pcmk_err_old_data: return "pcmk_err_old_data";
case pcmk_err_diff_failed: return "pcmk_err_diff_failed";
case pcmk_err_diff_resync: return "pcmk_err_diff_resync";
case pcmk_err_cib_modified: return "pcmk_err_cib_modified";
case pcmk_err_cib_backup: return "pcmk_err_cib_backup";
case pcmk_err_cib_save: return "pcmk_err_cib_save";
case pcmk_err_cib_corrupt: return "pcmk_err_cib_corrupt";
case pcmk_err_multiple: return "pcmk_err_multiple";
case pcmk_err_node_unknown: return "pcmk_err_node_unknown";
case pcmk_err_already: return "pcmk_err_already";
case pcmk_err_bad_nvpair: return "pcmk_err_bad_nvpair";
case pcmk_err_unknown_format: return "pcmk_err_unknown_format";
default: return pcmk_rc_name(rc); // system errno
}
}
//! \deprecated Use standard return codes and pcmk_rc_str() instead
const char *
pcmk_strerror(int rc)
{
if (rc == 0) {
return "OK";
}
rc = abs(rc);
// Of course rc > 0 ... unless someone passed INT_MIN as rc
if ((rc > 0) && (rc < PCMK_ERROR_OFFSET)) {
return strerror(rc);
}
switch (rc) {
case pcmk_err_generic:
return "Generic Pacemaker error";
case pcmk_err_no_quorum:
return "Operation requires quorum";
case pcmk_err_schema_validation:
return "Update does not conform to the configured schema";
case pcmk_err_transform_failed:
return "Schema transform failed";
case pcmk_err_old_data:
return "Update was older than existing configuration";
case pcmk_err_diff_failed:
return "Application of an update diff failed";
case pcmk_err_diff_resync:
return "Application of an update diff failed, requesting a full refresh";
case pcmk_err_cib_modified:
return "The on-disk configuration was manually modified";
case pcmk_err_cib_backup:
return "Could not archive the previous configuration";
case pcmk_err_cib_save:
return "Could not save the new configuration to disk";
case pcmk_err_cib_corrupt:
return "Could not parse on-disk configuration";
case pcmk_err_multiple:
return "Resource active on multiple nodes";
case pcmk_err_node_unknown:
return "Node not found";
case pcmk_err_already:
return "Situation already as requested";
case pcmk_err_bad_nvpair:
return "Bad name/value pair given";
case pcmk_err_schema_unchanged:
return "Schema is already the latest available";
case pcmk_err_unknown_format:
return "Unknown output format";
/* The following cases will only be hit on systems for which they are non-standard */
/* coverity[dead_error_condition] False positive on non-Linux */
case ENOTUNIQ:
return "Name not unique on network";
/* coverity[dead_error_condition] False positive on non-Linux */
case ECOMM:
return "Communication error on send";
/* coverity[dead_error_condition] False positive on non-Linux */
case ELIBACC:
return "Can not access a needed shared library";
/* coverity[dead_error_condition] False positive on non-Linux */
case EREMOTEIO:
return "Remote I/O error";
/* coverity[dead_error_condition] False positive on non-Linux */
case EUNATCH:
return "Protocol driver not attached";
/* coverity[dead_error_condition] False positive on non-Linux */
case ENOKEY:
return "Required key not available";
}
crm_err("Unknown error code: %d", rc);
return "Unknown error";
}
// Standard Pacemaker API return codes
/* This array is used only for nonzero values of pcmk_rc_e. Its values must be
* kept in the exact reverse order of the enum value numbering (i.e. add new
* values to the end of the array).
*/
static struct pcmk__rc_info {
const char *name;
const char *desc;
int legacy_rc;
} pcmk__rcs[] = {
{ "pcmk_rc_error",
"Error",
-pcmk_err_generic,
},
{ "pcmk_rc_unknown_format",
"Unknown output format",
-pcmk_err_unknown_format,
},
{ "pcmk_rc_bad_nvpair",
"Bad name/value pair given",
-pcmk_err_bad_nvpair,
},
{ "pcmk_rc_already",
"Already in requested state",
-pcmk_err_already,
},
{ "pcmk_rc_node_unknown",
"Node not found",
-pcmk_err_node_unknown,
},
{ "pcmk_rc_multiple",
"Resource active on multiple nodes",
-pcmk_err_multiple,
},
{ "pcmk_rc_cib_corrupt",
"Could not parse on-disk configuration",
-pcmk_err_cib_corrupt,
},
{ "pcmk_rc_cib_save",
"Could not save new configuration to disk",
-pcmk_err_cib_save,
},
{ "pcmk_rc_cib_backup",
"Could not archive previous configuration",
-pcmk_err_cib_backup,
},
{ "pcmk_rc_cib_modified",
"On-disk configuration was manually modified",
-pcmk_err_cib_modified,
},
{ "pcmk_rc_diff_resync",
"Application of update diff failed, requesting full refresh",
-pcmk_err_diff_resync,
},
{ "pcmk_rc_diff_failed",
"Application of update diff failed",
-pcmk_err_diff_failed,
},
{ "pcmk_rc_old_data",
"Update was older than existing configuration",
-pcmk_err_old_data,
},
{ "pcmk_rc_transform_failed",
"Schema transform failed",
-pcmk_err_transform_failed,
},
{ "pcmk_rc_schema_unchanged",
"Schema is already the latest available",
-pcmk_err_schema_unchanged,
},
{ "pcmk_rc_schema_validation",
"Update does not conform to the configured schema",
-pcmk_err_schema_validation,
},
{ "pcmk_rc_no_quorum",
"Operation requires quorum",
-pcmk_err_no_quorum,
},
{ "pcmk_rc_ipc_pid_only",
"IPC server process is active but not accepting connections",
-pcmk_err_generic,
},
{ "pcmk_rc_ipc_unresponsive",
"IPC server is unresponsive",
-pcmk_err_generic,
},
{ "pcmk_rc_ipc_unauthorized",
"IPC server is blocked by unauthorized process",
-pcmk_err_generic,
},
{ "pcmk_rc_op_unsatisifed",
"Not applicable under current conditions",
-pcmk_err_generic,
},
{ "pcmk_rc_undetermined",
"Result undetermined",
-pcmk_err_generic,
},
{ "pcmk_rc_before_range",
"Result occurs before given range",
-pcmk_err_generic,
},
{ "pcmk_rc_within_range",
"Result occurs within given range",
-pcmk_err_generic,
},
{ "pcmk_rc_after_range",
"Result occurs after given range",
-pcmk_err_generic,
},
{ "pcmk_rc_no_output",
"Output message produced no output",
-pcmk_err_generic,
},
{ "pcmk_rc_no_input",
"Input file not available",
-pcmk_err_generic,
+ },
+ { "pcmk_rc_underflow",
+ "Value too small to be stored in data type",
+ -pcmk_err_generic,
}
};
#define PCMK__N_RC (sizeof(pcmk__rcs) / sizeof(struct pcmk__rc_info))
/*!
* \brief Get a return code constant name as a string
*
* \param[in] rc Integer return code to convert
*
* \return String of constant name corresponding to rc
*/
const char *
pcmk_rc_name(int rc)
{
if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) {
return pcmk__rcs[pcmk_rc_error - rc].name;
}
switch (rc) {
case pcmk_rc_ok: return "pcmk_rc_ok";
case E2BIG: return "E2BIG";
case EACCES: return "EACCES";
case EADDRINUSE: return "EADDRINUSE";
case EADDRNOTAVAIL: return "EADDRNOTAVAIL";
case EAFNOSUPPORT: return "EAFNOSUPPORT";
case EAGAIN: return "EAGAIN";
case EALREADY: return "EALREADY";
case EBADF: return "EBADF";
case EBADMSG: return "EBADMSG";
case EBUSY: return "EBUSY";
case ECANCELED: return "ECANCELED";
case ECHILD: return "ECHILD";
case ECOMM: return "ECOMM";
case ECONNABORTED: return "ECONNABORTED";
case ECONNREFUSED: return "ECONNREFUSED";
case ECONNRESET: return "ECONNRESET";
/* case EDEADLK: return "EDEADLK"; */
case EDESTADDRREQ: return "EDESTADDRREQ";
case EDOM: return "EDOM";
case EDQUOT: return "EDQUOT";
case EEXIST: return "EEXIST";
case EFAULT: return "EFAULT";
case EFBIG: return "EFBIG";
case EHOSTDOWN: return "EHOSTDOWN";
case EHOSTUNREACH: return "EHOSTUNREACH";
case EIDRM: return "EIDRM";
case EILSEQ: return "EILSEQ";
case EINPROGRESS: return "EINPROGRESS";
case EINTR: return "EINTR";
case EINVAL: return "EINVAL";
case EIO: return "EIO";
case EISCONN: return "EISCONN";
case EISDIR: return "EISDIR";
case ELIBACC: return "ELIBACC";
case ELOOP: return "ELOOP";
case EMFILE: return "EMFILE";
case EMLINK: return "EMLINK";
case EMSGSIZE: return "EMSGSIZE";
#ifdef EMULTIHOP // Not available on OpenBSD
case EMULTIHOP: return "EMULTIHOP";
#endif
case ENAMETOOLONG: return "ENAMETOOLONG";
case ENETDOWN: return "ENETDOWN";
case ENETRESET: return "ENETRESET";
case ENETUNREACH: return "ENETUNREACH";
case ENFILE: return "ENFILE";
case ENOBUFS: return "ENOBUFS";
case ENODATA: return "ENODATA";
case ENODEV: return "ENODEV";
case ENOENT: return "ENOENT";
case ENOEXEC: return "ENOEXEC";
case ENOKEY: return "ENOKEY";
case ENOLCK: return "ENOLCK";
#ifdef ENOLINK // Not available on OpenBSD
case ENOLINK: return "ENOLINK";
#endif
case ENOMEM: return "ENOMEM";
case ENOMSG: return "ENOMSG";
case ENOPROTOOPT: return "ENOPROTOOPT";
case ENOSPC: return "ENOSPC";
case ENOSR: return "ENOSR";
case ENOSTR: return "ENOSTR";
case ENOSYS: return "ENOSYS";
case ENOTBLK: return "ENOTBLK";
case ENOTCONN: return "ENOTCONN";
case ENOTDIR: return "ENOTDIR";
case ENOTEMPTY: return "ENOTEMPTY";
case ENOTSOCK: return "ENOTSOCK";
#if ENOTSUP != EOPNOTSUPP
case ENOTSUP: return "ENOTSUP";
#endif
case ENOTTY: return "ENOTTY";
case ENOTUNIQ: return "ENOTUNIQ";
case ENXIO: return "ENXIO";
case EOPNOTSUPP: return "EOPNOTSUPP";
case EOVERFLOW: return "EOVERFLOW";
case EPERM: return "EPERM";
case EPFNOSUPPORT: return "EPFNOSUPPORT";
case EPIPE: return "EPIPE";
case EPROTO: return "EPROTO";
case EPROTONOSUPPORT: return "EPROTONOSUPPORT";
case EPROTOTYPE: return "EPROTOTYPE";
case ERANGE: return "ERANGE";
case EREMOTE: return "EREMOTE";
case EREMOTEIO: return "EREMOTEIO";
case EROFS: return "EROFS";
case ESHUTDOWN: return "ESHUTDOWN";
case ESPIPE: return "ESPIPE";
case ESOCKTNOSUPPORT: return "ESOCKTNOSUPPORT";
case ESRCH: return "ESRCH";
case ESTALE: return "ESTALE";
case ETIME: return "ETIME";
case ETIMEDOUT: return "ETIMEDOUT";
case ETXTBSY: return "ETXTBSY";
case EUNATCH: return "EUNATCH";
case EUSERS: return "EUSERS";
/* case EWOULDBLOCK: return "EWOULDBLOCK"; */
case EXDEV: return "EXDEV";
#ifdef EBADE // Not available on OS X
case EBADE: return "EBADE";
case EBADFD: return "EBADFD";
case EBADSLT: return "EBADSLT";
case EDEADLOCK: return "EDEADLOCK";
case EBADR: return "EBADR";
case EBADRQC: return "EBADRQC";
case ECHRNG: return "ECHRNG";
#ifdef EISNAM // Not available on OS X, Illumos, Solaris
case EISNAM: return "EISNAM";
case EKEYEXPIRED: return "EKEYEXPIRED";
case EKEYREJECTED: return "EKEYREJECTED";
case EKEYREVOKED: return "EKEYREVOKED";
#endif
case EL2HLT: return "EL2HLT";
case EL2NSYNC: return "EL2NSYNC";
case EL3HLT: return "EL3HLT";
case EL3RST: return "EL3RST";
case ELIBBAD: return "ELIBBAD";
case ELIBMAX: return "ELIBMAX";
case ELIBSCN: return "ELIBSCN";
case ELIBEXEC: return "ELIBEXEC";
#ifdef ENOMEDIUM // Not available on OS X, Illumos, Solaris
case ENOMEDIUM: return "ENOMEDIUM";
case EMEDIUMTYPE: return "EMEDIUMTYPE";
#endif
case ENONET: return "ENONET";
case ENOPKG: return "ENOPKG";
case EREMCHG: return "EREMCHG";
case ERESTART: return "ERESTART";
case ESTRPIPE: return "ESTRPIPE";
#ifdef EUCLEAN // Not available on OS X, Illumos, Solaris
case EUCLEAN: return "EUCLEAN";
#endif
case EXFULL: return "EXFULL";
#endif // EBADE
default: return "Unknown";
}
}
/*!
* \brief Get a user-friendly description of a return code
*
* \param[in] rc Integer return code to convert
*
* \return String description of rc
*/
const char *
pcmk_rc_str(int rc)
{
if (rc == pcmk_rc_ok) {
return "OK";
}
if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) {
return pcmk__rcs[pcmk_rc_error - rc].desc;
}
if (rc < 0) {
return "Unknown error";
}
return strerror(rc);
}
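As a usage illustration (not part of this patch), a caller holding a standard Pacemaker return code can report both the constant's name and its description; a minimal sketch, assuming the public crm/common/results.h header declares pcmk_rc_name() and pcmk_rc_str():

#include <stdio.h>
#include <crm/common/results.h>  // assumed location of pcmk_rc_name()/pcmk_rc_str()

static void
report_rc(int rc)
{
    // For rc == pcmk_rc_no_quorum this prints "pcmk_rc_no_quorum: Operation requires quorum"
    printf("%s: %s\n", pcmk_rc_name(rc), pcmk_rc_str(rc));
}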
// This returns negative values for errors
//! \deprecated Use standard return codes instead
int
pcmk_rc2legacy(int rc)
{
if (rc >= 0) {
return -rc; // OK or system errno
}
if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) {
return pcmk__rcs[pcmk_rc_error - rc].legacy_rc;
}
return -pcmk_err_generic;
}
//! \deprecated Use standard return codes instead
int
pcmk_legacy2rc(int legacy_rc)
{
legacy_rc = abs(legacy_rc);
switch (legacy_rc) {
case pcmk_err_no_quorum: return pcmk_rc_no_quorum;
case pcmk_err_schema_validation: return pcmk_rc_schema_validation;
case pcmk_err_schema_unchanged: return pcmk_rc_schema_unchanged;
case pcmk_err_transform_failed: return pcmk_rc_transform_failed;
case pcmk_err_old_data: return pcmk_rc_old_data;
case pcmk_err_diff_failed: return pcmk_rc_diff_failed;
case pcmk_err_diff_resync: return pcmk_rc_diff_resync;
case pcmk_err_cib_modified: return pcmk_rc_cib_modified;
case pcmk_err_cib_backup: return pcmk_rc_cib_backup;
case pcmk_err_cib_save: return pcmk_rc_cib_save;
case pcmk_err_cib_corrupt: return pcmk_rc_cib_corrupt;
case pcmk_err_multiple: return pcmk_rc_multiple;
case pcmk_err_node_unknown: return pcmk_rc_node_unknown;
case pcmk_err_already: return pcmk_rc_already;
case pcmk_err_bad_nvpair: return pcmk_rc_bad_nvpair;
case pcmk_err_unknown_format: return pcmk_rc_unknown_format;
case pcmk_err_generic: return pcmk_rc_error;
case pcmk_ok: return pcmk_rc_ok;
default: return legacy_rc; // system errno
}
}
// Exit status codes
const char *
crm_exit_name(crm_exit_t exit_code)
{
switch (exit_code) {
case CRM_EX_OK: return "CRM_EX_OK";
case CRM_EX_ERROR: return "CRM_EX_ERROR";
case CRM_EX_INVALID_PARAM: return "CRM_EX_INVALID_PARAM";
case CRM_EX_UNIMPLEMENT_FEATURE: return "CRM_EX_UNIMPLEMENT_FEATURE";
case CRM_EX_INSUFFICIENT_PRIV: return "CRM_EX_INSUFFICIENT_PRIV";
case CRM_EX_NOT_INSTALLED: return "CRM_EX_NOT_INSTALLED";
case CRM_EX_NOT_CONFIGURED: return "CRM_EX_NOT_CONFIGURED";
case CRM_EX_NOT_RUNNING: return "CRM_EX_NOT_RUNNING";
case CRM_EX_USAGE: return "CRM_EX_USAGE";
case CRM_EX_DATAERR: return "CRM_EX_DATAERR";
case CRM_EX_NOINPUT: return "CRM_EX_NOINPUT";
case CRM_EX_NOUSER: return "CRM_EX_NOUSER";
case CRM_EX_NOHOST: return "CRM_EX_NOHOST";
case CRM_EX_UNAVAILABLE: return "CRM_EX_UNAVAILABLE";
case CRM_EX_SOFTWARE: return "CRM_EX_SOFTWARE";
case CRM_EX_OSERR: return "CRM_EX_OSERR";
case CRM_EX_OSFILE: return "CRM_EX_OSFILE";
case CRM_EX_CANTCREAT: return "CRM_EX_CANTCREAT";
case CRM_EX_IOERR: return "CRM_EX_IOERR";
case CRM_EX_TEMPFAIL: return "CRM_EX_TEMPFAIL";
case CRM_EX_PROTOCOL: return "CRM_EX_PROTOCOL";
case CRM_EX_NOPERM: return "CRM_EX_NOPERM";
case CRM_EX_CONFIG: return "CRM_EX_CONFIG";
case CRM_EX_FATAL: return "CRM_EX_FATAL";
case CRM_EX_PANIC: return "CRM_EX_PANIC";
case CRM_EX_DISCONNECT: return "CRM_EX_DISCONNECT";
case CRM_EX_DIGEST: return "CRM_EX_DIGEST";
case CRM_EX_NOSUCH: return "CRM_EX_NOSUCH";
case CRM_EX_QUORUM: return "CRM_EX_QUORUM";
case CRM_EX_UNSAFE: return "CRM_EX_UNSAFE";
case CRM_EX_EXISTS: return "CRM_EX_EXISTS";
case CRM_EX_MULTIPLE: return "CRM_EX_MULTIPLE";
case CRM_EX_EXPIRED: return "CRM_EX_EXPIRED";
case CRM_EX_NOT_YET_IN_EFFECT: return "CRM_EX_NOT_YET_IN_EFFECT";
case CRM_EX_INDETERMINATE: return "CRM_EX_INDETERMINATE";
case CRM_EX_UNSATISFIED: return "CRM_EX_UNSATISFIED";
case CRM_EX_OLD: return "CRM_EX_OLD";
case CRM_EX_TIMEOUT: return "CRM_EX_TIMEOUT";
case CRM_EX_MAX: return "CRM_EX_UNKNOWN";
}
return "CRM_EX_UNKNOWN";
}
const char *
crm_exit_str(crm_exit_t exit_code)
{
switch (exit_code) {
case CRM_EX_OK: return "OK";
case CRM_EX_ERROR: return "Error occurred";
case CRM_EX_INVALID_PARAM: return "Invalid parameter";
case CRM_EX_UNIMPLEMENT_FEATURE: return "Unimplemented";
case CRM_EX_INSUFFICIENT_PRIV: return "Insufficient privileges";
case CRM_EX_NOT_INSTALLED: return "Not installed";
case CRM_EX_NOT_CONFIGURED: return "Not configured";
case CRM_EX_NOT_RUNNING: return "Not running";
case CRM_EX_USAGE: return "Incorrect usage";
case CRM_EX_DATAERR: return "Invalid data given";
case CRM_EX_NOINPUT: return "Input file not available";
case CRM_EX_NOUSER: return "User does not exist";
case CRM_EX_NOHOST: return "Host does not exist";
case CRM_EX_UNAVAILABLE: return "Necessary service unavailable";
case CRM_EX_SOFTWARE: return "Internal software bug";
case CRM_EX_OSERR: return "Operating system error occurred";
case CRM_EX_OSFILE: return "System file not available";
case CRM_EX_CANTCREAT: return "Cannot create output file";
case CRM_EX_IOERR: return "I/O error occurred";
case CRM_EX_TEMPFAIL: return "Temporary failure, try again";
case CRM_EX_PROTOCOL: return "Protocol violated";
case CRM_EX_NOPERM: return "Insufficient privileges";
case CRM_EX_CONFIG: return "Invalid configuration";
case CRM_EX_FATAL: return "Fatal error occurred, will not respawn";
case CRM_EX_PANIC: return "System panic required";
case CRM_EX_DISCONNECT: return "Not connected";
case CRM_EX_DIGEST: return "Digest mismatch";
case CRM_EX_NOSUCH: return "No such object";
case CRM_EX_QUORUM: return "Quorum required";
case CRM_EX_UNSAFE: return "Operation not safe";
case CRM_EX_EXISTS: return "Requested item already exists";
case CRM_EX_MULTIPLE: return "Multiple items match request";
case CRM_EX_EXPIRED: return "Requested item has expired";
case CRM_EX_NOT_YET_IN_EFFECT: return "Requested item is not yet in effect";
case CRM_EX_INDETERMINATE: return "Could not determine status";
case CRM_EX_UNSATISFIED: return "Not applicable under current conditions";
case CRM_EX_OLD: return "Update was older than existing configuration";
case CRM_EX_TIMEOUT: return "Timeout occurred";
case CRM_EX_MAX: return "Error occurred";
}
if ((exit_code > 128) && (exit_code < CRM_EX_MAX)) {
return "Interrupted by signal";
}
return "Unknown exit status";
}
//! \deprecated Use standard return codes and pcmk_rc2exitc() instead
crm_exit_t
crm_errno2exit(int rc)
{
rc = abs(rc); // Convenience for functions that return -errno
switch (rc) {
case pcmk_ok:
return CRM_EX_OK;
case pcmk_err_no_quorum:
return CRM_EX_QUORUM;
case pcmk_err_old_data:
return CRM_EX_OLD;
case pcmk_err_schema_validation:
case pcmk_err_transform_failed:
return CRM_EX_CONFIG;
case pcmk_err_bad_nvpair:
return CRM_EX_INVALID_PARAM;
case pcmk_err_already:
return CRM_EX_EXISTS;
case pcmk_err_multiple:
return CRM_EX_MULTIPLE;
case pcmk_err_node_unknown:
case pcmk_err_unknown_format:
return CRM_EX_NOSUCH;
default:
return pcmk_rc2exitc(rc); // system errno
}
}
/*!
* \brief Map a function return code to the most similar exit code
*
* \param[in] rc Function return code
*
* \return Most similar exit code
*/
crm_exit_t
pcmk_rc2exitc(int rc)
{
switch (rc) {
case pcmk_rc_ok:
return CRM_EX_OK;
case pcmk_rc_no_quorum:
return CRM_EX_QUORUM;
case pcmk_rc_old_data:
return CRM_EX_OLD;
case pcmk_rc_schema_validation:
case pcmk_rc_transform_failed:
return CRM_EX_CONFIG;
case pcmk_rc_bad_nvpair:
return CRM_EX_INVALID_PARAM;
case EACCES:
return CRM_EX_INSUFFICIENT_PRIV;
case EBADF:
case EINVAL:
case EFAULT:
case ENOSYS:
case EOVERFLOW:
+ case pcmk_rc_underflow:
return CRM_EX_SOFTWARE;
case EBADMSG:
case EMSGSIZE:
case ENOMSG:
case ENOPROTOOPT:
case EPROTO:
case EPROTONOSUPPORT:
case EPROTOTYPE:
return CRM_EX_PROTOCOL;
case ECOMM:
case ENOMEM:
return CRM_EX_OSERR;
case ECONNABORTED:
case ECONNREFUSED:
case ECONNRESET:
case ENOTCONN:
return CRM_EX_DISCONNECT;
case EEXIST:
case pcmk_rc_already:
return CRM_EX_EXISTS;
case EIO:
case pcmk_rc_no_output:
return CRM_EX_IOERR;
case ENOTSUP:
#if EOPNOTSUPP != ENOTSUP
case EOPNOTSUPP:
#endif
return CRM_EX_UNIMPLEMENT_FEATURE;
case ENOTUNIQ:
case pcmk_rc_multiple:
return CRM_EX_MULTIPLE;
case ENXIO:
case pcmk_rc_node_unknown:
case pcmk_rc_unknown_format:
return CRM_EX_NOSUCH;
case ETIME:
case ETIMEDOUT:
return CRM_EX_TIMEOUT;
case EAGAIN:
case EBUSY:
return CRM_EX_UNSATISFIED;
case pcmk_rc_before_range:
return CRM_EX_NOT_YET_IN_EFFECT;
case pcmk_rc_after_range:
return CRM_EX_EXPIRED;
case pcmk_rc_undetermined:
return CRM_EX_INDETERMINATE;
case pcmk_rc_op_unsatisfied:
return CRM_EX_UNSATISFIED;
case pcmk_rc_within_range:
return CRM_EX_OK;
case pcmk_rc_no_input:
return CRM_EX_NOINPUT;
default:
return CRM_EX_ERROR;
}
}
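To show how a command-line tool would typically consume this mapping (an editorial sketch, not part of the patch; crm_exit() is assumed to come from the same headers):

static crm_exit_t
tool_done(int rc)
{
    // Map the internal return code to the closest process exit status, then
    // let crm_exit() run the usual mainloop/XML cleanup before exiting
    return crm_exit(pcmk_rc2exitc(rc));
}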
// Other functions
const char *
bz2_strerror(int rc)
{
// See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17
switch (rc) {
case BZ_OK:
case BZ_RUN_OK:
case BZ_FLUSH_OK:
case BZ_FINISH_OK:
case BZ_STREAM_END:
return "Ok";
case BZ_CONFIG_ERROR:
return "libbz2 has been improperly compiled on your platform";
case BZ_SEQUENCE_ERROR:
return "library functions called in the wrong order";
case BZ_PARAM_ERROR:
return "parameter is out of range or otherwise incorrect";
case BZ_MEM_ERROR:
return "memory allocation failed";
case BZ_DATA_ERROR:
return "data integrity error is detected during decompression";
case BZ_DATA_ERROR_MAGIC:
return "the compressed stream does not start with the correct magic bytes";
case BZ_IO_ERROR:
return "error reading or writing in the compressed file";
case BZ_UNEXPECTED_EOF:
return "compressed file finishes before the logical end of stream is detected";
case BZ_OUTBUFF_FULL:
return "output data will not fit into the buffer provided";
}
return "Unknown error";
}
crm_exit_t
crm_exit(crm_exit_t rc)
{
/* A compiler could theoretically use any type for crm_exit_t, but an int
* should always hold it, so cast to int to keep static analysis happy.
*/
if ((((int) rc) < 0) || (((int) rc) > CRM_EX_MAX)) {
rc = CRM_EX_ERROR;
}
mainloop_cleanup();
crm_xml_cleanup();
pcmk__cli_option_cleanup();
if (crm_system_name) {
crm_info("Exiting %s " CRM_XS " with status %d", crm_system_name, rc);
free(crm_system_name);
} else {
crm_trace("Exiting with status %d", rc);
}
qb_log_fini(); // Don't log anything after this point
exit(rc);
}
diff --git a/lib/common/strings.c b/lib/common/strings.c
index d2e56bf723..00edd3585b 100644
--- a/lib/common/strings.c
+++ b/lib/common/strings.c
@@ -1,917 +1,1082 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <regex.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
+#include <float.h> // DBL_MIN
#include <limits.h>
+#include <math.h> // fabs()
#include <bzlib.h>
#include <sys/types.h>
char *
crm_itoa_stack(int an_int, char *buffer, size_t len)
{
if (buffer != NULL) {
snprintf(buffer, len, "%d", an_int);
}
return buffer;
}
/*!
* \internal
* \brief Scan a long long integer from a string
*
* \param[in] text String to scan
* \param[out] result If not NULL, where to store scanned value
* \param[out] end_text If not NULL, where to store pointer to just after value
*
- * \return Standard Pacemaker return code (also set errno on error)
+ * \return Standard Pacemaker return code (\c pcmk_rc_ok on success,
+ * \c EINVAL on failed string conversion due to invalid input,
+ * or \c EOVERFLOW on arithmetic overflow)
+ * \note Sets \c errno on error
*/
static int
scan_ll(const char *text, long long *result, char **end_text)
{
- long long local_result = -1;
+ long long local_result = PCMK__PARSE_INT_DEFAULT;
char *local_end_text = NULL;
int rc = pcmk_rc_ok;
errno = 0;
if (text != NULL) {
#ifdef ANSI_ONLY
local_result = (long long) strtol(text, &local_end_text, 10);
#else
local_result = strtoll(text, &local_end_text, 10);
#endif
if (errno == ERANGE) {
- rc = errno;
+ rc = EOVERFLOW;
crm_warn("Integer parsed from %s was clipped to %lld",
text, local_result);
} else if (errno != 0) {
rc = errno;
- local_result = -1;
- crm_err("Could not parse integer from %s (using -1 instead): %s",
- text, pcmk_rc_str(rc));
+ local_result = PCMK__PARSE_INT_DEFAULT;
+ crm_err("Could not parse integer from %s (using %d instead): %s",
+ text, PCMK__PARSE_INT_DEFAULT, pcmk_rc_str(rc));
} else if (local_end_text == text) {
rc = EINVAL;
- local_result = -1;
- crm_err("Could not parse integer from %s (using -1 instead): "
- "No digits found", text);
+ local_result = PCMK__PARSE_INT_DEFAULT;
+ crm_err("Could not parse integer from %s (using %d instead): "
+ "No digits found", text, PCMK__PARSE_INT_DEFAULT);
}
- if ((end_text == NULL) && (local_end_text != NULL)
- && (local_end_text[0] != '\0')) {
+ if ((end_text == NULL) && !pcmk__str_empty(local_end_text)) {
crm_warn("Characters left over after parsing '%s': '%s'",
text, local_end_text);
}
errno = rc;
}
if (end_text != NULL) {
*end_text = local_end_text;
}
if (result != NULL) {
*result = local_result;
}
return rc;
}
/*!
* \brief Parse a long long integer value from a string
*
* \param[in] text The string to parse
* \param[in] default_text Default string to parse if text is NULL
*
- * \return Parsed value on success, -1 (and set errno) on error
+ * \return Parsed value on success, PCMK__PARSE_INT_DEFAULT (and set
+ * errno) on error
*/
long long
crm_parse_ll(const char *text, const char *default_text)
{
long long result;
if (text == NULL) {
text = default_text;
if (text == NULL) {
crm_err("No default conversion value supplied");
errno = EINVAL;
- return -1;
+ return PCMK__PARSE_INT_DEFAULT;
}
}
scan_ll(text, &result, NULL);
return result;
}
/*!
* \brief Parse an integer value from a string
*
* \param[in] text The string to parse
* \param[in] default_text Default string to parse if text is NULL
*
- * \return Parsed value on success, INT_MIN or INT_MAX (and set errno to ERANGE)
- * if parsed value is out of integer range, otherwise -1 (and set errno)
+ * \return Parsed value on success, INT_MIN or INT_MAX (and set errno to
+ * ERANGE) if parsed value is out of integer range, otherwise
+ * PCMK__PARSE_INT_DEFAULT (and set errno)
*/
int
crm_parse_int(const char *text, const char *default_text)
{
long long result = crm_parse_ll(text, default_text);
if (result < INT_MIN) {
// If errno is ERANGE, crm_parse_ll() has already logged a message
if (errno != ERANGE) {
crm_err("Conversion of %s was clipped: %lld", text, result);
errno = ERANGE;
}
return INT_MIN;
} else if (result > INT_MAX) {
// If errno is ERANGE, crm_parse_ll() has already logged a message
if (errno != ERANGE) {
crm_err("Conversion of %s was clipped: %lld", text, result);
errno = ERANGE;
}
return INT_MAX;
}
return (int) result;
}
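A hedged example of the intended calling convention (not part of the patch): callers clear errno before parsing and check it afterward, since the clipped or default return value alone cannot distinguish success from failure.

#include <errno.h>
#include <crm/common/util.h>  // assumed to declare crm_parse_int()

static int
parse_timeout(const char *user_value)
{
    int timeout;

    errno = 0;
    timeout = crm_parse_int(user_value, "20");  // "20" is parsed if user_value is NULL
    if (errno != 0) {
        timeout = 20;   // invalid or out-of-range input; fall back to a fixed default
    }
    return timeout;
}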
+/*!
+ * \internal
+ * \brief Scan a double-precision floating-point value from a string
+ *
+ * \param[in] text The string to parse
+ * \param[out] result Parsed value on success, or
+ * \c PCMK__PARSE_DBL_DEFAULT on error
+ * \param[in] default_text Default string to parse if \p text is
+ * \c NULL
+ * \param[out] end_text If not \c NULL, where to store a pointer
+ * to the position immediately after the
+ * value
+ *
+ * \return Standard Pacemaker return code (\c pcmk_rc_ok on success,
+ * \c EINVAL on failed string conversion due to invalid input,
+ * \c EOVERFLOW on arithmetic overflow, \c pcmk_rc_underflow
+ * on arithmetic underflow, or \c errno from \c strtod() on
+ * other parse errors)
+ */
+int
+pcmk__scan_double(const char *text, double *result, const char *default_text,
+ char **end_text)
+{
+ int rc = pcmk_rc_ok;
+ char *local_end_text = NULL;
+
+ CRM_ASSERT(result != NULL);
+ *result = PCMK__PARSE_DBL_DEFAULT;
+
+ text = (text != NULL) ? text : default_text;
+
+ if (text == NULL) {
+ rc = EINVAL;
+ crm_debug("No text and no default conversion value supplied");
+
+ } else {
+ errno = 0;
+ *result = strtod(text, &local_end_text);
+
+ if (errno == ERANGE) {
+ /*
+ * Overflow: strtod() returns +/- HUGE_VAL and sets errno to
+ * ERANGE
+ *
+ * Underflow: strtod() returns "a value whose magnitude is
+ * no greater than the smallest normalized
+ * positive" double. Whether ERANGE is set is
+ * implementation-defined.
+ */
+ const char *over_under;
+
+ if (fabs(*result) > DBL_MIN) {
+ rc = EOVERFLOW;
+ over_under = "over";
+ } else {
+ rc = pcmk_rc_underflow;
+ over_under = "under";
+ }
+
+ crm_debug("Floating-point value parsed from '%s' would %sflow "
+ "(using %g instead)", text, over_under, *result);
+
+ } else if (errno != 0) {
+ rc = errno;
+ // strtod() sets *result = 0 on parse failure
+ *result = PCMK__PARSE_DBL_DEFAULT;
+
+ crm_debug("Could not parse floating-point value from '%s' (using "
+ "%.1f instead): %s", text, PCMK__PARSE_DBL_DEFAULT,
+ pcmk_rc_str(rc));
+
+ } else if (local_end_text == text) {
+ // errno == 0, but nothing was parsed
+ rc = EINVAL;
+ *result = PCMK__PARSE_DBL_DEFAULT;
+
+ crm_debug("Could not parse floating-point value from '%s' (using "
+ "%.1f instead): No digits found", text,
+ PCMK__PARSE_DBL_DEFAULT);
+
+ } else if (fabs(*result) <= DBL_MIN) {
+ /*
+ * errno == 0 and text was parsed, but value might have
+ * underflowed.
+ *
+ * ERANGE might not be set for underflow. Check magnitude
+ * of *result, but also make sure the input number is not
+ * actually zero (0 <= DBL_MIN is not underflow).
+ *
+ * This check must come last. A parse failure in strtod()
+ * also sets *result == 0, so a parse failure would match
+ * this test condition prematurely.
+ */
+ for (const char *p = text; p != local_end_text; p++) {
+ if (strchr("0.eE", *p) == NULL) {
+ rc = pcmk_rc_underflow;
+ crm_debug("Floating-point value parsed from '%s' would "
+ "underflow (using %g instead)", text, *result);
+ break;
+ }
+ }
+
+ } else {
+ crm_trace("Floating-point value parsed successfully from "
+ "'%s': %g", text, *result);
+ }
+
+ if ((end_text == NULL) && !pcmk__str_empty(local_end_text)) {
+ crm_debug("Characters left over after parsing '%s': '%s'",
+ text, local_end_text);
+ }
+ }
+
+ if (end_text != NULL) {
+ *end_text = local_end_text;
+ }
+
+ return rc;
+}
+
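A brief usage sketch for the new helper (editorial, not part of the patch; the function is internal, so inclusion of crm_internal.h is assumed):

static double
scan_or_default(const char *text)
{
    double value = 0.0;
    int rc = pcmk__scan_double(text, &value, "1.0", NULL);

    if (rc != pcmk_rc_ok) {
        // value now holds PCMK__PARSE_DBL_DEFAULT, or the clipped result on over/underflow
        crm_debug("Could not parse '%s': %s", text, pcmk_rc_str(rc));
    }
    return value;
}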
/*!
* \internal
* \brief Parse a guint from a string stored in a hash table
*
* \param[in] table Hash table to search
* \param[in] key Hash table key to use to retrieve string
* \param[in] default_val What to use if key has no entry in table
* \param[out] result If not NULL, where to store parsed integer
*
* \return Standard Pacemaker return code
*/
int
pcmk__guint_from_hash(GHashTable *table, const char *key, guint default_val,
guint *result)
{
const char *value;
long long value_ll;
CRM_CHECK((table != NULL) && (key != NULL), return EINVAL);
value = g_hash_table_lookup(table, key);
if (value == NULL) {
if (result != NULL) {
*result = default_val;
}
return pcmk_rc_ok;
}
errno = 0;
value_ll = crm_parse_ll(value, NULL);
if (errno != 0) {
return errno; // Message already logged
}
if ((value_ll < 0) || (value_ll > G_MAXUINT)) {
crm_warn("Could not parse non-negative integer from %s", value);
return ERANGE;
}
if (result != NULL) {
*result = (guint) value_ll;
}
return pcmk_rc_ok;
}
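For context, a hypothetical caller (names are illustrative, not from the patch) might use this to read an interval from a parameter table:

static guint
interval_from_params(GHashTable *params)
{
    guint interval_ms = 0;

    // Fall back to 1000ms when "interval" is absent; a present-but-invalid
    // value returns an error code and leaves interval_ms untouched
    if (pcmk__guint_from_hash(params, "interval", 1000, &interval_ms) != pcmk_rc_ok) {
        interval_ms = 1000;
    }
    return interval_ms;
}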
#ifndef NUMCHARS
# define NUMCHARS "0123456789."
#endif
#ifndef WHITESPACE
# define WHITESPACE " \t\n\r\f"
#endif
/*!
* \brief Parse a time+units string and return milliseconds equivalent
*
* \param[in] input String with a number and units (optionally with whitespace
* before and/or after the number)
*
- * \return Milliseconds corresponding to string expression, or -1 on error
+ * \return Milliseconds corresponding to string expression, or
+ * PCMK__PARSE_INT_DEFAULT on error
*/
long long
crm_get_msec(const char *input)
{
const char *num_start = NULL;
const char *units;
long long multiplier = 1000;
long long divisor = 1;
- long long msec = -1;
+ long long msec = PCMK__PARSE_INT_DEFAULT;
size_t num_len = 0;
char *end_text = NULL;
if (input == NULL) {
- return -1;
+ return PCMK__PARSE_INT_DEFAULT;
}
num_start = input + strspn(input, WHITESPACE);
num_len = strspn(num_start, NUMCHARS);
if (num_len < 1) {
- return -1;
+ return PCMK__PARSE_INT_DEFAULT;
}
units = num_start + num_len;
units += strspn(units, WHITESPACE);
if (!strncasecmp(units, "ms", 2) || !strncasecmp(units, "msec", 4)) {
multiplier = 1;
divisor = 1;
} else if (!strncasecmp(units, "us", 2) || !strncasecmp(units, "usec", 4)) {
multiplier = 1;
divisor = 1000;
} else if (!strncasecmp(units, "s", 1) || !strncasecmp(units, "sec", 3)) {
multiplier = 1000;
divisor = 1;
} else if (!strncasecmp(units, "m", 1) || !strncasecmp(units, "min", 3)) {
multiplier = 60 * 1000;
divisor = 1;
} else if (!strncasecmp(units, "h", 1) || !strncasecmp(units, "hr", 2)) {
multiplier = 60 * 60 * 1000;
divisor = 1;
} else if ((*units != EOS) && (*units != '\n') && (*units != '\r')) {
- return -1;
+ return PCMK__PARSE_INT_DEFAULT;
}
scan_ll(num_start, &msec, &end_text);
if (msec > (LLONG_MAX / multiplier)) {
// Arithmetic overflow (multiplier and divisor are mutually exclusive, so only the multiplication can overflow)
return LLONG_MAX;
}
msec *= multiplier;
msec /= divisor;
return msec;
}
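A small illustration of the parser's behavior (editorial sketch; crm_get_msec() is declared in the public utility headers):

static guint
timeout_ms_from_spec(const char *spec)
{
    long long ms = crm_get_msec(spec);  // e.g. "90s" -> 90000, "500ms" -> 500, "2 min" -> 120000

    if (ms < 0) {
        ms = 20000;  // NULL, empty, or unparseable spec; use 20 seconds
    }
    return (guint) ms;
}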
gboolean
crm_is_true(const char *s)
{
gboolean ret = FALSE;
if (s != NULL) {
crm_str_to_boolean(s, &ret);
}
return ret;
}
int
crm_str_to_boolean(const char *s, int *ret)
{
if (s == NULL) {
return -1;
} else if (strcasecmp(s, "true") == 0
|| strcasecmp(s, "on") == 0
|| strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) {
*ret = TRUE;
return 1;
} else if (strcasecmp(s, "false") == 0
|| strcasecmp(s, "off") == 0
|| strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) {
*ret = FALSE;
return 1;
}
return -1;
}
char *
crm_strip_trailing_newline(char *str)
{
int len;
if (str == NULL) {
return str;
}
for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; len--) {
str[len] = '\0';
}
return str;
}
/*!
* \brief Check whether a string starts with a certain sequence
*
* \param[in] str String to check
* \param[in] prefix Sequence to match against beginning of \p str
*
* \return \c true if \p str begins with match, \c false otherwise
 * \note This is equivalent to !strncmp(s, prefix, strlen(prefix)),
 * but is likely less efficient when prefix is a string literal
 * (where the compiler can optimize away the strlen() call at
 * compile time), and more efficient otherwise.
*/
bool
pcmk__starts_with(const char *str, const char *prefix)
{
const char *s = str;
const char *p = prefix;
if (!s || !p) {
return false;
}
while (*s && *p) {
if (*s++ != *p++) {
return false;
}
}
return (*p == 0);
}
static inline bool
ends_with(const char *s, const char *match, bool as_extension)
{
if (pcmk__str_empty(match)) {
return true;
} else if (s == NULL) {
return false;
} else {
size_t slen, mlen;
/* Besides as_extension, we could also check
!strchr(&match[1], match[0]) but that would be inefficient.
*/
if (as_extension) {
s = strrchr(s, match[0]);
return (s == NULL)? false : !strcmp(s, match);
}
mlen = strlen(match);
slen = strlen(s);
return ((slen >= mlen) && !strcmp(s + slen - mlen, match));
}
}
/*!
* \internal
* \brief Check whether a string ends with a certain sequence
*
* \param[in] s String to check
* \param[in] match Sequence to match against end of \p s
*
* \return \c true if \p s ends case-sensitively with match, \c false otherwise
* \note pcmk__ends_with_ext() can be used if the first character of match
* does not recur in match.
*/
bool
pcmk__ends_with(const char *s, const char *match)
{
return ends_with(s, match, false);
}
/*!
* \internal
* \brief Check whether a string ends with a certain "extension"
*
* \param[in] s String to check
* \param[in] match Extension to match against end of \p s, that is,
* its first character must not occur anywhere
* in the rest of that very sequence (example: file
* extension where the last dot is its delimiter,
* e.g., ".html"); incorrect results may be
* returned otherwise.
*
* \return \c true if \p s ends (verbatim, i.e., case sensitively)
* with "extension" designated as \p match (including empty
* string), \c false otherwise
*
* \note Main incentive to prefer this function over \c pcmk__ends_with()
* where possible is the efficiency (at the cost of added
* restriction on \p match as stated; the complexity class
 * remains the same, though: O(M+N) vs. O(M+2N)).
*/
bool
pcmk__ends_with_ext(const char *s, const char *match)
{
return ends_with(s, match, true);
}
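To make the intended division of labor concrete (an editorial sketch with made-up identifiers):

static bool
classify_inputs(const char *op_key, const char *filename)
{
    // Prefix check is NULL-safe and needs no strlen() of op_key
    bool is_stop = pcmk__starts_with(op_key, "rsc1_stop_");

    // ".xml" qualifies as an "extension": its first character never recurs within it
    bool is_xml = pcmk__ends_with_ext(filename, ".xml");

    return is_stop && is_xml;
}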
/*
* This re-implements g_str_hash as it was prior to glib2-2.28:
*
* https://gitlab.gnome.org/GNOME/glib/commit/354d655ba8a54b754cb5a3efb42767327775696c
*
* Note that the new g_str_hash is presumably a *better* hash (it's actually
* a correct implementation of DJB's hash), but we need to preserve existing
* behaviour, because the hash key ultimately determines the "sort" order
* when iterating through GHashTables, which affects allocation of scores to
* clone instances when iterating through rsc->allowed_nodes. It (somehow)
* also appears to have some minor impact on the ordering of a few
* pseudo_event IDs in the transition graph.
*/
guint
g_str_hash_traditional(gconstpointer v)
{
const signed char *p;
guint32 h = 0;
for (p = v; *p != '\0'; p++)
h = (h << 5) - h + *p;
return h;
}
/* used with hash tables where case does not matter */
gboolean
crm_strcase_equal(gconstpointer a, gconstpointer b)
{
return pcmk__str_eq((const char *)a, (const char *)b, pcmk__str_casei);
}
guint
crm_strcase_hash(gconstpointer v)
{
const signed char *p;
guint32 h = 0;
for (p = v; *p != '\0'; p++)
h = (h << 5) - h + g_ascii_tolower(*p);
return h;
}
static void
copy_str_table_entry(gpointer key, gpointer value, gpointer user_data)
{
if (key && value && user_data) {
g_hash_table_insert((GHashTable*)user_data, strdup(key), strdup(value));
}
}
GHashTable *
crm_str_table_dup(GHashTable *old_table)
{
GHashTable *new_table = NULL;
if (old_table) {
new_table = crm_str_table_new();
g_hash_table_foreach(old_table, copy_str_table_entry, new_table);
}
return new_table;
}
/*!
* \internal
* \brief Add a word to a space-separated string list
*
* \param[in,out] list Pointer to beginning of list
* \param[in] word Word to add to list
*
* \return (Potentially new) beginning of list
* \note This dynamically reallocates list as needed.
*/
char *
pcmk__add_word(char *list, const char *word)
{
if (word != NULL) {
size_t len = list? strlen(list) : 0;
list = realloc_safe(list, len + strlen(word) + 2); // 2 = space + EOS
sprintf(list + len, " %s", word);
}
return list;
}
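A short usage note (editorial): because each word is appended as " %s", the resulting list carries a leading space, which callers are expected to tolerate or trim.

static char *
build_action_list(void)
{
    char *list = NULL;

    list = pcmk__add_word(list, "stop");
    list = pcmk__add_word(list, "start");
    // list is now " stop start"; the caller owns and eventually frees it
    return list;
}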
/*!
* \internal
* \brief Compress data
*
* \param[in] data Data to compress
* \param[in] length Number of characters of data to compress
* \param[in] max Maximum size of compressed data (or 0 to estimate)
* \param[out] result Where to store newly allocated compressed result
* \param[out] result_len Where to store actual compressed length of result
*
* \return Standard Pacemaker return code
*/
int
pcmk__compress(const char *data, unsigned int length, unsigned int max,
char **result, unsigned int *result_len)
{
int rc;
char *compressed = NULL;
char *uncompressed = strdup(data);
#ifdef CLOCK_MONOTONIC
struct timespec after_t;
struct timespec before_t;
#endif
if (max == 0) {
max = (length * 1.01) + 601; // Size guaranteed to hold result
}
#ifdef CLOCK_MONOTONIC
clock_gettime(CLOCK_MONOTONIC, &before_t);
#endif
compressed = calloc((size_t) max, sizeof(char));
CRM_ASSERT(compressed);
*result_len = max;
rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length,
CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK);
free(uncompressed);
if (rc != BZ_OK) {
crm_err("Compression of %d bytes failed: %s " CRM_XS " bzerror=%d",
length, bz2_strerror(rc), rc);
free(compressed);
return pcmk_rc_error;
}
#ifdef CLOCK_MONOTONIC
clock_gettime(CLOCK_MONOTONIC, &after_t);
crm_trace("Compressed %d bytes into %d (ratio %d:1) in %.0fms",
length, *result_len, length / (*result_len),
(after_t.tv_sec - before_t.tv_sec) * 1000 +
(after_t.tv_nsec - before_t.tv_nsec) / 1e6);
#else
crm_trace("Compressed %d bytes into %d (ratio %d:1)",
length, *result_len, length / (*result_len));
#endif
*result = compressed;
return pcmk_rc_ok;
}
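A hedged sketch of how a caller would use the compression helper (internal API, so crm_internal.h is assumed; error handling trimmed for brevity):

static int
compress_xml_text(const char *xml_text, char **out, unsigned int *out_len)
{
    // Passing max == 0 lets pcmk__compress() estimate a buffer size guaranteed to fit
    return pcmk__compress(xml_text, (unsigned int) strlen(xml_text), 0, out, out_len);
}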
char *
crm_strdup_printf(char const *format, ...)
{
va_list ap;
int len = 0;
char *string = NULL;
va_start(ap, format);
len = vasprintf (&string, format, ap);
CRM_ASSERT(len > 0);
va_end(ap);
return string;
}
int
pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end)
{
char *remainder = NULL;
CRM_ASSERT(start != NULL && end != NULL);
- *start = -1;
- *end = -1;
+ *start = PCMK__PARSE_INT_DEFAULT;
+ *end = PCMK__PARSE_INT_DEFAULT;
crm_trace("Attempting to decode: [%s]", srcstring);
if (pcmk__str_empty(srcstring) || !strcmp(srcstring, "-")) {
return pcmk_rc_unknown_format;
}
/* String starts with a dash, so this is either a range with
* no beginning or garbage.
* */
if (*srcstring == '-') {
int rc = scan_ll(srcstring+1, end, &remainder);
if (rc != pcmk_rc_ok || *remainder != '\0') {
return pcmk_rc_unknown_format;
} else {
return pcmk_rc_ok;
}
}
if (scan_ll(srcstring, start, &remainder) != pcmk_rc_ok) {
return pcmk_rc_unknown_format;
}
if (*remainder && *remainder == '-') {
if (*(remainder+1)) {
char *more_remainder = NULL;
int rc = scan_ll(remainder+1, end, &more_remainder);
if (rc != pcmk_rc_ok || *more_remainder != '\0') {
return pcmk_rc_unknown_format;
}
}
} else if (*remainder && *remainder != '-') {
- *start = -1;
+ *start = PCMK__PARSE_INT_DEFAULT;
return pcmk_rc_unknown_format;
} else {
/* The input string contained only one number. Set start and end
* to the same value and return pcmk_rc_ok. This gives the caller
* a way to tell this condition apart from a range with no end.
*/
*end = *start;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Find a string in a list of strings
*
* Search \p lst for \p s, taking case into account. As a special case,
* if "*" is the only element of \p lst, the search is successful.
*
* \param[in] lst List to search
* \param[in] s String to search for
*
* \return \c TRUE if \p s is in \p lst, or \c FALSE otherwise
*/
gboolean
pcmk__str_in_list(GList *lst, const gchar *s)
{
if (lst == NULL) {
return FALSE;
}
if (strcmp(lst->data, "*") == 0 && lst->next == NULL) {
return TRUE;
}
return g_list_find_custom(lst, s, (GCompareFunc) strcmp) != NULL;
}
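As an illustration of the wildcard special case (editorial sketch using ordinary GLib list construction):

static gboolean
wildcard_demo(void)
{
    GList *sections = g_list_prepend(NULL, (gpointer) "*");
    gboolean hit = pcmk__str_in_list(sections, "nodes");  // TRUE: a lone "*" matches anything

    g_list_free(sections);
    return hit;  // pcmk__str_in_list(NULL, "nodes") would instead return FALSE
}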
static bool
str_any_of(bool casei, const char *s, va_list args)
{
bool rc = false;
while (1) {
const char *ele = va_arg(args, const char *);
if (ele == NULL) {
break;
} else if (pcmk__str_eq(s, ele, casei ? pcmk__str_casei : pcmk__str_none)) {
rc = true;
break;
}
}
return rc;
}
/*!
* \internal
* \brief Is a string a member of a list of strings?
*
* \param[in] s String to search for in \p ...
* \param[in] ... Strings to compare \p s against. The final string
* must be NULL.
*
* \note The comparison is done case-insensitively. The function name is
* meant to be reminiscent of strcasecmp.
*
* \return \c true if \p s is in \p ..., or \c false otherwise
*/
bool
pcmk__strcase_any_of(const char *s, ...)
{
va_list ap;
bool rc;
va_start(ap, s);
rc = str_any_of(true, s, ap);
va_end(ap);
return rc;
}
/*!
* \internal
* \brief Is a string a member of a list of strings?
*
* \param[in] s String to search for in \p ...
* \param[in] ... Strings to compare \p s against. The final string
* must be NULL.
*
* \note The comparison is done taking case into account.
*
* \return \c true if \p s is in \p ..., or \c false otherwise
*/
bool
pcmk__str_any_of(const char *s, ...)
{
va_list ap;
bool rc;
va_start(ap, s);
rc = str_any_of(false, s, ap);
va_end(ap);
return rc;
}
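A usage sketch (editorial) combining the variadic matcher with the action-name constants defined in crm.h:

static bool
is_start_like(const char *action)
{
    // The trailing NULL sentinel is required by the varargs implementation
    return pcmk__strcase_any_of(action, CRMD_ACTION_START, CRMD_ACTION_PROMOTE, NULL);
}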
-/*
+/*!
+ * \internal
+ * \brief Check whether a character is in any of a list of strings
+ *
+ * \param[in] ch Character (ASCII) to search for
+ * \param[in] ... Strings to search. Final argument must be
+ * \c NULL.
+ *
+ * \return \c true if any of \p ... contain \p ch, \c false otherwise
+ * \note \p ... must contain at least one argument (\c NULL).
+ */
+bool
+pcmk__char_in_any_str(int ch, ...)
+{
+ bool rc = false;
+ va_list ap;
+
+ /*
+ * Passing a char to va_start() can generate compiler warnings,
+ * so ch is declared as an int.
+ */
+ va_start(ap, ch);
+
+ while (1) {
+ const char *ele = va_arg(ap, const char *);
+
+ if (ele == NULL) {
+ break;
+ } else if (strchr(ele, ch) != NULL) {
+ rc = true;
+ break;
+ }
+ }
+
+ va_end(ap);
+ return rc;
+}
+
+/*!
* \brief Sort strings, with numeric portions sorted numerically
*
* Sort two strings case-insensitively like strcasecmp(), but with any numeric
* portions of the string sorted numerically. This is particularly useful for
* node names (for example, "node10" will sort higher than "node9" but lower
* than "remotenode9").
*
* \param[in] s1 First string to compare (must not be NULL)
* \param[in] s2 Second string to compare (must not be NULL)
*
* \retval -1 \p s1 comes before \p s2
* \retval 0 \p s1 and \p s2 are equal
* \retval 1 \p s1 comes after \p s2
*/
int
pcmk_numeric_strcasecmp(const char *s1, const char *s2)
{
while (*s1 && *s2) {
if (isdigit(*s1) && isdigit(*s2)) {
// If node names contain a number, sort numerically
char *end1 = NULL;
char *end2 = NULL;
long num1 = strtol(s1, &end1, 10);
long num2 = strtol(s2, &end2, 10);
// allow ordering e.g. 007 > 7
size_t len1 = end1 - s1;
size_t len2 = end2 - s2;
if (num1 < num2) {
return -1;
} else if (num1 > num2) {
return 1;
} else if (len1 < len2) {
return -1;
} else if (len1 > len2) {
return 1;
}
s1 = end1;
s2 = end2;
} else {
// Compare non-digits case-insensitively
int lower1 = tolower(*s1);
int lower2 = tolower(*s2);
if (lower1 < lower2) {
return -1;
} else if (lower1 > lower2) {
return 1;
}
++s1;
++s2;
}
}
if (!*s1 && *s2) {
return -1;
} else if (*s1 && !*s2) {
return 1;
}
return 0;
}
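For example (editorial sketch), sorting a GLib list of node names with this comparator places "node9" before "node10", and "node7" before "node007" on a numeric tie:

static GList *
sort_node_names(GList *node_names)
{
    return g_list_sort(node_names, (GCompareFunc) pcmk_numeric_strcasecmp);
}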
/*
* \brief Sort strings.
*
* This is your one-stop function for string comparison. By default, this
* function works like g_strcmp0. That is, like strcmp but a NULL string
* sorts before a non-NULL string.
*
* Behavior can be changed with various flags:
*
* - pcmk__str_regex - The second string is a regular expression that the
* first string will be matched against.
* - pcmk__str_casei - By default, comparisons are done taking case into
* account. This flag makes comparisons case-insensitive.
* This can be combined with pcmk__str_regex.
* - pcmk__str_null_matches - If one string is NULL and the other is not,
* still return 0.
*
* \param[in] s1 First string to compare
* \param[in] s2 Second string to compare, or a regular expression to
* match if pcmk__str_regex is set
* \param[in] flags A bitfield of pcmk__str_flags to modify operation
*
* \retval -1 \p s1 is NULL or comes before \p s2
* \retval 0 \p s1 and \p s2 are equal, or \p s1 is found in \p s2 if
* pcmk__str_regex is set
* \retval 1 \p s2 is NULL or \p s1 comes after \p s2, or if \p s2
* is an invalid regular expression, or \p s1 was not found
* in \p s2 if pcmk__str_regex is set.
*/
int
pcmk__strcmp(const char *s1, const char *s2, uint32_t flags)
{
/* If this flag is set, the second string is a regex. */
if (is_set(flags, pcmk__str_regex)) {
regex_t *r_patt = calloc(1, sizeof(regex_t));
int reg_flags = REG_EXTENDED | REG_NOSUB | (is_set(flags, pcmk__str_casei) ? REG_ICASE : 0);
int regcomp_rc = 0;
int rc = 0;
if (s1 == NULL || s2 == NULL) {
free(r_patt);
return 1;
}
regcomp_rc = regcomp(r_patt, s2, reg_flags);
if (regcomp_rc != 0) {
rc = 1;
crm_err("Bad regex '%s' for update: %s", s2, strerror(regcomp_rc));
} else {
rc = regexec(r_patt, s1, 0, NULL, 0);
if (rc != 0) {
rc = 1;
}
}
regfree(r_patt);
free(r_patt);
return rc;
}
/* If the strings are the same pointer, return 0 immediately. */
if (s1 == s2) {
return 0;
}
/* If this flag is set, return 0 if either (or both) of the input strings
* are NULL. If neither one is NULL, we need to continue and compare
* them normally.
*/
if (is_set(flags, pcmk__str_null_matches)) {
if (s1 == NULL || s2 == NULL) {
return 0;
}
}
/* Handle the cases where one is NULL and the str_null_matches flag is not set.
* A NULL string always sorts to the beginning.
*/
if (s1 == NULL) {
return -1;
} else if (s2 == NULL) {
return 1;
}
if (is_set(flags, pcmk__str_casei)) {
return strcasecmp(s1, s2);
} else {
return strcmp(s1, s2);
}
}
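A few flag combinations as an editorial illustration (return values per the \retval documentation above; internal API assumed via crm_internal.h):

static void
strcmp_flag_examples(void)
{
    // 0: a NULL argument matches when pcmk__str_null_matches is set
    int a = pcmk__strcmp(NULL, "anything", pcmk__str_null_matches);

    // 0: with pcmk__str_regex, the second argument is a pattern the first must match
    int b = pcmk__strcmp("pcmk-1", "pcmk-[0-9]+", pcmk__str_regex);

    // 0: case-insensitive comparison
    int c = pcmk__strcmp("Alpha", "alpha", pcmk__str_casei);

    crm_trace("flag demo: %d %d %d", a, b, c);
}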
// Deprecated functions kept only for backward API compatibility
gboolean safe_str_neq(const char *a, const char *b);
gboolean crm_str_eq(const char *a, const char *b, gboolean use_case);
//! \deprecated Use pcmk__str_eq() instead
gboolean
safe_str_neq(const char *a, const char *b)
{
if (a == b) {
return FALSE;
} else if (a == NULL || b == NULL) {
return TRUE;
} else if (strcasecmp(a, b) == 0) {
return FALSE;
}
return TRUE;
}
//! \deprecated Use pcmk__str_eq() instead
gboolean
crm_str_eq(const char *a, const char *b, gboolean use_case)
{
if (use_case) {
return g_strcmp0(a, b) == 0;
/* TODO - Figure out which calls, if any, really need to be case independent */
} else if (a == b) {
return TRUE;
} else if (a == NULL || b == NULL) {
/* shouldn't be comparing NULLs */
return FALSE;
} else if (strcasecmp(a, b) == 0) {
return TRUE;
}
return FALSE;
}
diff --git a/lib/common/tests/strings/Makefile.am b/lib/common/tests/strings/Makefile.am
index 11c3bd3c00..6e2202c4fe 100644
--- a/lib/common/tests/strings/Makefile.am
+++ b/lib/common/tests/strings/Makefile.am
@@ -1,21 +1,23 @@
AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include
LDADD = $(top_builddir)/lib/common/libcrmcommon.la
include $(top_srcdir)/mk/glib-tap.mk
# Add each test program here. Each test should be written as a little standalone
# program using the glib unit testing functions. See the documentation for more
# information.
#
# https://developer.gnome.org/glib/unstable/glib-Testing.html
-test_programs = pcmk__parse_ll_range \
+test_programs = pcmk__scan_double \
+ pcmk__parse_ll_range \
pcmk__str_any_of \
- pcmk__strcmp
+ pcmk__strcmp \
+ pcmk__char_in_any_str
# If any extra data needs to be added to the source distribution, add it to the
# following list.
dist_test_data =
# If any extra data needs to be used by tests but should not be added to the
# source distribution, add it to the following list.
test_data =
diff --git a/lib/common/tests/strings/pcmk__char_in_any_str.c b/lib/common/tests/strings/pcmk__char_in_any_str.c
new file mode 100644
index 0000000000..7bc3d1bdc1
--- /dev/null
+++ b/lib/common/tests/strings/pcmk__char_in_any_str.c
@@ -0,0 +1,44 @@
+#include <glib.h>
+
+#include <crm_internal.h>
+
+static void
+empty_list(void)
+{
+ g_assert_false(pcmk__char_in_any_str('x', NULL));
+ g_assert_false(pcmk__char_in_any_str('\0', NULL));
+}
+
+static void
+null_char(void)
+{
+ g_assert_true(pcmk__char_in_any_str('\0', "xxx", "yyy", NULL));
+ g_assert_true(pcmk__char_in_any_str('\0', "", NULL));
+}
+
+static void
+in_list(void)
+{
+ g_assert_true(pcmk__char_in_any_str('x', "aaa", "bbb", "xxx", NULL));
+}
+
+static void
+not_in_list(void)
+{
+ g_assert_false(pcmk__char_in_any_str('x', "aaa", "bbb", NULL));
+ g_assert_false(pcmk__char_in_any_str('A', "aaa", "bbb", NULL));
+ g_assert_false(pcmk__char_in_any_str('x', "", NULL));
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/common/strings/char_in_any_str/empty_list", empty_list);
+ g_test_add_func("/common/strings/char_in_any_str/null_char", null_char);
+ g_test_add_func("/common/strings/char_in_any_str/in", in_list);
+ g_test_add_func("/common/strings/char_in_any_str/not_in", not_in_list);
+
+ return g_test_run();
+}
+
diff --git a/lib/common/tests/strings/pcmk__parse_ll_range.c b/lib/common/tests/strings/pcmk__parse_ll_range.c
index 144d01b429..d94a9e36bb 100644
--- a/lib/common/tests/strings/pcmk__parse_ll_range.c
+++ b/lib/common/tests/strings/pcmk__parse_ll_range.c
@@ -1,84 +1,100 @@
#include <glib.h>
#include <crm_internal.h>
static void
-empty_input_string(void) {
+empty_input_string(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range(NULL, &start, &end) == pcmk_rc_unknown_format);
- g_assert(pcmk__parse_ll_range("", &start, &end) == pcmk_rc_unknown_format);
+ g_assert_cmpint(pcmk__parse_ll_range(NULL, &start, &end), ==,
+ pcmk_rc_unknown_format);
+ g_assert_cmpint(pcmk__parse_ll_range("", &start, &end), ==,
+ pcmk_rc_unknown_format);
}
static void
-missing_separator(void) {
+missing_separator(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("1234", &start, &end) == pcmk_rc_ok);
+ g_assert_cmpint(pcmk__parse_ll_range("1234", &start, &end), ==, pcmk_rc_ok);
g_assert_cmpint(start, ==, 1234);
g_assert_cmpint(end, ==, 1234);
}
static void
-only_separator(void) {
+only_separator(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("-", &start, &end) == pcmk_rc_unknown_format);
- g_assert_cmpint(start, ==, -1);
- g_assert_cmpint(end, ==, -1);
+ g_assert_cmpint(pcmk__parse_ll_range("-", &start, &end), ==,
+ pcmk_rc_unknown_format);
+ g_assert_cmpint(start, ==, PCMK__PARSE_INT_DEFAULT);
+ g_assert_cmpint(end, ==, PCMK__PARSE_INT_DEFAULT);
}
static void
-no_range_end(void) {
+no_range_end(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("2000-", &start, &end) == pcmk_rc_ok);
+ g_assert_cmpint(pcmk__parse_ll_range("2000-", &start, &end), ==,
+ pcmk_rc_ok);
g_assert_cmpint(start, ==, 2000);
- g_assert_cmpint(end, ==, -1);
+ g_assert_cmpint(end, ==, PCMK__PARSE_INT_DEFAULT);
}
static void
-no_range_start(void) {
+no_range_start(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("-2020", &start, &end) == pcmk_rc_ok);
- g_assert_cmpint(start, ==, -1);
+ g_assert_cmpint(pcmk__parse_ll_range("-2020", &start, &end), ==,
+ pcmk_rc_ok);
+ g_assert_cmpint(start, ==, PCMK__PARSE_INT_DEFAULT);
g_assert_cmpint(end, ==, 2020);
}
static void
-range_start_and_end(void) {
+range_start_and_end(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("2000-2020", &start, &end) == pcmk_rc_ok);
+ g_assert_cmpint(pcmk__parse_ll_range("2000-2020", &start, &end), ==,
+ pcmk_rc_ok);
g_assert_cmpint(start, ==, 2000);
g_assert_cmpint(end, ==, 2020);
}
static void
-garbage(void) {
+garbage(void)
+{
long long start, end;
- g_assert(pcmk__parse_ll_range("2000x-", &start, &end) == pcmk_rc_unknown_format);
- g_assert_cmpint(start, ==, -1);
- g_assert_cmpint(end, ==, -1);
+ g_assert_cmpint(pcmk__parse_ll_range("2000x-", &start, &end), ==,
+ pcmk_rc_unknown_format);
+ g_assert_cmpint(start, ==, PCMK__PARSE_INT_DEFAULT);
+ g_assert_cmpint(end, ==, PCMK__PARSE_INT_DEFAULT);
- g_assert(pcmk__parse_ll_range("-x2000", &start, &end) == pcmk_rc_unknown_format);
- g_assert_cmpint(start, ==, -1);
- g_assert_cmpint(end, ==, -1);
+ g_assert_cmpint(pcmk__parse_ll_range("-x2000", &start, &end), ==,
+ pcmk_rc_unknown_format);
+ g_assert_cmpint(start, ==, PCMK__PARSE_INT_DEFAULT);
+ g_assert_cmpint(end, ==, PCMK__PARSE_INT_DEFAULT);
}
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/common/strings/range/empty", empty_input_string);
g_test_add_func("/common/strings/range/no_sep", missing_separator);
g_test_add_func("/common/strings/range/only_sep", only_separator);
g_test_add_func("/common/strings/range/no_end", no_range_end);
g_test_add_func("/common/strings/range/no_start", no_range_start);
g_test_add_func("/common/strings/range/start_and_end", range_start_and_end);
g_test_add_func("/common/strings/range/garbage", garbage);
return g_test_run();
}
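
One practical note on the assertion changes in this file: GLib's g_assert_cmpint() prints both operand values when a comparison fails, while plain g_assert() only echoes the expression text, so mismatched return codes are much easier to diagnose. A minimal standalone sketch of the pattern (the helper and test path below are illustrative, not part of Pacemaker):

#include <glib.h>

// Illustrative stand-in for a function whose return code is under test
static int
helper_under_test(void)
{
    return 0;
}

static void
return_code_is_ok(void)
{
    /* On failure, g_assert_cmpint() reports both operand values, e.g.
     * "assertion failed (helper_under_test() == 0): (22 == 0)", whereas
     * plain g_assert(helper_under_test() == 0) only echoes the expression.
     */
    g_assert_cmpint(helper_under_test(), ==, 0);
}

int
main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/example/return_code_is_ok", return_code_is_ok);
    return g_test_run();
}
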
diff --git a/lib/common/tests/strings/pcmk__scan_double.c b/lib/common/tests/strings/pcmk__scan_double.c
new file mode 100644
index 0000000000..1e6138a2e6
--- /dev/null
+++ b/lib/common/tests/strings/pcmk__scan_double.c
@@ -0,0 +1,161 @@
+#include <float.h> // DBL_MAX, etc.
+#include <math.h> // fabs()
+
+#include <glib.h>
+
+#include <crm_internal.h>
+
+// Ensure plenty of characters for %f display
+#define LOCAL_BUF_SIZE (2 * DBL_MAX_10_EXP)
+
+/*
+ * Avoids compiler warnings for floating-point equality checks.
+ * Use only for comparing against exactly representable values (e.g., 1.0),
+ * not the results of arithmetic expressions.
+ */
+#define ASSERT_DBL_EQ(d1, d2) g_assert_cmpfloat(fabs((d1) - (d2)), \
+ <, DBL_EPSILON)
+
+static void
+empty_input_string(void)
+{
+ double result;
+
+ // Without default_text
+ g_assert_cmpint(pcmk__scan_double(NULL, &result, NULL, NULL), ==, EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+
+ g_assert_cmpint(pcmk__scan_double("", &result, NULL, NULL), ==, EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+
+ // With default_text
+ g_assert_cmpint(pcmk__scan_double(NULL, &result, "2.0", NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, 2.0);
+
+ g_assert_cmpint(pcmk__scan_double("", &result, "2.0", NULL), ==, EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+}
+
+static void
+bad_input_string(void)
+{
+ double result;
+
+ // Without default text
+ g_assert_cmpint(pcmk__scan_double("asdf", &result, NULL, NULL), ==, EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+
+ g_assert_cmpint(pcmk__scan_double("as2.0", &result, NULL, NULL), ==,
+ EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+
+ // With default text (not used)
+ g_assert_cmpint(pcmk__scan_double("asdf", &result, "2.0", NULL), ==,
+ EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+
+ g_assert_cmpint(pcmk__scan_double("as2.0", &result, "2.0", NULL), ==,
+ EINVAL);
+ ASSERT_DBL_EQ(result, PCMK__PARSE_DBL_DEFAULT);
+}
+
+static void
+trailing_chars(void)
+{
+ double result;
+
+ g_assert_cmpint(pcmk__scan_double("2.0asdf", &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, 2.0);
+}
+
+static void
+typical_case(void)
+{
+ char str[LOCAL_BUF_SIZE];
+ double result;
+
+ g_assert_cmpint(pcmk__scan_double("0.0", &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, 0.0);
+
+ g_assert_cmpint(pcmk__scan_double("1.0", &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, 1.0);
+
+ g_assert_cmpint(pcmk__scan_double("-1.0", &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, -1.0);
+
+ snprintf(str, LOCAL_BUF_SIZE, "%f", DBL_MAX);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, DBL_MAX);
+
+ snprintf(str, LOCAL_BUF_SIZE, "%f", -DBL_MAX);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==,
+ pcmk_rc_ok);
+ ASSERT_DBL_EQ(result, -DBL_MAX);
+}
+
+static void
+double_overflow(void)
+{
+ char str[LOCAL_BUF_SIZE];
+ double result;
+
+ /*
+ * 1e(DBL_MAX_10_EXP + 1) produces an inf value
+ * Can't use ASSERT_DBL_EQ() because (inf - inf) == NaN
+ */
+ snprintf(str, LOCAL_BUF_SIZE, "1e%d", DBL_MAX_10_EXP + 1);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==, EOVERFLOW);
+ g_assert_cmpfloat(result, >, DBL_MAX);
+
+ snprintf(str, LOCAL_BUF_SIZE, "-1e%d", DBL_MAX_10_EXP + 1);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==, EOVERFLOW);
+ g_assert_cmpfloat(result, <, -DBL_MAX);
+}
+
+static void
+double_underflow(void)
+{
+ char str[LOCAL_BUF_SIZE];
+ double result;
+
+ /*
+ * 1e(DBL_MIN_10_EXP - 1) produces a denormalized value (between 0
+ * and DBL_MIN)
+ *
+ * C99/C11: result will be **no greater than** DBL_MIN
+ */
+ snprintf(str, LOCAL_BUF_SIZE, "1e%d", DBL_MIN_10_EXP - 1);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==,
+ pcmk_rc_underflow);
+ g_assert_cmpfloat(result, >=, 0.0);
+ g_assert_cmpfloat(result, <=, DBL_MIN);
+
+ snprintf(str, LOCAL_BUF_SIZE, "-1e%d", DBL_MIN_10_EXP - 1);
+ g_assert_cmpint(pcmk__scan_double(str, &result, NULL, NULL), ==,
+ pcmk_rc_underflow);
+ g_assert_cmpfloat(result, <=, 0.0);
+ g_assert_cmpfloat(result, >=, -DBL_MIN);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ // Test for input string issues
+ g_test_add_func("/common/strings/double/empty_input", empty_input_string);
+ g_test_add_func("/common/strings/double/bad_input", bad_input_string);
+ g_test_add_func("/common/strings/double/trailing_chars", trailing_chars);
+
+ // Test for numeric issues
+ g_test_add_func("/common/strings/double/typical", typical_case);
+ g_test_add_func("/common/strings/double/overflow", double_overflow);
+ g_test_add_func("/common/strings/double/underflow", double_underflow);
+
+ return g_test_run();
+}
+
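
The overflow and underflow cases above rest on the standard strtod()/ERANGE contract. The actual pcmk__scan_double() implementation lives elsewhere in lib/common and is not shown in this diff; the following is only a rough sketch of that contract, with made-up names and return codes, to clarify what the tests expect:

#include <errno.h>
#include <math.h>    // fabs(), HUGE_VAL
#include <stdlib.h>  // strtod()

// Illustrative return codes; the real ones are Pacemaker's pcmk_rc_* values
enum { DEMO_OK = 0, DEMO_BAD_INPUT = 1, DEMO_OVERFLOW = 2, DEMO_UNDERFLOW = 3 };

static int
demo_scan_double(const char *text, double *result)
{
    char *end = NULL;

    if (text == NULL) {
        return DEMO_BAD_INPUT;   // the real function also handles default_text

    }
    errno = 0;
    *result = strtod(text, &end);

    if (end == text) {
        return DEMO_BAD_INPUT;   // nothing parsed ("asdf", "")

    } else if (errno == ERANGE) {
        if (fabs(*result) >= HUGE_VAL) {
            return DEMO_OVERFLOW;    // magnitude too large: +/-HUGE_VAL
        }
        return DEMO_UNDERFLOW;       // magnitude too small: denormal or 0
    }
    return DEMO_OK;                  // "2.0asdf" still parses as 2.0
}
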
diff --git a/lib/common/tests/strings/pcmk__str_any_of.c b/lib/common/tests/strings/pcmk__str_any_of.c
index 44cc329f63..0a45638a9d 100644
--- a/lib/common/tests/strings/pcmk__str_any_of.c
+++ b/lib/common/tests/strings/pcmk__str_any_of.c
@@ -1,45 +1,45 @@
#include <glib.h>
#include <crm_internal.h>
static void
empty_input_list(void) {
- g_assert(pcmk__strcase_any_of("xxx", NULL) == false);
- g_assert(pcmk__str_any_of("xxx", NULL) == false);
- g_assert(pcmk__strcase_any_of("", NULL) == false);
- g_assert(pcmk__str_any_of("", NULL) == false);
+ g_assert_false(pcmk__strcase_any_of("xxx", NULL));
+ g_assert_false(pcmk__str_any_of("xxx", NULL));
+ g_assert_false(pcmk__strcase_any_of("", NULL));
+ g_assert_false(pcmk__str_any_of("", NULL));
}
static void
empty_string(void) {
- g_assert(pcmk__strcase_any_of("", "xxx", "yyy", NULL) == false);
- g_assert(pcmk__str_any_of("", "xxx", "yyy", NULL) == false);
- g_assert(pcmk__strcase_any_of(NULL, "xxx", "yyy", NULL) == false);
- g_assert(pcmk__str_any_of(NULL, "xxx", "yyy", NULL) == false);
+ g_assert_false(pcmk__strcase_any_of("", "xxx", "yyy", NULL));
+ g_assert_false(pcmk__str_any_of("", "xxx", "yyy", NULL));
+ g_assert_false(pcmk__strcase_any_of(NULL, "xxx", "yyy", NULL));
+ g_assert_false(pcmk__str_any_of(NULL, "xxx", "yyy", NULL));
}
static void
in_list(void) {
- g_assert(pcmk__strcase_any_of("xxx", "aaa", "bbb", "xxx", NULL) == true);
- g_assert(pcmk__str_any_of("xxx", "aaa", "bbb", "xxx", NULL) == true);
- g_assert(pcmk__strcase_any_of("XXX", "aaa", "bbb", "xxx", NULL) == true);
- g_assert(pcmk__str_any_of("XXX", "aaa", "bbb", "xxx", NULL) == false);
+ g_assert_true(pcmk__strcase_any_of("xxx", "aaa", "bbb", "xxx", NULL));
+ g_assert_true(pcmk__str_any_of("xxx", "aaa", "bbb", "xxx", NULL));
+ g_assert_true(pcmk__strcase_any_of("XXX", "aaa", "bbb", "xxx", NULL));
}
static void
not_in_list(void) {
- g_assert(pcmk__strcase_any_of("xxx", "aaa", "bbb", NULL) == false);
- g_assert(pcmk__str_any_of("xxx", "aaa", "bbb", NULL) == false);
- g_assert(pcmk__str_any_of("AAA", "aaa", "bbb", NULL) == false);
+ g_assert_false(pcmk__strcase_any_of("xxx", "aaa", "bbb", NULL));
+ g_assert_false(pcmk__str_any_of("xxx", "aaa", "bbb", NULL));
+ g_assert_false(pcmk__str_any_of("AAA", "aaa", "bbb", NULL));
}
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/common/strings/any_of/empty_list", empty_input_list);
g_test_add_func("/common/strings/any_of/empty_string", empty_string);
g_test_add_func("/common/strings/any_of/in", in_list);
g_test_add_func("/common/strings/any_of/not_in", not_in_list);
return g_test_run();
}
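
Both functions exercised above take a NULL-terminated list of candidate strings, which is why every call in these tests ends with NULL. The real implementations are not part of this diff; a generic sketch of that varargs convention (names here are illustrative) looks like:

#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <strings.h>  // strcasecmp()

// Return true if s matches any string in the NULL-terminated candidate list
static bool
demo_any_of(const char *s, bool case_sensitive, ...)
{
    va_list ap;
    bool found = false;

    if (s == NULL) {
        return false;
    }
    va_start(ap, case_sensitive);
    for (const char *candidate = va_arg(ap, const char *);
         candidate != NULL; candidate = va_arg(ap, const char *)) {
        if (case_sensitive ? (strcmp(s, candidate) == 0)
                           : (strcasecmp(s, candidate) == 0)) {
            found = true;
            break;
        }
    }
    va_end(ap);
    return found;
}

For instance, demo_any_of("XXX", false, "aaa", "xxx", NULL) returns true, mirroring the case-insensitive pcmk__strcase_any_of() check above, while an empty candidate list always returns false.
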
diff --git a/lib/common/tests/utils/pcmk_str_is_infinity.c b/lib/common/tests/utils/pcmk_str_is_infinity.c
index d790921d97..72e26cb195 100644
--- a/lib/common/tests/utils/pcmk_str_is_infinity.c
+++ b/lib/common/tests/utils/pcmk_str_is_infinity.c
@@ -1,48 +1,54 @@
#include <glib.h>
#include <crm_internal.h>
static void
-uppercase_str_passes(void) {
- g_assert(pcmk_str_is_infinity("INFINITY") == TRUE);
- g_assert(pcmk_str_is_infinity("+INFINITY") == TRUE);
+uppercase_str_passes(void)
+{
+ g_assert_true(pcmk_str_is_infinity("INFINITY"));
+ g_assert_true(pcmk_str_is_infinity("+INFINITY"));
}
static void
-mixed_case_str_fails(void) {
- g_assert(pcmk_str_is_infinity("infinity") == FALSE);
- g_assert(pcmk_str_is_infinity("+infinity") == FALSE);
- g_assert(pcmk_str_is_infinity("Infinity") == FALSE);
- g_assert(pcmk_str_is_infinity("+Infinity") == FALSE);
+mixed_case_str_fails(void)
+{
+ g_assert_false(pcmk_str_is_infinity("infinity"));
+ g_assert_false(pcmk_str_is_infinity("+infinity"));
+ g_assert_false(pcmk_str_is_infinity("Infinity"));
+ g_assert_false(pcmk_str_is_infinity("+Infinity"));
}
static void
-added_whitespace_fails(void) {
- g_assert(pcmk_str_is_infinity(" INFINITY") == FALSE);
- g_assert(pcmk_str_is_infinity("INFINITY ") == FALSE);
- g_assert(pcmk_str_is_infinity(" INFINITY ") == FALSE);
- g_assert(pcmk_str_is_infinity("+ INFINITY") == FALSE);
+added_whitespace_fails(void)
+{
+ g_assert_false(pcmk_str_is_infinity(" INFINITY"));
+ g_assert_false(pcmk_str_is_infinity("INFINITY "));
+ g_assert_false(pcmk_str_is_infinity(" INFINITY "));
+ g_assert_false(pcmk_str_is_infinity("+ INFINITY"));
}
static void
-empty_str_fails(void) {
- g_assert(pcmk_str_is_infinity(NULL) == FALSE);
- g_assert(pcmk_str_is_infinity("") == FALSE);
+empty_str_fails(void)
+{
+ g_assert_false(pcmk_str_is_infinity(NULL));
+ g_assert_false(pcmk_str_is_infinity(""));
}
static void
-minus_infinity_fails(void) {
- g_assert(pcmk_str_is_infinity("-INFINITY") == FALSE);
+minus_infinity_fails(void)
+{
+ g_assert_false(pcmk_str_is_infinity("-INFINITY"));
}
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/common/utils/infinity/uppercase", uppercase_str_passes);
g_test_add_func("/common/utils/infinity/mixed_case", mixed_case_str_fails);
g_test_add_func("/common/utils/infinity/whitespace", added_whitespace_fails);
g_test_add_func("/common/utils/infinity/empty", empty_str_fails);
g_test_add_func("/common/utils/infinity/minus_infinity", minus_infinity_fails);
return g_test_run();
}
diff --git a/lib/common/tests/utils/pcmk_str_is_minus_infinity.c b/lib/common/tests/utils/pcmk_str_is_minus_infinity.c
index 7dab4c5c0d..fe8b080fbe 100644
--- a/lib/common/tests/utils/pcmk_str_is_minus_infinity.c
+++ b/lib/common/tests/utils/pcmk_str_is_minus_infinity.c
@@ -1,45 +1,51 @@
#include <glib.h>
#include <crm_internal.h>
static void
-uppercase_str_passes(void) {
- g_assert(pcmk_str_is_minus_infinity("-INFINITY") == TRUE);
+uppercase_str_passes(void)
+{
+ g_assert_true(pcmk_str_is_minus_infinity("-INFINITY"));
}
static void
-mixed_case_str_fails(void) {
- g_assert(pcmk_str_is_minus_infinity("-infinity") == FALSE);
- g_assert(pcmk_str_is_minus_infinity("-Infinity") == FALSE);
+mixed_case_str_fails(void)
+{
+ g_assert_false(pcmk_str_is_minus_infinity("-infinity"));
+ g_assert_false(pcmk_str_is_minus_infinity("-Infinity"));
}
static void
-added_whitespace_fails(void) {
- g_assert(pcmk_str_is_minus_infinity(" -INFINITY") == FALSE);
- g_assert(pcmk_str_is_minus_infinity("-INFINITY ") == FALSE);
- g_assert(pcmk_str_is_minus_infinity(" -INFINITY ") == FALSE);
- g_assert(pcmk_str_is_minus_infinity("- INFINITY") == FALSE);
+added_whitespace_fails(void)
+{
+ g_assert_false(pcmk_str_is_minus_infinity(" -INFINITY"));
+ g_assert_false(pcmk_str_is_minus_infinity("-INFINITY "));
+ g_assert_false(pcmk_str_is_minus_infinity(" -INFINITY "));
+ g_assert_false(pcmk_str_is_minus_infinity("- INFINITY"));
}
static void
-empty_str_fails(void) {
- g_assert(pcmk_str_is_minus_infinity(NULL) == FALSE);
- g_assert(pcmk_str_is_minus_infinity("") == FALSE);
+empty_str_fails(void)
+{
+ g_assert_false(pcmk_str_is_minus_infinity(NULL));
+ g_assert_false(pcmk_str_is_minus_infinity(""));
}
static void
-infinity_fails(void) {
- g_assert(pcmk_str_is_minus_infinity("INFINITY") == FALSE);
+infinity_fails(void)
+{
+ g_assert_false(pcmk_str_is_minus_infinity("INFINITY"));
}
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/common/utils/minus_infinity/uppercase", uppercase_str_passes);
g_test_add_func("/common/utils/minus_infinity/mixed_case", mixed_case_str_fails);
g_test_add_func("/common/utils/minus_infinity/whitespace", added_whitespace_fails);
g_test_add_func("/common/utils/minus_infinity/empty", empty_str_fails);
g_test_add_func("/common/utils/minus_infinity/infinity", infinity_fails);
return g_test_run();
}
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 74034c4b8e..d186be7877 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -1,1407 +1,1476 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/rules_internal.h>
#include <crm/pengine/internal.h>
#include <sys/types.h>
#include <regex.h>
#include <ctype.h>
CRM_TRACE_INIT_DATA(pe_rules);
/*!
* \brief Evaluate any rules contained by given XML element
*
* \param[in] xml XML element to check for rules
* \param[in] node_hash Node attributes to use when evaluating expressions
* \param[in] now Time to use when evaluating expressions
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if no rules are present, or if any rule present is in effect, else FALSE
*/
gboolean
pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_rules(ruleset, &rule_data, next_change);
}
gboolean
pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_expr(rule, &rule_data, next_change);
}
/*!
* \brief Evaluate one rule subelement (pass/fail)
*
* A rule element may contain another rule, a node attribute expression, or a
* date expression. Given any one of those, evaluate it and return whether it
* passed.
*
* \param[in] expr Rule subelement XML
* \param[in] node_hash Node attributes to use when evaluating expression
* \param[in] role Resource role to use when evaluating expression
* \param[in] now Time to use when evaluating expression
* \param[out] next_change If not NULL, set to when evaluation will change
* \param[in] match_data If not NULL, resource back-references and params
*
* \return TRUE if expression is in effect under given conditions, else FALSE
*/
gboolean
pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_subexpr(expr, &rule_data, next_change);
}
enum expression_type
find_expression_type(xmlNode * expr)
{
const char *tag = NULL;
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
tag = crm_element_name(expr);
if (pcmk__str_eq(tag, "date_expression", pcmk__str_casei)) {
return time_expr;
} else if (pcmk__str_eq(tag, "rsc_expression", pcmk__str_casei)) {
return rsc_expr;
} else if (pcmk__str_eq(tag, "op_expression", pcmk__str_casei)) {
return op_expr;
} else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_casei)) {
return nested_rule;
} else if (!pcmk__str_eq(tag, "expression", pcmk__str_casei)) {
return not_expr;
} else if (pcmk__strcase_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
return loc_expr;
} else if (pcmk__str_eq(attr, CRM_ATTR_ROLE, pcmk__str_casei)) {
return role_expr;
#if ENABLE_VERSIONED_ATTRS
} else if (pcmk__str_eq(attr, CRM_ATTR_RA_VERSION, pcmk__str_casei)) {
return version_expr;
#endif
}
return attr_expr;
}
gboolean
pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = role,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_role_expr(expr, &rule_data);
}
gboolean
pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_attr_expr(expr, &rule_data);
}
/* As per the nethack rules:
*
* moon period = 29.53058 days ~= 30, year = 365.2422 days
* days moon phase advances on first day of year compared to preceding year
* = 365.2422 - 12*29.53058 ~= 11
* years in Metonic cycle (time until same phases fall on the same days of
* the month) = 18.6 ~= 19
* moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30
* (29 as initial condition)
* current phase in days = first day phase + days elapsed in year
* 6 moons ~= 177 days
* 177 ~= 8 reported phases * 22
* + 11/22 for rounding
*
* 0-7, with 0: new, 4: full
*/
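/* Worked example of the formula above (illustrative only): for y = 2021 and
 * diy = 100, goldn = (2021 % 19) + 1 = 8 and epact = (11 * 8 + 18) % 30 = 16
 * (neither special case applies), so the reported phase is
 * (((((100 + 16) * 6) + 11) % 177) / 22) & 7 = (707 % 177) / 22 & 7
 * = (176 / 22) & 7 = 8 & 7 = 0, i.e. a new moon.
 */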
static int
phase_of_the_moon(crm_time_t * now)
{
uint32_t epact, diy, goldn;
uint32_t y;
crm_time_get_ordinal(now, &y, &diy);
goldn = (y % 19) + 1;
epact = (11 * goldn + 18) % 30;
if ((epact == 25 && goldn > 11) || epact == 24)
epact++;
return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7);
}
static int
check_one(xmlNode *cron_spec, const char *xml_field, uint32_t time_field) {
int rc = pcmk_rc_undetermined;
const char *value = crm_element_value(cron_spec, xml_field);
long long low, high;
if (value == NULL) {
/* Return pe_date_result_undetermined if the field is missing. */
goto bail;
}
if (pcmk__parse_ll_range(value, &low, &high) == pcmk_rc_unknown_format) {
goto bail;
} else if (low == high) {
/* A single number was given, not a range. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low != -1 && high != -1) {
/* This is a range with both bounds. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low == -1) {
/* This is a range with no starting value. */
rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range;
} else if (high == -1) {
/* This is a range with no ending value. */
rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range;
}
bail:
if (rc == pcmk_rc_within_range) {
crm_debug("Condition '%s' in %s: passed", value, xml_field);
} else {
crm_debug("Condition '%s' in %s: failed", value, xml_field);
}
return rc;
}
static gboolean
check_passes(int rc) {
/* _within_range is obvious. _undetermined is a pass because
* this is the return value if a field is not given. In this
* case, we just want to ignore it and check other fields to
* see if they place some restriction on what can pass.
*/
return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined;
}
#define CHECK_ONE(spec, name, var) do { \
int subpart_rc = check_one(spec, name, var); \
if (check_passes(subpart_rc) == FALSE) { \
return subpart_rc; \
} \
} while (0)
int
pe_cron_range_satisfied(crm_time_t * now, xmlNode * cron_spec)
{
uint32_t h, m, s, y, d, w;
CRM_CHECK(now != NULL, return pcmk_rc_op_unsatisfied);
crm_time_get_gregorian(now, &y, &m, &d);
CHECK_ONE(cron_spec, "years", y);
CHECK_ONE(cron_spec, "months", m);
CHECK_ONE(cron_spec, "monthdays", d);
crm_time_get_timeofday(now, &h, &m, &s);
CHECK_ONE(cron_spec, "hours", h);
CHECK_ONE(cron_spec, "minutes", m);
CHECK_ONE(cron_spec, "seconds", s);
crm_time_get_ordinal(now, &y, &d);
CHECK_ONE(cron_spec, "yeardays", d);
crm_time_get_isoweek(now, &y, &w, &d);
CHECK_ONE(cron_spec, "weekyears", y);
CHECK_ONE(cron_spec, "weeks", w);
CHECK_ONE(cron_spec, "weekdays", d);
CHECK_ONE(cron_spec, "moon", phase_of_the_moon(now));
/* If we get here, either no fields were specified (which is success), or all
* the fields that were specified had their conditions met (which is also a
* success). Thus, the result is success.
*/
return pcmk_rc_ok;
}
#define update_field(xml_field, time_fn) \
value = crm_element_value(duration_spec, xml_field); \
if(value != NULL) { \
int value_i = crm_parse_int(value, "0"); \
time_fn(end, value_i); \
}
crm_time_t *
pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec)
{
crm_time_t *end = NULL;
const char *value = NULL;
end = crm_time_new(NULL);
crm_time_set(end, start);
update_field("years", crm_time_add_years);
update_field("months", crm_time_add_months);
update_field("weeks", crm_time_add_weeks);
update_field("days", crm_time_add_days);
update_field("hours", crm_time_add_hours);
update_field("minutes", crm_time_add_minutes);
update_field("seconds", crm_time_add_seconds);
return end;
}
/*!
* \internal
* \brief Test a date expression (pass/fail) for a specific time
*
* \param[in] time_expr date_expression XML
* \param[in] now Time for which to evaluate expression
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if date expression is in effect at given time, FALSE otherwise
*/
gboolean
pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
switch (pe__eval_date_expr(expr, &rule_data, next_change)) {
case pcmk_rc_within_range:
case pcmk_rc_ok:
return TRUE;
default:
return FALSE;
}
}
// Set next_change to t if t is earlier
static void
crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t)
{
if ((next_change != NULL) && (t != NULL)) {
if (!crm_time_is_defined(next_change)
|| (crm_time_compare(t, next_change) < 0)) {
crm_time_set(next_change, t);
}
}
}
/*!
* \internal
* \brief Evaluate a date expression for a specific time
*
* \param[in] time_expr date_expression XML
* \param[in] now Time for which to evaluate expression
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return Standard Pacemaker return code
*/
int
pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_date_expr(expr, &rule_data, next_change);
}
// Information about a block of nvpair elements
typedef struct sorted_set_s {
int score; // This block's score for sorting
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
} sorted_set_t;
static gint
sort_pairs(gconstpointer a, gconstpointer b)
{
const sorted_set_t *pair_a = a;
const sorted_set_t *pair_b = b;
if (a == NULL && b == NULL) {
return 0;
} else if (a == NULL) {
return 1;
} else if (b == NULL) {
return -1;
}
if (pcmk__str_eq(pair_a->name, pair_a->special_name, pcmk__str_casei)) {
return -1;
} else if (pcmk__str_eq(pair_b->name, pair_a->special_name, pcmk__str_casei)) {
return 1;
}
if (pair_a->score < pair_b->score) {
return 1;
} else if (pair_a->score > pair_b->score) {
return -1;
}
return 0;
}
static void
populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlNode * top)
{
const char *name = NULL;
const char *value = NULL;
const char *old_value = NULL;
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
name = crm_element_name(list->children);
if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
list = list->children;
}
for (an_attr = __xml_first_child_element(list); an_attr != NULL;
an_attr = __xml_next_element(an_attr)) {
if (pcmk__str_eq((const char *)an_attr->name, XML_CIB_TAG_NVPAIR, pcmk__str_none)) {
xmlNode *ref_nvpair = expand_idref(an_attr, top);
name = crm_element_value(an_attr, XML_NVPAIR_ATTR_NAME);
if (name == NULL) {
name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME);
}
value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE);
if (value == NULL) {
value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE);
}
if (name == NULL || value == NULL) {
continue;
}
old_value = g_hash_table_lookup(hash, name);
if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
if (old_value) {
crm_trace("Removing value for %s (%s)", name, value);
g_hash_table_remove(hash, name);
}
continue;
} else if (old_value == NULL) {
crm_trace("Setting attribute: %s = %s", name, value);
g_hash_table_insert(hash, strdup(name), strdup(value));
} else if (overwrite) {
crm_debug("Overwriting value of %s: %s -> %s", name, old_value, value);
g_hash_table_replace(hash, strdup(name), strdup(value));
}
}
}
}
#if ENABLE_VERSIONED_ATTRS
static xmlNode*
get_versioned_rule(xmlNode * attr_set)
{
xmlNode * rule = NULL;
xmlNode * expr = NULL;
for (rule = __xml_first_child_element(attr_set); rule != NULL;
rule = __xml_next_element(rule)) {
if (pcmk__str_eq((const char *)rule->name, XML_TAG_RULE, pcmk__str_none)) {
for (expr = __xml_first_child_element(rule); expr != NULL;
expr = __xml_next_element(expr)) {
if (find_expression_type(expr) == version_expr) {
return rule;
}
}
}
}
return NULL;
}
static void
add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs)
{
xmlNode *attr_set_copy = NULL;
xmlNode *rule = NULL;
xmlNode *expr = NULL;
if (!attr_set || !versioned_attrs) {
return;
}
attr_set_copy = copy_xml(attr_set);
rule = get_versioned_rule(attr_set_copy);
if (!rule) {
free_xml(attr_set_copy);
return;
}
expr = __xml_first_child_element(rule);
while (expr != NULL) {
if (find_expression_type(expr) != version_expr) {
xmlNode *node = expr;
expr = __xml_next_element(expr);
free_xml(node);
} else {
expr = __xml_next_element(expr);
}
}
add_node_nocopy(versioned_attrs, NULL, attr_set_copy);
}
#endif
typedef struct unpack_data_s {
gboolean overwrite;
void *hash;
crm_time_t *next_change;
pe_rule_eval_data_t *rule_data;
xmlNode *top;
} unpack_data_t;
static void
unpack_attr_set(gpointer data, gpointer user_data)
{
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data,
unpack_data->next_change)) {
return;
}
#if ENABLE_VERSIONED_ATTRS
if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash &&
g_hash_table_lookup_extended(unpack_data->rule_data->node_hash,
CRM_ATTR_RA_VERSION, NULL, NULL))) {
// we haven't actually tested versioned expressions yet
return;
}
#endif
crm_trace("Adding attributes from %s", pair->name);
populate_hash(pair->attr_set, unpack_data->hash, unpack_data->overwrite, unpack_data->top);
}
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_attr_set(gpointer data, gpointer user_data)
{
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
if (pe_eval_rules(pair->attr_set, unpack_data->rule_data,
unpack_data->next_change)) {
add_versioned_attributes(pair->attr_set, unpack_data->hash);
}
}
#endif
/*!
* \internal
* \brief Create a sorted list of nvpair blocks
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only get blocks of this element type
* \param[in] always_first If not NULL, sort block with this ID as first
*
* \return List of sorted_set_t entries for nvpair blocks
*/
static GList *
make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
const char *always_first)
{
GListPtr unsorted = NULL;
const char *score = NULL;
sorted_set_t *pair = NULL;
xmlNode *attr_set = NULL;
if (xml_obj == NULL) {
crm_trace("No instance attributes");
return NULL;
}
crm_trace("Checking for attributes");
for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL;
attr_set = __xml_next_element(attr_set)) {
/* Uncertain if set_name == NULL check is strictly necessary here */
if (pcmk__str_eq(set_name, (const char *)attr_set->name, pcmk__str_null_matches)) {
pair = NULL;
attr_set = expand_idref(attr_set, top);
if (attr_set == NULL) {
continue;
}
pair = calloc(1, sizeof(sorted_set_t));
pair->name = ID(attr_set);
pair->special_name = always_first;
pair->attr_set = attr_set;
score = crm_element_value(attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
unsorted = g_list_prepend(unsorted, pair);
}
}
return g_list_sort(unsorted, sort_pairs);
}
/*!
* \internal
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only use blocks of this element type
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
* \param[in] rule_data Matching parameters to use when unpacking
* \param[out] next_change If not NULL, set to when rule evaluation will change
* \param[in] unpack_func Function to call to unpack each block
*/
static void
unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name,
void *hash, const char *always_first, gboolean overwrite,
pe_rule_eval_data_t *rule_data, crm_time_t *next_change,
GFunc unpack_func)
{
GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
if (pairs) {
unpack_data_t data = {
.hash = hash,
.overwrite = overwrite,
.next_change = next_change,
.top = top,
.rule_data = rule_data
};
g_list_foreach(pairs, unpack_func, &data);
g_list_free_full(pairs, free);
}
}
void
pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
overwrite, rule_data, next_change, unpack_attr_set);
}
/*!
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name Element name to identify nvpair blocks
* \param[in] node_hash Node attributes to use when evaluating rules
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
* \param[in] now Time to use when evaluating rules
* \param[out] next_change If not NULL, set to when rule evaluation will change
*/
void
pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash,
always_first, overwrite, next_change);
}
#if ENABLE_VERSIONED_ATTRS
void
pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, xmlNode *hash,
crm_time_t *next_change)
{
unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data,
next_change, unpack_versioned_attr_set);
}
void
pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
xmlNode *hash, crm_time_t *now,
crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE,
&rule_data, next_change, unpack_versioned_attr_set);
}
#endif
char *
pe_expand_re_matches(const char *string, pe_re_match_data_t *match_data)
{
size_t len = 0;
int i;
const char *p, *last_match_index;
char *p_dst, *result = NULL;
if (pcmk__str_empty(string) || !match_data) {
return NULL;
}
p = last_match_index = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
len += p - last_match_index + (match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so);
last_match_index = p + 2;
}
p++;
}
p++;
}
len += p - last_match_index + 1;
/* FIXME: Excessive? */
if (len - 1 <= 0) {
return NULL;
}
p_dst = result = calloc(1, len);
p = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
/* rm_eo can be equal to rm_so, but then there is nothing to do */
int match_len = match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so;
memcpy(p_dst, match_data->string + match_data->pmatch[i].rm_so, match_len);
p_dst += match_len;
}
p++;
} else {
*(p_dst) = *(p);
p_dst++;
}
p++;
}
return result;
}
#if ENABLE_VERSIONED_ATTRS
GHashTable*
pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version)
{
GHashTable *hash = crm_str_table_new();
if (versioned_params && ra_version) {
GHashTable *node_hash = crm_str_table_new();
xmlNode *attr_set = __xml_first_child_element(versioned_params);
if (attr_set) {
g_hash_table_insert(node_hash, strdup(CRM_ATTR_RA_VERSION),
strdup(ra_version));
pe_unpack_nvpairs(NULL, versioned_params,
crm_element_name(attr_set), node_hash, hash, NULL,
FALSE, NULL, NULL);
}
g_hash_table_destroy(node_hash);
}
return hash;
}
#endif
gboolean
pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
// If there are no rules, pass by default
gboolean ruleset_default = TRUE;
for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
rule != NULL; rule = crm_next_same_xml(rule)) {
ruleset_default = FALSE;
if (pe_eval_expr(rule, rule_data, next_change)) {
/* Only the deprecated "lifetime" element of location constraints
* may contain more than one rule at the top level -- the schema
* limits a block of nvpairs to a single top-level rule. So, this
* effectively means that a lifetime is active if any rule it
* contains is active.
*/
return TRUE;
}
}
return ruleset_default;
}
gboolean
pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
xmlNode *expr = NULL;
gboolean test = TRUE;
gboolean empty = TRUE;
gboolean passed = TRUE;
gboolean do_and = TRUE;
const char *value = NULL;
rule = expand_idref(rule, NULL);
value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
if (pcmk__str_eq(value, "or", pcmk__str_casei)) {
do_and = FALSE;
passed = FALSE;
}
crm_trace("Testing rule %s", ID(rule));
for (expr = __xml_first_child_element(rule); expr != NULL;
expr = __xml_next_element(expr)) {
test = pe_eval_subexpr(expr, rule_data, next_change);
empty = FALSE;
if (test && do_and == FALSE) {
crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
return TRUE;
} else if (test == FALSE && do_and) {
crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
return FALSE;
}
}
if (empty) {
crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
}
crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
return passed;
}
gboolean
pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
gboolean accept = FALSE;
const char *uname = NULL;
switch (find_expression_type(expr)) {
case nested_rule:
accept = pe_eval_expr(expr, rule_data, next_change);
break;
case attr_expr:
case loc_expr:
/* these expressions can never succeed if there is
* no node to compare with
*/
if (rule_data->node_hash != NULL) {
accept = pe__eval_attr_expr(expr, rule_data);
}
break;
case time_expr:
accept = pe_test_date_expression(expr, rule_data->now, next_change);
break;
case role_expr:
accept = pe__eval_role_expr(expr, rule_data);
break;
case rsc_expr:
accept = pe__eval_rsc_expr(expr, rule_data);
break;
case op_expr:
accept = pe__eval_op_expr(expr, rule_data);
break;
#if ENABLE_VERSIONED_ATTRS
case version_expr:
if (rule_data->node_hash &&
g_hash_table_lookup_extended(rule_data->node_hash,
CRM_ATTR_RA_VERSION, NULL, NULL)) {
accept = pe__eval_attr_expr(expr, rule_data);
} else {
// We will test this once the resource agent version is available
accept = TRUE;
}
break;
#endif
default:
CRM_CHECK(FALSE /* bad type */ , return FALSE);
accept = FALSE;
}
if (rule_data->node_hash) {
uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME);
}
crm_trace("Expression %s %s on %s",
ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
return accept;
}
+/*!
+ * \internal
+ * \brief Compare two values in a rule's node attribute expression
+ *
+ * \param[in] l_val Value on left-hand side of comparison
+ * \param[in] r_val Value on right-hand side of comparison
+ * \param[in] type How to interpret the values (allowed values:
+ * \c "string", \c "integer", \c "number",
+ * \c "version", \c NULL)
+ * \param[in] op Type of comparison
+ *
+ * \return -1 if <tt>(l_val < r_val)</tt>,
+ * 0 if <tt>(l_val == r_val)</tt>,
+ * 1 if <tt>(l_val > r_val)</tt>
+ */
+static int
+compare_attr_expr_vals(const char *l_val, const char *r_val, const char *type,
+ const char *op)
+{
+ int cmp = 0;
+
+ if (l_val != NULL && r_val != NULL) {
+ if (type == NULL) {
+ if (pcmk__strcase_any_of(op, "lt", "lte", "gt", "gte", NULL)) {
+ if (pcmk__char_in_any_str('.', l_val, r_val, NULL)) {
+ type = "number";
+ } else {
+ type = "integer";
+ }
+
+ } else {
+ type = "string";
+ }
+ crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
+ }
+
+ if (pcmk__str_eq(type, "string", pcmk__str_casei)) {
+ cmp = strcasecmp(l_val, r_val);
+
+ } else if (pcmk__str_eq(type, "integer", pcmk__str_casei)) {
+ long long l_val_num = crm_parse_ll(l_val, NULL);
+ int rc1 = errno;
+
+ long long r_val_num = crm_parse_ll(r_val, NULL);
+ int rc2 = errno;
+
+ if (rc1 == 0 && rc2 == 0) {
+ if (l_val_num < r_val_num) {
+ cmp = -1;
+ } else if (l_val_num > r_val_num) {
+ cmp = 1;
+ } else {
+ cmp = 0;
+ }
+
+ } else {
+ crm_debug("Integer parse error. Comparing %s and %s as strings",
+ l_val, r_val);
+ cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
+ }
+
+ } else if (pcmk__str_eq(type, "number", pcmk__str_casei)) {
+ double l_val_num;
+ double r_val_num;
+
+ int rc1 = pcmk__scan_double(l_val, &l_val_num, NULL, NULL);
+ int rc2 = pcmk__scan_double(r_val, &r_val_num, NULL, NULL);
+
+ if (rc1 == pcmk_rc_ok && rc2 == pcmk_rc_ok) {
+ if (l_val_num < r_val_num) {
+ cmp = -1;
+ } else if (l_val_num > r_val_num) {
+ cmp = 1;
+ } else {
+ cmp = 0;
+ }
+
+ } else {
+ crm_debug("Floating-point parse error. Comparing %s and %s as "
+ "strings", l_val, r_val);
+ cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
+ }
+
+ } else if (pcmk__str_eq(type, "version", pcmk__str_casei)) {
+ cmp = compare_version(l_val, r_val);
+
+ }
+
+ } else if (l_val == NULL && r_val == NULL) {
+ cmp = 0;
+ } else if (r_val == NULL) {
+ cmp = 1;
+ } else { // l_val == NULL && r_val != NULL
+ cmp = -1;
+ }
+
+ return cmp;
+}
+
+/*!
+ * \internal
+ * \brief Check whether an attribute expression evaluates to \c true
+ *
+ * \param[in] l_val Value on left-hand side of comparison
+ * \param[in] r_val Value on right-hand side of comparison
+ * \param[in] type How to interpret the values (allowed values:
+ * \c "string", \c "integer", \c "number",
+ * \c "version", \c NULL)
+ * \param[in] op Type of comparison.
+ *
+ * \return \c true if expression evaluates to \c true, \c false
+ * otherwise
+ */
+static bool
+accept_attr_expr(const char *l_val, const char *r_val, const char *type,
+ const char *op)
+{
+ int cmp;
+
+ if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
+ return (l_val != NULL);
+
+ } else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
+ return (l_val == NULL);
+
+ }
+
+ cmp = compare_attr_expr_vals(l_val, r_val, type, op);
+
+ if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
+ return (cmp == 0);
+
+ } else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
+ return (cmp != 0);
+
+ } else if (l_val == NULL || r_val == NULL) {
+ // The comparison is meaningless from this point on
+ return false;
+
+ } else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
+ return (cmp < 0);
+
+ } else if (pcmk__str_eq(op, "lte", pcmk__str_casei)) {
+ return (cmp <= 0);
+
+ } else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
+ return (cmp > 0);
+
+ } else if (pcmk__str_eq(op, "gte", pcmk__str_casei)) {
+ return (cmp >= 0);
+ }
+
+ return false; // Should never reach this point
+}
+
gboolean
pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
- gboolean accept = FALSE;
gboolean attr_allocated = FALSE;
- int cmp = 0;
const char *h_val = NULL;
GHashTable *table = NULL;
const char *op = NULL;
const char *type = NULL;
const char *attr = NULL;
const char *value = NULL;
const char *value_source = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
if (attr == NULL || op == NULL) {
pe_err("Invalid attribute or operation in expression"
" (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value));
return FALSE;
}
if (rule_data->match_data) {
if (rule_data->match_data->re) {
char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re);
if (resolved_attr) {
attr = (const char *) resolved_attr;
attr_allocated = TRUE;
}
}
if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) {
table = rule_data->match_data->params;
} else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) {
table = rule_data->match_data->meta;
}
}
if (table) {
const char *param_name = value;
const char *param_value = NULL;
if (param_name && param_name[0]) {
if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) {
value = param_value;
}
}
}
if (rule_data->node_hash != NULL) {
h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr);
}
if (attr_allocated) {
free((char *)attr);
attr = NULL;
}
- if (value != NULL && h_val != NULL) {
- if (type == NULL) {
- if (pcmk__strcase_any_of(op, "lt", "lte", "gt", "gte", NULL)) {
- type = "number";
-
- } else {
- type = "string";
- }
- crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
- }
-
- if (pcmk__str_eq(type, "string", pcmk__str_casei)) {
- cmp = strcasecmp(h_val, value);
-
- } else if (pcmk__str_eq(type, "number", pcmk__str_casei)) {
- int h_val_f = crm_parse_int(h_val, NULL);
- int value_f = crm_parse_int(value, NULL);
-
- if (h_val_f < value_f) {
- cmp = -1;
- } else if (h_val_f > value_f) {
- cmp = 1;
- } else {
- cmp = 0;
- }
-
- } else if (pcmk__str_eq(type, "version", pcmk__str_casei)) {
- cmp = compare_version(h_val, value);
-
- }
-
- } else if (value == NULL && h_val == NULL) {
- cmp = 0;
- } else if (value == NULL) {
- cmp = 1;
- } else {
- cmp = -1;
- }
-
- if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
- if (h_val != NULL) {
- accept = TRUE;
- }
-
- } else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
- if (h_val == NULL) {
- accept = TRUE;
- }
-
- } else if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
- if ((h_val == value) || cmp == 0) {
- accept = TRUE;
- }
-
- } else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
- if ((h_val == NULL && value != NULL)
- || (h_val != NULL && value == NULL)
- || cmp != 0) {
- accept = TRUE;
- }
-
- } else if (value == NULL || h_val == NULL) {
- // The comparison is meaningless from this point on
- accept = FALSE;
-
- } else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
- if (cmp < 0) {
- accept = TRUE;
- }
-
- } else if (pcmk__str_eq(op, "lte", pcmk__str_casei)) {
- if (cmp <= 0) {
- accept = TRUE;
- }
-
- } else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
- if (cmp > 0) {
- accept = TRUE;
- }
+ return accept_attr_expr(h_val, value, type, op);
+}
- } else if (pcmk__str_eq(op, "gte", pcmk__str_casei)) {
- if (cmp >= 0) {
- accept = TRUE;
- }
- }
- return accept;
-}
int
pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
crm_time_t *start = NULL;
crm_time_t *end = NULL;
const char *value = NULL;
const char *op = crm_element_value(expr, "operation");
xmlNode *duration_spec = NULL;
xmlNode *date_spec = NULL;
// "undetermined" will also be returned for parsing errors
int rc = pcmk_rc_undetermined;
crm_trace("Testing expression: %s", ID(expr));
duration_spec = first_named_child(expr, "duration");
date_spec = first_named_child(expr, "date_spec");
value = crm_element_value(expr, "start");
if (value != NULL) {
start = crm_time_new(value);
}
value = crm_element_value(expr, "end");
if (value != NULL) {
end = crm_time_new(value);
}
if (start != NULL && end == NULL && duration_spec != NULL) {
end = pe_parse_xml_duration(start, duration_spec);
}
if (pcmk__str_eq(op, "in_range", pcmk__str_null_matches | pcmk__str_casei)) {
if ((start == NULL) && (end == NULL)) {
// in_range requires at least one of start or end
} else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) {
rc = pcmk_rc_before_range;
crm_time_set_if_earlier(next_change, start);
} else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
if (end && next_change) {
// Evaluation doesn't change until second after end
crm_time_add_seconds(end, 1);
crm_time_set_if_earlier(next_change, end);
}
}
} else if (pcmk__str_eq(op, "date_spec", pcmk__str_casei)) {
rc = pe_cron_range_satisfied(rule_data->now, date_spec);
// @TODO set next_change appropriately
} else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
if (start == NULL) {
// gt requires start
} else if (crm_time_compare(rule_data->now, start) > 0) {
rc = pcmk_rc_within_range;
} else {
rc = pcmk_rc_before_range;
// Evaluation doesn't change until second after start
crm_time_add_seconds(start, 1);
crm_time_set_if_earlier(next_change, start);
}
} else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
if (end == NULL) {
// lt requires end
} else if (crm_time_compare(rule_data->now, end) < 0) {
rc = pcmk_rc_within_range;
crm_time_set_if_earlier(next_change, end);
} else {
rc = pcmk_rc_after_range;
}
}
crm_time_free(start);
crm_time_free(end);
return rc;
}
gboolean
pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) {
const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME);
const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL);
guint interval;
crm_trace("Testing op_defaults expression: %s", ID(expr));
if (rule_data->op_data == NULL) {
crm_trace("No operations data provided");
return FALSE;
}
interval = crm_parse_interval_spec(interval_s);
if (interval == 0 && errno != 0) {
crm_trace("Could not parse interval: %s", interval_s);
return FALSE;
}
if (interval_s != NULL && interval != rule_data->op_data->interval) {
crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval);
return FALSE;
}
if (!pcmk__str_eq(name, rule_data->op_data->op_name, pcmk__str_none)) {
crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name);
return FALSE;
}
return TRUE;
}
gboolean
pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
gboolean accept = FALSE;
const char *op = NULL;
const char *value = NULL;
if (rule_data->role == RSC_ROLE_UNKNOWN) {
return accept;
}
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
if (rule_data->role > RSC_ROLE_STARTED) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
if (text2role(value) == rule_data->role) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
accept = TRUE;
}
}
return accept;
}
gboolean
pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS);
const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER);
const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
crm_trace("Testing rsc_defaults expression: %s", ID(expr));
if (rule_data->rsc_data == NULL) {
crm_trace("No resource data provided");
return FALSE;
}
if (class != NULL &&
!pcmk__str_eq(class, rule_data->rsc_data->standard, pcmk__str_none)) {
crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard);
return FALSE;
}
if ((provider == NULL && rule_data->rsc_data->provider != NULL) ||
(provider != NULL && rule_data->rsc_data->provider == NULL) ||
!pcmk__str_eq(provider, rule_data->rsc_data->provider, pcmk__str_none)) {
crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider);
return FALSE;
}
if (type != NULL &&
!pcmk__str_eq(type, rule_data->rsc_data->agent, pcmk__str_none)) {
crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent);
return FALSE;
}
return TRUE;
}
// Deprecated functions kept only for backward API compatibility
gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now);
gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now);
gboolean pe_test_rule_re(xmlNode *rule, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_re_match_data_t *re_match_data);
gboolean pe_test_rule_full(xmlNode *rule, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data);
gboolean test_expression(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now);
gboolean pe_test_expression_re(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_re_match_data_t *re_match_data);
gboolean pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data);
void unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
GHashTable *hash, const char *always_first,
gboolean overwrite, crm_time_t *now);
gboolean
test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now)
{
return pe_evaluate_rules(ruleset, node_hash, now, NULL);
}
gboolean
test_rule(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_rule(rule, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_rule_re(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_rule(rule, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_rule_full(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, pe_match_data_t *match_data)
{
return pe_test_rule(rule, node_hash, role, now, NULL, match_data);
}
gboolean
test_expression(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_expression(expr, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_expression_re(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_expression(expr, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data)
{
return pe_test_expression(expr, node_hash, role, now, NULL, match_data);
}
void
unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
overwrite, &rule_data, NULL, unpack_attr_set);
}
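
The pe__eval_attr_expr() rework above changes how untyped numeric comparisons behave: the old code forced both sides through crm_parse_int(), so a value such as "2.5" silently compared as 2, while the new compare_attr_expr_vals() defaults to "number" (double) when either value contains a '.' and otherwise compares as long long via crm_parse_ll(). A small standalone illustration of that difference (the function names here are illustrative, not Pacemaker API):

#include <stdio.h>
#include <stdlib.h>   // strtol(), strtod()

// Old-style comparison: both sides truncated to integers
static int
compare_as_int(const char *l, const char *r)
{
    long l_num = strtol(l, NULL, 10);
    long r_num = strtol(r, NULL, 10);

    return (l_num > r_num) - (l_num < r_num);
}

// New-style comparison: full floating-point values
static int
compare_as_double(const char *l, const char *r)
{
    double l_num = strtod(l, NULL);
    double r_num = strtod(r, NULL);

    return (l_num > r_num) - (l_num < r_num);
}

int
main(void)
{
    // "2.5" vs "2.4": truncation says equal, floating point says greater
    printf("int:    %d\n", compare_as_int("2.5", "2.4"));     // prints 0
    printf("double: %d\n", compare_as_double("2.5", "2.4"));  // prints 1
    return EXIT_SUCCESS;
}
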
diff --git a/xml/alerts-3.5.rng b/xml/alerts-3.5.rng
new file mode 100644
index 0000000000..b563e90ab5
--- /dev/null
+++ b/xml/alerts-3.5.rng
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-alerts"/>
+ </start>
+
+ <define name="element-alerts">
+ <optional>
+ <element name="alerts">
+ <zeroOrMore>
+ <element name="alert">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <!-- path to the script called for alert -->
+ <attribute name="path"><text/></attribute>
+ <ref name="element-alert-extra"/>
+ <zeroOrMore>
+ <element name="recipient">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <attribute name="value"><text/></attribute>
+ <ref name="element-alert-extra"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+ <define name="element-alert-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+</grammar>
diff --git a/xml/constraints-next.rng b/xml/constraints-3.5.rng
similarity index 91%
copy from xml/constraints-next.rng
copy to xml/constraints-3.5.rng
index 1fa3e7557e..b0d94e4126 100644
--- a/xml/constraints-next.rng
+++ b/xml/constraints-3.5.rng
@@ -1,263 +1,256 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar xmlns="http://relaxng.org/ns/structure/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<start>
<ref name="element-constraints"/>
</start>
<define name="element-constraints">
<element name="constraints">
<zeroOrMore>
<choice>
<ref name="element-location"/>
<ref name="element-colocation"/>
<ref name="element-order"/>
<ref name="element-rsc_ticket"/>
</choice>
</zeroOrMore>
</element>
</define>
<define name="element-location">
<element name="rsc_location">
<attribute name="id"><data type="ID"/></attribute>
<choice>
<group>
<choice>
<attribute name="rsc"><data type="IDREF"/></attribute>
<attribute name="rsc-pattern"><text/></attribute>
</choice>
<optional>
<attribute name="role">
<ref name="attribute-roles"/>
</attribute>
</optional>
</group>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
</choice>
<choice>
<group>
<externalRef href="score.rng"/>
<attribute name="node"><text/></attribute>
</group>
<oneOrMore>
- <externalRef href="rule-3.4.rng"/>
+ <externalRef href="rule-3.5.rng"/>
</oneOrMore>
</choice>
<optional>
<ref name="element-lifetime"/>
</optional>
<optional>
<attribute name="resource-discovery">
<ref name="attribute-discovery"/>
</attribute>
</optional>
</element>
</define>
<define name="element-resource-set">
<element name="resource_set">
<choice>
<attribute name="id-ref"><data type="IDREF"/></attribute>
<group>
<attribute name="id"><data type="ID"/></attribute>
<optional>
<attribute name="sequential"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="require-all"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="ordering">
<choice>
<value>group</value>
<value>listed</value>
</choice>
</attribute>
</optional>
<optional>
<attribute name="action">
<ref name="attribute-actions"/>
</attribute>
</optional>
<optional>
<attribute name="role">
<ref name="attribute-roles"/>
</attribute>
</optional>
<optional>
- <externalRef href="score.rng"/>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="kind">
+ <ref name="order-types"/>
+ </attribute>
+ </choice>
</optional>
<oneOrMore>
<element name="resource_ref">
<attribute name="id"><data type="IDREF"/></attribute>
</element>
</oneOrMore>
</group>
</choice>
</element>
</define>
<define name="element-colocation">
<element name="rsc_colocation">
<attribute name="id"><data type="ID"/></attribute>
<optional>
<externalRef href="score.rng"/>
</optional>
<optional>
<ref name="element-lifetime"/>
</optional>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="rsc"><data type="IDREF"/></attribute>
<attribute name="with-rsc"><data type="IDREF"/></attribute>
<optional>
<attribute name="node-attribute"><text/></attribute>
</optional>
<optional>
<attribute name="rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
<optional>
<attribute name="with-rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
- <optional>
- <attribute name="rsc-instance"><data type="integer"/></attribute>
- </optional>
- <optional>
- <attribute name="with-rsc-instance"><data type="integer"/></attribute>
- </optional>
</group>
</choice>
</element>
</define>
<define name="element-order">
<element name="rsc_order">
<attribute name="id"><data type="ID"/></attribute>
<optional>
<ref name="element-lifetime"/>
</optional>
<optional>
<attribute name="symmetrical"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="require-all"><data type="boolean"/></attribute>
</optional>
<optional>
<choice>
<externalRef href="score.rng"/>
<attribute name="kind">
<ref name="order-types"/>
</attribute>
</choice>
</optional>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="first"><data type="IDREF"/></attribute>
<attribute name="then"><data type="IDREF"/></attribute>
<optional>
<attribute name="first-action">
<ref name="attribute-actions"/>
</attribute>
</optional>
<optional>
<attribute name="then-action">
<ref name="attribute-actions"/>
</attribute>
</optional>
- <optional>
- <attribute name="first-instance"><data type="integer"/></attribute>
- </optional>
- <optional>
- <attribute name="then-instance"><data type="integer"/></attribute>
- </optional>
</group>
</choice>
</element>
</define>
<define name="element-rsc_ticket">
<element name="rsc_ticket">
<attribute name="id"><data type="ID"/></attribute>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="rsc"><data type="IDREF"/></attribute>
<optional>
<attribute name="rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
</group>
</choice>
<attribute name="ticket"><text/></attribute>
<optional>
<attribute name="loss-policy">
<choice>
<value>stop</value>
<value>demote</value>
<value>fence</value>
<value>freeze</value>
</choice>
</attribute>
</optional>
</element>
</define>
<define name="attribute-discovery">
<choice>
<value>always</value>
<value>never</value>
<value>exclusive</value>
</choice>
</define>
<define name="attribute-actions">
<choice>
<value>start</value>
<value>promote</value>
<value>demote</value>
<value>stop</value>
</choice>
</define>
<define name="attribute-roles">
<choice>
<value>Stopped</value>
<value>Started</value>
<value>Master</value>
<value>Slave</value>
</choice>
</define>
<define name="order-types">
<choice>
<value>Optional</value>
<value>Mandatory</value>
<value>Serialize</value>
</choice>
</define>
<define name="element-lifetime">
<element name="lifetime">
<oneOrMore>
- <externalRef href="rule-3.4.rng"/>
+ <externalRef href="rule-3.5.rng"/>
</oneOrMore>
</element>
</define>
</grammar>
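
For illustration only (not part of the diff): the hunk above newly allows a kind attribute on resource_set (as an alternative to score). A minimal rsc_order fragment, with hypothetical ids, that constraints-3.5.rng would accept:

    <!-- illustrative sketch; ids are made up -->
    <constraints>
      <rsc_order id="order-storage-then-app">
        <resource_set id="order-storage-then-app-set" sequential="true" kind="Optional">
          <resource_ref id="shared-fs"/>
          <resource_ref id="app-server"/>
        </resource_set>
      </rsc_order>
    </constraints>
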
diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng
index 1fa3e7557e..b6223fdc6e 100644
--- a/xml/constraints-next.rng
+++ b/xml/constraints-next.rng
@@ -1,263 +1,263 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar xmlns="http://relaxng.org/ns/structure/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<start>
<ref name="element-constraints"/>
</start>
<define name="element-constraints">
<element name="constraints">
<zeroOrMore>
<choice>
<ref name="element-location"/>
<ref name="element-colocation"/>
<ref name="element-order"/>
<ref name="element-rsc_ticket"/>
</choice>
</zeroOrMore>
</element>
</define>
<define name="element-location">
<element name="rsc_location">
<attribute name="id"><data type="ID"/></attribute>
<choice>
<group>
<choice>
<attribute name="rsc"><data type="IDREF"/></attribute>
<attribute name="rsc-pattern"><text/></attribute>
</choice>
<optional>
<attribute name="role">
<ref name="attribute-roles"/>
</attribute>
</optional>
</group>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
</choice>
<choice>
<group>
<externalRef href="score.rng"/>
<attribute name="node"><text/></attribute>
</group>
<oneOrMore>
- <externalRef href="rule-3.4.rng"/>
+ <externalRef href="rule-3.5.rng"/>
</oneOrMore>
</choice>
<optional>
<ref name="element-lifetime"/>
</optional>
<optional>
<attribute name="resource-discovery">
<ref name="attribute-discovery"/>
</attribute>
</optional>
</element>
</define>
<define name="element-resource-set">
<element name="resource_set">
<choice>
<attribute name="id-ref"><data type="IDREF"/></attribute>
<group>
<attribute name="id"><data type="ID"/></attribute>
<optional>
<attribute name="sequential"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="require-all"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="ordering">
<choice>
<value>group</value>
<value>listed</value>
</choice>
</attribute>
</optional>
<optional>
<attribute name="action">
<ref name="attribute-actions"/>
</attribute>
</optional>
<optional>
<attribute name="role">
<ref name="attribute-roles"/>
</attribute>
</optional>
<optional>
<externalRef href="score.rng"/>
</optional>
<oneOrMore>
<element name="resource_ref">
<attribute name="id"><data type="IDREF"/></attribute>
</element>
</oneOrMore>
</group>
</choice>
</element>
</define>
<define name="element-colocation">
<element name="rsc_colocation">
<attribute name="id"><data type="ID"/></attribute>
<optional>
<externalRef href="score.rng"/>
</optional>
<optional>
<ref name="element-lifetime"/>
</optional>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="rsc"><data type="IDREF"/></attribute>
<attribute name="with-rsc"><data type="IDREF"/></attribute>
<optional>
<attribute name="node-attribute"><text/></attribute>
</optional>
<optional>
<attribute name="rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
<optional>
<attribute name="with-rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
<optional>
<attribute name="rsc-instance"><data type="integer"/></attribute>
</optional>
<optional>
<attribute name="with-rsc-instance"><data type="integer"/></attribute>
</optional>
</group>
</choice>
</element>
</define>
<define name="element-order">
<element name="rsc_order">
<attribute name="id"><data type="ID"/></attribute>
<optional>
<ref name="element-lifetime"/>
</optional>
<optional>
<attribute name="symmetrical"><data type="boolean"/></attribute>
</optional>
<optional>
<attribute name="require-all"><data type="boolean"/></attribute>
</optional>
<optional>
<choice>
<externalRef href="score.rng"/>
<attribute name="kind">
<ref name="order-types"/>
</attribute>
</choice>
</optional>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="first"><data type="IDREF"/></attribute>
<attribute name="then"><data type="IDREF"/></attribute>
<optional>
<attribute name="first-action">
<ref name="attribute-actions"/>
</attribute>
</optional>
<optional>
<attribute name="then-action">
<ref name="attribute-actions"/>
</attribute>
</optional>
<optional>
<attribute name="first-instance"><data type="integer"/></attribute>
</optional>
<optional>
<attribute name="then-instance"><data type="integer"/></attribute>
</optional>
</group>
</choice>
</element>
</define>
<define name="element-rsc_ticket">
<element name="rsc_ticket">
<attribute name="id"><data type="ID"/></attribute>
<choice>
<oneOrMore>
<ref name="element-resource-set"/>
</oneOrMore>
<group>
<attribute name="rsc"><data type="IDREF"/></attribute>
<optional>
<attribute name="rsc-role">
<ref name="attribute-roles"/>
</attribute>
</optional>
</group>
</choice>
<attribute name="ticket"><text/></attribute>
<optional>
<attribute name="loss-policy">
<choice>
<value>stop</value>
<value>demote</value>
<value>fence</value>
<value>freeze</value>
</choice>
</attribute>
</optional>
</element>
</define>
<define name="attribute-discovery">
<choice>
<value>always</value>
<value>never</value>
<value>exclusive</value>
</choice>
</define>
<define name="attribute-actions">
<choice>
<value>start</value>
<value>promote</value>
<value>demote</value>
<value>stop</value>
</choice>
</define>
<define name="attribute-roles">
<choice>
<value>Stopped</value>
<value>Started</value>
<value>Master</value>
<value>Slave</value>
</choice>
</define>
<define name="order-types">
<choice>
<value>Optional</value>
<value>Mandatory</value>
<value>Serialize</value>
</choice>
</define>
<define name="element-lifetime">
<element name="lifetime">
<oneOrMore>
- <externalRef href="rule-3.4.rng"/>
+ <externalRef href="rule-3.5.rng"/>
</oneOrMore>
</element>
</define>
</grammar>
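
For illustration only (not part of the diff): the change above merely repoints rule references from rule-3.4.rng to rule-3.5.rng, so a location constraint with an embedded rule, such as the hypothetical fragment below, remains valid under either revision:

    <!-- illustrative sketch; ids and node name are made up -->
    <rsc_location id="loc-app-prefer-node1" rsc="app-server">
      <rule id="loc-app-prefer-node1-rule" score="100">
        <expression id="loc-app-prefer-node1-expr" attribute="#uname" operation="eq" value="node1"/>
      </rule>
    </rsc_location>
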
diff --git a/xml/nodes-3.5.rng b/xml/nodes-3.5.rng
new file mode 100644
index 0000000000..e7a94094e4
--- /dev/null
+++ b/xml/nodes-3.5.rng
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nodes"/>
+ </start>
+
+ <define name="element-nodes">
+ <element name="nodes">
+ <zeroOrMore>
+ <element name="node">
+ <attribute name="id"><text/></attribute>
+ <attribute name="uname"><text/></attribute>
+ <optional>
+ <attribute name="type">
+ <choice>
+ <value>member</value>
+ <value>ping</value>
+ <value>remote</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ <zeroOrMore>
+ <choice>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ <element name="utilization">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </zeroOrMore>
+ </element>
+ </define>
+
+</grammar>
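
For illustration only (not part of the diff): a minimal <nodes> fragment, with hypothetical ids and values, that the new nodes-3.5.rng grammar would accept:

    <!-- illustrative sketch; ids, names, and values are made up -->
    <nodes>
      <node id="1" uname="node1" type="member">
        <instance_attributes id="node1-attrs">
          <nvpair id="node1-attrs-site" name="site" value="datacenter-a"/>
        </instance_attributes>
        <utilization id="node1-utilization">
          <nvpair id="node1-utilization-cpu" name="cpu" value="8"/>
        </utilization>
      </node>
    </nodes>
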
diff --git a/xml/nvset-3.5.rng b/xml/nvset-3.5.rng
new file mode 100644
index 0000000000..92225f9345
--- /dev/null
+++ b/xml/nvset-3.5.rng
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- just as nvset-2.9.rng, but allows for instantiated @name restrictions -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-nvset"/>
+ </start>
+
+ <!-- nvpair/@name:
+ * generic string by default, parent grammar may want to prohibit
+ enumerated names -->
+ <define name="element-nvset.name">
+ <attribute name="name">
+ <text/>
+ </attribute>
+ </define>
+
+ <!-- nvpair/@name:
+ * defer element-nvset.name grammar item
+ nvpair/@value:
+ generic string by default, parent grammar may want to restrict
+ enumerated pairs (i.e. related to @name) at once -->
+ <define name="element-nvset.name-value">
+ <ref name="element-nvset.name"/>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </define>
+
+ <define name="element-nvset">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <interleave>
+ <optional>
+ <externalRef href="rule-3.5.rng"/>
+ </optional>
+ <zeroOrMore>
+ <element name="nvpair">
+ <choice>
+ <group>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <optional>
+ <attribute name="name"><text/></attribute>
+ </optional>
+ </group>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <ref name="element-nvset.name-value"/>
+ </group>
+ </choice>
+ </element>
+ </zeroOrMore>
+ <optional>
+ <externalRef href="score.rng"/>
+ </optional>
+ </interleave>
+ </group>
+ </choice>
+ </define>
+
+</grammar>
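
For illustration only (not part of the diff): nvset-3.5.rng describes the content of nvpair-set elements such as instance_attributes or meta_attributes, and lets an including grammar restrict nvpair names. A plain set, with hypothetical ids and values, that the grammar would accept inside such a containing element:

    <!-- illustrative sketch; ids, names, and values are made up -->
    <instance_attributes id="app-server-params">
      <nvpair id="app-server-params-ip" name="ip" value="192.168.122.10"/>
      <nvpair id="app-server-params-nic" name="nic" value="eth0"/>
    </instance_attributes>
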
diff --git a/xml/options-3.5.rng b/xml/options-3.5.rng
new file mode 100644
index 0000000000..ffcebb7ebe
--- /dev/null
+++ b/xml/options-3.5.rng
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="options"/>
+ </start>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="cluster-properties"
+ -->
+ <define name="cluster_property_set.nvpair.name-value-unsupported">
+ <choice>
+ <group>
+ <attribute name="name">
+ <value type="string">cluster-infrastructure</value>
+ </attribute>
+ <attribute name="value">
+ <data type="string">
+ <except>
+ <choice>
+ <value>heartbeat</value>
+ <value>openais</value>
+ <value>classic openais</value>
+ <value>classic openais (with plugin)</value>
+ <value>cman</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </group>
+ <group>
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>cluster-infrastructure</value>
+ <value>cluster_recheck_interval</value>
+ <value>dc_deadtime</value>
+ <value>default-action-timeout</value>
+ <value>default_action_timeout</value>
+ <value>default-migration-threshold</value>
+ <value>default_migration_threshold</value>
+ <value>default-resource-failure-stickiness</value>
+ <value>default_resource_failure_stickiness</value>
+ <value>default-resource-stickiness</value>
+ <value>default_resource_stickiness</value>
+ <value>election_timeout</value>
+ <value>expected-quorum-votes</value>
+ <value>is-managed-default</value>
+ <value>is_managed_default</value>
+ <value>no_quorum_policy</value>
+ <value>notification-agent</value>
+ <value>notification-recipient</value>
+ <value>remove_after_stop</value>
+ <value>shutdown_escalation</value>
+ <value>startup_fencing</value>
+ <value>stonith_action</value>
+ <value>stonith_enabled</value>
+ <value>stop_orphan_actions</value>
+ <value>stop_orphan_resources</value>
+ <value>symmetric_cluster</value>
+ <value>transition_idle_timeout</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ </group>
+ </choice>
+ </define>
+
+ <define name="options">
+ <interleave>
+ <element name="crm_config">
+ <zeroOrMore>
+ <element name="cluster_property_set">
+ <grammar>
+ <include href="nvset-3.5.rng">
+ <define name="element-nvset.name-value">
+ <parentRef name="cluster_property_set.nvpair.name-value-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ </zeroOrMore>
+ </element>
+ <optional>
+ <element name="rsc_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="op_defaults">
+ <zeroOrMore>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </interleave>
+ </define>
+
+</grammar>
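
For illustration only (not part of the diff): options-3.5.rng rejects the legacy property names and cluster-infrastructure values listed in the except blocks above. A crm_config fragment, with hypothetical ids, that the grammar would still accept:

    <!-- illustrative sketch; ids are made up -->
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
        <nvpair id="cib-bootstrap-options-infra" name="cluster-infrastructure" value="corosync"/>
      </cluster_property_set>
    </crm_config>
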
diff --git a/xml/resources-3.5.rng b/xml/resources-3.5.rng
new file mode 100644
index 0000000000..5c6624f3e3
--- /dev/null
+++ b/xml/resources-3.5.rng
@@ -0,0 +1,426 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-resources"/>
+ </start>
+
+ <define name="element-resources">
+ <element name="resources">
+ <zeroOrMore>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-template"/>
+ <ref name="element-group"/>
+ <ref name="element-clone"/>
+ <ref name="element-master"/>
+ <ref name="element-bundle"/>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="resource-meta-attributes"
+ -->
+ <define name="primitive-template.meta_attributes.nvpair.name-unsupported">
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>isolation</value>
+ <value>isolation-host</value>
+ <value>isolation-instance</value>
+ <value>isolation-wrapper</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </define>
+
+ <define name="element-resource-extra.primitive-template">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <grammar>
+ <include href="nvset-3.5.rng">
+ <define name="element-nvset.name">
+ <parentRef name="primitive-template.meta_attributes.nvpair.name-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-primitive">
+ <element name="primitive">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <ref name="element-resource-class"/>
+ <attribute name="type"><text/></attribute>
+ </group>
+ <attribute name="template"><data type="IDREF"/></attribute>
+ </choice>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra.primitive-template"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-template">
+ <element name="template">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <ref name="element-resource-class"/>
+ <attribute name="type"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra.primitive-template"/>
+ <ref name="element-operations"/>
+ <zeroOrMore>
+ <element name="utilization">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </zeroOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-bundle">
+ <element name="bundle">
+ <interleave>
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <element name="docker">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ <element name="rkt">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ <element name="podman">
+ <attribute name="image"><text/></attribute>
+ <optional>
+ <attribute name="replicas"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="replicas-per-host"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="masters"><data type="integer"/></attribute>
+ <attribute name="promoted-max"><data type="integer"/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="run-command"> <text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="network"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ </choice>
+ <optional>
+ <element name="network">
+ <optional>
+ <attribute name="ip-range-start"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="control-port"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="host-interface"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="host-netmask"><data type="integer"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="add-host"><data type="boolean"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="port-mapping">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="port"><data type="integer"/></attribute>
+ <optional>
+ <attribute name="internal-port"><data type="integer"/></attribute>
+ </optional>
+ </group>
+ <attribute name="range">
+ <data type="string">
+ <param name="pattern">([0-9\-]+)</param>
+ </data>
+ </attribute>
+ </choice>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <element name="storage">
+ <zeroOrMore>
+ <element name="storage-mapping">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <attribute name="source-dir"><text/></attribute>
+ <attribute name="source-dir-root"><text/></attribute>
+ </choice>
+ <attribute name="target-dir"><text/></attribute>
+ <optional>
+ <attribute name="options"><text/></attribute>
+ </optional>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ <optional>
+ <ref name="element-primitive"/>
+ </optional>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-group">
+ <element name="group">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <oneOrMore>
+ <ref name="element-primitive"/>
+ </oneOrMore>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-clone">
+ <element name="clone">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-master">
+ <element name="master">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <interleave>
+ <ref name="element-resource-extra"/>
+ <choice>
+ <ref name="element-primitive"/>
+ <ref name="element-group"/>
+ </choice>
+ </interleave>
+ </element>
+ </define>
+
+ <define name="element-resource-extra">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <!--
+ see upgrade-2.10.xsl
+ - cibtr:table for="resources-operation"
+ -->
+ <define name="op.meta_attributes.nvpair.name-unsupported">
+ <attribute name="name">
+ <data type="string">
+ <except>
+ <choice>
+ <value>requires</value>
+ </choice>
+ </except>
+ </data>
+ </attribute>
+ </define>
+
+ <define name="element-resource-extra.op">
+ <zeroOrMore>
+ <choice>
+ <element name="meta_attributes">
+ <grammar>
+ <include href="nvset-3.5.rng">
+ <define name="element-nvset.name">
+ <parentRef name="op.meta_attributes.nvpair.name-unsupported"/>
+ </define>
+ </include>
+ </grammar>
+ </element>
+ <element name="instance_attributes">
+ <externalRef href="nvset-3.5.rng"/>
+ </element>
+ </choice>
+ </zeroOrMore>
+ </define>
+
+ <define name="element-operations">
+ <optional>
+ <element name="operations">
+ <optional>
+ <attribute name="id"><data type="ID"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="op">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <attribute name="interval"><text/></attribute>
+ <optional>
+ <attribute name="description"><text/></attribute>
+ </optional>
+ <optional>
+ <choice>
+ <attribute name="start-delay"><text/></attribute>
+ <attribute name="interval-origin"><text/></attribute>
+ </choice>
+ </optional>
+ <optional>
+ <attribute name="timeout"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="enabled"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="record-pending"><data type="boolean"/></attribute>
+ </optional>
+ <optional>
+ <attribute name="role">
+ <choice>
+ <value>Stopped</value>
+ <value>Started</value>
+ <value>Slave</value>
+ <value>Master</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="on-fail">
+ <choice>
+ <value>ignore</value>
+ <value>block</value>
+ <value>demote</value>
+ <value>stop</value>
+ <value>restart</value>
+ <value>standby</value>
+ <value>fence</value>
+ <value>restart-container</value>
+ </choice>
+ </attribute>
+ </optional>
+ <ref name="element-resource-extra.op"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </optional>
+ </define>
+
+ <define name="element-resource-class">
+ <choice>
+ <group>
+ <attribute name="class"><value>ocf</value></attribute>
+ <attribute name="provider"><text/></attribute>
+ </group>
+ <attribute name="class">
+ <choice>
+ <value>lsb</value>
+ <value>heartbeat</value>
+ <value>stonith</value>
+ <value>upstart</value>
+ <value>service</value>
+ <value>systemd</value>
+ <value>nagios</value>
+ </choice>
+ </attribute>
+ </choice>
+ </define>
+</grammar>
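
For illustration only (not part of the diff): a minimal bundle resource, with hypothetical ids, image name, and paths, that the new resources-3.5.rng grammar above would accept, exercising the podman, network, storage, and nested primitive branches:

    <!-- illustrative sketch; ids, image, addresses, and paths are made up -->
    <bundle id="httpd-bundle">
      <podman image="registry.example.com/httpd:latest" replicas="3" promoted-max="1"/>
      <network ip-range-start="192.168.122.131" host-netmask="24">
        <port-mapping id="httpd-bundle-port" port="80"/>
      </network>
      <storage>
        <storage-mapping id="httpd-bundle-logs" source-dir="/srv/httpd-logs"
                         target-dir="/var/log/httpd" options="rw"/>
      </storage>
      <primitive id="httpd" class="ocf" provider="heartbeat" type="apache"/>
    </bundle>
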
diff --git a/xml/rule-3.5.rng b/xml/rule-3.5.rng
new file mode 100644
index 0000000000..82be4d6ed7
--- /dev/null
+++ b/xml/rule-3.5.rng
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="element-rule"/>
+ </start>
+
+ <define name="element-rule">
+ <element name="rule">
+ <choice>
+ <attribute name="id-ref"><data type="IDREF"/></attribute>
+ <group>
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <externalRef href="score.rng"/>
+ <attribute name="score-attribute"><text/></attribute>
+ </choice>
+ <optional>
+ <attribute name="boolean-op">
+ <choice>
+ <value>or</value>
+ <value>and</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="role"><text/></attribute>
+ </optional>
+ <oneOrMore>
+ <choice>
+ <element name="expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="attribute"><text/></attribute>
+ <attribute name="operation">
+ <choice>
+ <value>lt</value>
+ <value>gt</value>
+ <value>lte</value>
+ <value>gte</value>
+ <value>eq</value>
+ <value>ne</value>
+ <value>defined</value>
+ <value>not_defined</value>
+ </choice>
+ </attribute>
+ <optional>
+ <attribute name="value"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type" ann:defaultValue="string">
+ <choice>
+ <value>string</value>
+ <value>integer</value>
+ <value>number</value>
+ <value>version</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="value-source" ann:defaultValue="literal">
+ <choice>
+ <value>literal</value>
+ <value>param</value>
+ <value>meta</value>
+ </choice>
+ </attribute>
+ </optional>
+ </element>
+ <element name="date_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <choice>
+ <group>
+ <attribute name="operation"><value>in_range</value></attribute>
+ <choice>
+ <group>
+ <optional>
+ <attribute name="start"><text/></attribute>
+ </optional>
+ <attribute name="end"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="start"><text/></attribute>
+ <element name="duration">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>gt</value></attribute>
+ <attribute name="start"><text/></attribute>
+ </group>
+ <group>
+ <attribute name="operation"><value>lt</value></attribute>
+ <choice>
+ <attribute name="end"><text/></attribute>
+ </choice>
+ </group>
+ <group>
+ <attribute name="operation"><value>date_spec</value></attribute>
+ <element name="date_spec">
+ <ref name="date-common"/>
+ </element>
+ </group>
+ </choice>
+ </element>
+ <element name="rsc_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="class"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="provider"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="type"><text/></attribute>
+ </optional>
+ </element>
+ <element name="op_expression">
+ <attribute name="id"><data type="ID"/></attribute>
+ <attribute name="name"><text/></attribute>
+ <optional>
+ <attribute name="interval"><text/></attribute>
+ </optional>
+ </element>
+ <ref name="element-rule"/>
+ </choice>
+ </oneOrMore>
+ </group>
+ </choice>
+ </element>
+ </define>
+
+ <define name="date-common">
+ <attribute name="id"><data type="ID"/></attribute>
+ <optional>
+ <attribute name="hours"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="monthdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="yearsdays"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="months"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weeks"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="years"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="weekyears"><text/></attribute>
+ </optional>
+ <optional>
+ <attribute name="moon"><text/></attribute>
+ </optional>
+ </define>
+
+</grammar>
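
For illustration only (not part of the diff): a rule fragment, with hypothetical ids, that the new rule-3.5.rng grammar would accept, combining the rsc_expression and op_expression branches defined above:

    <!-- illustrative sketch; ids and values are made up -->
    <rule id="sample-rule" score="INFINITY" boolean-op="and">
      <rsc_expression id="sample-rule-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>
      <op_expression id="sample-rule-op" name="monitor" interval="10s"/>
    </rule>
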
