diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index ea9c660f53..15c7d605e8 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1514 +1,1516 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
DESC = """Regression tests for Pacemaker's scheduler"""
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, a test description, and (optionally) a list of additional
# test arguments.
# Test groups will be separated by newlines in output.
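# For illustration (hypothetical test names, not part of the suite), a group
# containing one plain test and one test that passes extra arguments through
# to crm_simulate would look like:
#
#   [
#       [ "example-basic", "Example test with no extra arguments" ],
#       [ "example-dated", "Example date-based test", [ "-t", "2020-01-01" ] ],
#   ],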
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "bug-lf-2613", "Move group on failure" ],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-3", "Params: Restart instead of reload if start pending" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : master" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
[ "rule-dbl-as-auto-number-match",
"Floating-point rule values default to number comparison: match" ],
[ "rule-dbl-as-auto-number-no-match",
"Floating-point rule values default to number comparison: no "
"match" ],
[ "rule-dbl-as-integer-match",
"Floating-point rule values set to integer comparison: match" ],
[ "rule-dbl-as-integer-no-match",
"Floating-point rule values set to integer comparison: no match" ],
[ "rule-dbl-as-number-match",
"Floating-point rule values set to number comparison: match" ],
[ "rule-dbl-as-number-no-match",
"Floating-point rule values set to number comparison: no match" ],
[ "rule-dbl-parse-fail-default-str-match",
"Floating-point rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-dbl-parse-fail-default-str-no-match",
"Floating-point rule values fail to parse, default to string "
"comparison: no match" ],
[ "rule-int-as-auto-integer-match",
"Integer rule values default to integer comparison: match" ],
[ "rule-int-as-auto-integer-no-match",
"Integer rule values default to integer comparison: no match" ],
[ "rule-int-as-integer-match",
"Integer rule values set to integer comparison: match" ],
[ "rule-int-as-integer-no-match",
"Integer rule values set to integer comparison: no match" ],
[ "rule-int-as-number-match",
"Integer rule values set to number comparison: match" ],
[ "rule-int-as-number-no-match",
"Integer rule values set to number comparison: no match" ],
[ "rule-int-parse-fail-default-str-match",
"Integer rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-int-parse-fail-default-str-no-match",
"Integer rule values fail to parse, default to string "
"comparison: no match" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_master_w_native",
"cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
[ "colo_slave_w_native",
"cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
[ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
[ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
[ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-master", "Resource Sets - Master" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
# @TODO: If pacemaker implements versioned attributes, uncomment this test
#[ "migrate-versioned", "Disable migration for versioned resources" ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
[ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
[ "clone-order-instance", "Ordering with specific clone instances" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "master-0", "Stopped -> Slave" ],
[ "master-1", "Stopped -> Promote" ],
[ "master-2", "Stopped -> Promote : notify" ],
[ "master-3", "Stopped -> Promote : master location" ],
[ "master-4", "Started -> Promote : master location" ],
[ "master-5", "Promoted -> Promoted" ],
[ "master-6", "Promoted -> Promoted (2)" ],
[ "master-7", "Promoted -> Fenced" ],
[ "master-8", "Promoted -> Fenced -> Moved" ],
[ "master-9", "Stopped + Promotable + No quorum" ],
[ "master-10", "Stopped -> Promotable : notify with monitor" ],
[ "master-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable master placement" ],
[ "master-12", "Promotion based solely on rsc_location constraints" ],
[ "master-13", "Include preferences of colocated resources when placing master" ],
[ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
[ "master-ordering", "Prevent resources from starting that need a master" ],
[ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
[ "master-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
[ "master-failed-demote", "Don't retry failed demote actions" ],
[ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "master-depend",
"Ensure resources that depend on the master don't get allocated until the master does" ],
[ "master-reattach", "Re-attach to a running master" ],
[ "master-allow-start", "Don't include master score if it would prevent allocation" ],
[ "master-colocation",
"Allow master instances placemaker to be influenced by colocation constraints" ],
[ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "master-role", "Prevent target-role from promoting more than master-max instances" ],
[ "bug-lf-2358", "Master-Master anti-colocation" ],
[ "master-promotion-constraint", "Mandatory master colocation constraints" ],
[ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
[ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
[ "master-demote-2", "Demote does not clear past failure" ],
[ "master-move", "Move master based on failure of colocated group" ],
[ "master-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by order constraint" ],
[ "order_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
[ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
[ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "master-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "master-stop", "Stop instances due to location constraint with role=Started" ],
[ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "master-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "master-notify", "Master promotion with notifies" ],
[ "master-score-startup", "Use permanent master scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
[ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
[ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
[ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
[ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
[ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
[ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on master/slave" ],
[ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
[ "bug-1685", "Depends-on-master ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "bug-5007-masterslave_colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
[ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
[ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
[ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
[ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
[ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
[ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
[ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
[ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
[ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
[ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
[ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
[ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
[ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
[ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
[ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
[ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
[ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
[ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
[ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
[ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
[ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
[ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
[ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
[ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
+ [ [ "stop-all-resources", "Test stop-all-resources=true "],
+ ],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
# [ "versioned-resources", "Start resources with #ra-version rules" ],
# [ "restart-versioned", "Restart resources on #ra-version change" ],
# [ "reload-versioned", "Reload resources on #ra-version change" ],
#],
#[
# [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
# [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
# [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
#],
]
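# Each TESTS entry is [ name, description ] with an optional third element
# of extra crm_simulate arguments; run_all() below passes a missing third
# element through as an empty list. A hypothetical entry with extra
# arguments (the option shown is a placeholder, not a real flag):
#
#     [ "my-test", "My description", [ "--some-crm_simulate-option" ] ],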
TESTS_64BIT = [
[
[ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
],
]
# Constants substituted in the build process
class BuildVars(object):
SBINDIR = "@sbindir@"
BUILDDIR = "@abs_top_builddir@"
CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"
# These values must be kept in sync with include/crm/crm.h
class CrmExit(object):
OK = 0
ERROR = 1
NOT_INSTALLED = 5
NOINPUT = 66
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so grab that
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
self.args = parser.parse_args(argv[1:])
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(CrmExit.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
BuildVars.CRM_SCHEMA_DIRECTORY ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
def __init__(self, argv=sys.argv):
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
with io.open("/dev/null", "wt") as dev_null:
if diff(filename1, filename2, stdout=dev_null) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
self.failed_file.write("\n");
return True
return False
def run_one(self, test_name, test_desc, test_args=[]):
""" Run one scheduler test """
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
# (Intermediate) test outputs
output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return CrmExit.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return CrmExit.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != CrmExit.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return CrmExit.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return CrmExit.ERROR
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return CrmExit.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return CrmExit.ERROR
return CrmExit.OK
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group:
try:
args = test[2]
except IndexError:
args = []
self.run_one(test[0], test[1], args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
return CrmExit.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
os.remove(self.failed_filename)
return CrmExit.ERROR
def run(self):
""" Run test(s) as specified """
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
cat(self.failed_filename)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
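With the new test registered above, it can be exercised on its own through the script's single-test mode. A minimal sketch, assuming the built script is installed on PATH as "cts-scheduler" (the name and location are assumptions; adjust to your build tree):

import subprocess

# Run only the new scheduler test, showing any diffs against expected output.
rc = subprocess.call(["cts-scheduler", "--run", "stop-all-resources", "-V"])
raise SystemExit(rc)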
diff --git a/cts/scheduler/stop-all-resources.dot b/cts/scheduler/stop-all-resources.dot
new file mode 100644
index 0000000000..897b0ad9f6
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.dot
@@ -0,0 +1,38 @@
+ digraph "g" {
+"Email_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Email_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"Fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"Public-IP_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"Public-IP_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"dummy_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"dummy_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-0_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-0_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-1_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-1_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-2_monitor_0 cluster01" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-2_monitor_0 cluster02" [ style=dashed color="red" fontcolor="black"]
+"httpd-bundle-docker-0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-1_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-1_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-docker-2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.131_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.131_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.132_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.132_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.133_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"httpd-bundle-ip-192.168.122.133_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dhcpd:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dhcpd:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-1_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-1_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"inactive-dummy-2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"mysql-proxy:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"mysql-proxy:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+"ping:0_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"]
+"ping:0_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"]
+}
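Before the freshly generated graph is compared against this stored .dot file, run_one() above unique-sorts its interior lines while keeping the "digraph" header and the closing brace in place. A standalone sketch of that normalization (the helper name is ours; the logic mirrors the script):

import io

def normalize_dot(path):
    """Unique-sort a dot file's body, preserving the first and last lines
    (the "digraph" header and closing brace), as run_one() does."""
    with io.open(path, "rt") as f:
        first_line = f.readline()
        lines = f.readlines()
    last_line = lines.pop()
    with io.open(path, "wt") as f:
        f.write(first_line)
        f.writelines(sorted(set(lines)))
        f.write(last_line)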
diff --git a/cts/scheduler/stop-all-resources.exp b/cts/scheduler/stop-all-resources.exp
new file mode 100644
index 0000000000..478a28b302
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.exp
@@ -0,0 +1,272 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="19" operation="monitor" operation_key="ping:0_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="ping" long-id="ping:0" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="60000" dampen="5s" host_list="192.168.122.1" multiplier="1000"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="ping:0_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="ping" long-id="ping:0" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="60000" dampen="5s" host_list="192.168.122.1" multiplier="1000"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="20" operation="monitor" operation_key="Fencing_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="Fencing_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip_family="ipv4"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="dummy_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="6"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="dummy_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" op_sleep="6"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="inactive-dhcpd:0_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="inactive-dhcpd" long-id="inactive-dhcpd:0" class="lsb" type="dhcpd"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="inactive-dhcpd:0_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="inactive-dhcpd" long-id="inactive-dhcpd:0" class="lsb" type="dhcpd"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="23" operation="monitor" operation_key="inactive-dummy-1_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="inactive-dummy-1_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="24" operation="monitor" operation_key="inactive-dummy-2_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="inactive-dummy-2_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="25" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.131" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.131" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="26" operation="monitor" operation_key="httpd-bundle-docker-0_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-0,/var/log/pacemaker/bundles/httpd-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-0 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-0:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-0:/var/log -p 192.168.122.131:80:80 -p 192.168.122.131:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="httpd-bundle-docker-0_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-0,/var/log/pacemaker/bundles/httpd-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-0 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-0:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-0:/var/log -p 192.168.122.131:80:80 -p 192.168.122.131:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="28" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.132" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.132" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="18">
+ <action_set>
+ <rsc_op id="29" operation="monitor" operation_key="httpd-bundle-docker-1_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-1,/var/log/pacemaker/bundles/httpd-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-1 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-1:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-1:/var/log -p 192.168.122.132:80:80 -p 192.168.122.132:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="19">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="httpd-bundle-docker-1_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-1,/var/log/pacemaker/bundles/httpd-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-1 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-1:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-1:/var/log -p 192.168.122.132:80:80 -p 192.168.122.132:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="20">
+ <action_set>
+ <rsc_op id="31" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.133" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="21">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" cidr_netmask="24" ip="192.168.122.133" nic="eth0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="22">
+ <action_set>
+ <rsc_op id="32" operation="monitor" operation_key="httpd-bundle-docker-2_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-2,/var/log/pacemaker/bundles/httpd-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-2 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-2:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-2:/var/log -p 192.168.122.133:80:80 -p 192.168.122.133:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="23">
+ <action_set>
+ <rsc_op id="14" operation="monitor" operation_key="httpd-bundle-docker-2_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="pcmk:http" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/httpd-bundle-2,/var/log/pacemaker/bundles/httpd-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" --restart=no -h httpd-bundle-2 -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /dev/log:/dev/log:rw -v /srv/html:/var/www/html:rw -v /var/log/pacemaker/bundles/httpd-bundle-2:/etc/httpd/logs:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/httpd-bundle-2:/var/log -p 192.168.122.133:80:80 -p 192.168.122.133:3121:3121 --add-host=httpd-bundle-0:192.168.122.131 --add-host=httpd-bundle-1:192.168.122.132 --add-host=httpd-bundle-2:192.168.122.133"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="24">
+ <action_set>
+ <rsc_op id="34" operation="monitor" operation_key="Public-IP_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip="192.168.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="25">
+ <action_set>
+ <rsc_op id="16" operation="monitor" operation_key="Public-IP_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" ip="192.168.1.1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="26">
+ <action_set>
+ <rsc_op id="35" operation="monitor" operation_key="Email_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="Email" class="lsb" type="exim"/>
+ <attributes CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="27">
+ <action_set>
+ <rsc_op id="17" operation="monitor" operation_key="Email_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="Email" class="lsb" type="exim"/>
+ <attributes CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="28">
+ <action_set>
+ <rsc_op id="36" operation="monitor" operation_key="mysql-proxy:0_monitor_0" on_node="cluster02" on_node_uuid="2">
+ <primitive id="mysql-proxy" long-id="mysql-proxy:0" class="lsb" type="mysql-proxy"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster02" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="29">
+ <action_set>
+ <rsc_op id="18" operation="monitor" operation_key="mysql-proxy:0_monitor_0" on_node="cluster01" on_node_uuid="1">
+ <primitive id="mysql-proxy" long-id="mysql-proxy:0" class="lsb" type="mysql-proxy"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="cluster01" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
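Before the generated .out graph is diffed against this expected file, it passes through normalize() above, which blanks out attributes that vary between builds (and with --update the normalized copy becomes the stored .exp). A sketch of the same scrubbing applied line by line, with the patterns copied from the script:

import re

patterns = [re.compile(r'crm_feature_set="[^"]*"'),
            re.compile(r'batch-limit="[0-9]*"')]

def scrub(line):
    # Strip volatile attributes so feature-set bumps don't churn diffs.
    for pattern in patterns:
        line = pattern.sub("", line)
    return line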
diff --git a/cts/scheduler/stop-all-resources.scores b/cts/scheduler/stop-all-resources.scores
new file mode 100644
index 0000000000..5669c11b56
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.scores
@@ -0,0 +1,166 @@
+Allocation scores:
+pcmk__bundle_allocate: httpd-bundle allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0
+pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0
+pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500
+pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500
+pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500
+pcmk__clone_allocate: httpd-bundle-clone allocation score on cluster01: -INFINITY
+pcmk__clone_allocate: httpd-bundle-clone allocation score on cluster02: -INFINITY
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: 0
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: 0
+pcmk__clone_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: 0
+pcmk__clone_allocate: httpd:0 allocation score on httpd-bundle-0: INFINITY
+pcmk__clone_allocate: httpd:1 allocation score on httpd-bundle-1: INFINITY
+pcmk__clone_allocate: httpd:2 allocation score on httpd-bundle-2: INFINITY
+pcmk__clone_allocate: inactive-clone allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-clone allocation score on cluster02: 0
+pcmk__clone_allocate: inactive-dhcpd:0 allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-dhcpd:0 allocation score on cluster02: 0
+pcmk__clone_allocate: inactive-dhcpd:1 allocation score on cluster01: 0
+pcmk__clone_allocate: inactive-dhcpd:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-clone-group allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-clone-group allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:0 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:0 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:1 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:2 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:2 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:3 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:3 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-group:4 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-group:4 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__clone_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__clone_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__clone_allocate: ping-clone allocation score on cluster01: 0
+pcmk__clone_allocate: ping-clone allocation score on cluster02: 0
+pcmk__clone_allocate: ping:0 allocation score on cluster01: 0
+pcmk__clone_allocate: ping:0 allocation score on cluster02: 0
+pcmk__clone_allocate: ping:1 allocation score on cluster01: 0
+pcmk__clone_allocate: ping:1 allocation score on cluster02: 0
+pcmk__group_allocate: Email allocation score on cluster01: 0
+pcmk__group_allocate: Email allocation score on cluster02: 0
+pcmk__group_allocate: Public-IP allocation score on cluster01: 0
+pcmk__group_allocate: Public-IP allocation score on cluster02: 0
+pcmk__group_allocate: exim-group allocation score on cluster01: 0
+pcmk__group_allocate: exim-group allocation score on cluster02: 0
+pcmk__group_allocate: inactive-dummy-1 allocation score on cluster01: 0
+pcmk__group_allocate: inactive-dummy-1 allocation score on cluster02: 0
+pcmk__group_allocate: inactive-dummy-2 allocation score on cluster01: 0
+pcmk__group_allocate: inactive-dummy-2 allocation score on cluster02: 0
+pcmk__group_allocate: inactive-group allocation score on cluster01: 0
+pcmk__group_allocate: inactive-group allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:0 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:0 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:1 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:1 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:2 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:2 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:3 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:3 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-group:4 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-group:4 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__group_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__group_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__native_allocate: Email allocation score on cluster01: -INFINITY
+pcmk__native_allocate: Email allocation score on cluster02: -INFINITY
+pcmk__native_allocate: Fencing allocation score on cluster01: 0
+pcmk__native_allocate: Fencing allocation score on cluster02: 0
+pcmk__native_allocate: Public-IP allocation score on cluster01: 0
+pcmk__native_allocate: Public-IP allocation score on cluster02: 0
+pcmk__native_allocate: dummy allocation score on cluster01: 0
+pcmk__native_allocate: dummy allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-0 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-0 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-1 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-1 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-2 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-2 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0
+pcmk__native_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: httpd:0 allocation score on httpd-bundle-0: INFINITY
+pcmk__native_allocate: httpd:1 allocation score on httpd-bundle-1: INFINITY
+pcmk__native_allocate: httpd:2 allocation score on httpd-bundle-2: INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:0 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dhcpd:1 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dummy-1 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on cluster01: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on cluster02: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-0: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-1: -INFINITY
+pcmk__native_allocate: inactive-dummy-2 allocation score on httpd-bundle-2: -INFINITY
+pcmk__native_allocate: mysql-proxy:0 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:0 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:1 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:1 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:2 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:2 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:3 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:3 allocation score on cluster02: 0
+pcmk__native_allocate: mysql-proxy:4 allocation score on cluster01: 0
+pcmk__native_allocate: mysql-proxy:4 allocation score on cluster02: 0
+pcmk__native_allocate: ping:0 allocation score on cluster01: 0
+pcmk__native_allocate: ping:0 allocation score on cluster02: 0
+pcmk__native_allocate: ping:1 allocation score on cluster01: 0
+pcmk__native_allocate: ping:1 allocation score on cluster02: 0
diff --git a/cts/scheduler/stop-all-resources.summary b/cts/scheduler/stop-all-resources.summary
new file mode 100644
index 0000000000..fa4ca66344
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.summary
@@ -0,0 +1,80 @@
+4 of 27 resource instances DISABLED and 0 BLOCKED from further action due to failure
+
+Current cluster status:
+Online: [ cluster01 cluster02 ]
+
+ Clone Set: ping-clone [ping]
+ Stopped: [ cluster01 cluster02 ]
+ Fencing (stonith:fence_xvm): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Clone Set: inactive-clone [inactive-dhcpd]
+ Stopped (disabled): [ cluster01 cluster02 ]
+ Resource Group: inactive-group
+ inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
+ Container bundle set: httpd-bundle [pcmk:http]
+ httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
+ Resource Group: exim-group
+ Public-IP (ocf::heartbeat:IPaddr): Stopped
+ Email (lsb:exim): Stopped
+ Clone Set: mysql-clone-group [mysql-group]
+ Stopped: [ cluster01 cluster02 ]
+
+Transition Summary:
+
+Executing cluster transition:
+ * Resource action: ping:0 monitor on cluster02
+ * Resource action: ping:0 monitor on cluster01
+ * Resource action: Fencing monitor on cluster02
+ * Resource action: Fencing monitor on cluster01
+ * Resource action: dummy monitor on cluster02
+ * Resource action: dummy monitor on cluster01
+ * Resource action: inactive-dhcpd:0 monitor on cluster02
+ * Resource action: inactive-dhcpd:0 monitor on cluster01
+ * Resource action: inactive-dummy-1 monitor on cluster02
+ * Resource action: inactive-dummy-1 monitor on cluster01
+ * Resource action: inactive-dummy-2 monitor on cluster02
+ * Resource action: inactive-dummy-2 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.131 monitor on cluster01
+ * Resource action: httpd-bundle-docker-0 monitor on cluster02
+ * Resource action: httpd-bundle-docker-0 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.132 monitor on cluster01
+ * Resource action: httpd-bundle-docker-1 monitor on cluster02
+ * Resource action: httpd-bundle-docker-1 monitor on cluster01
+ * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster02
+ * Resource action: httpd-bundle-ip-192.168.122.133 monitor on cluster01
+ * Resource action: httpd-bundle-docker-2 monitor on cluster02
+ * Resource action: httpd-bundle-docker-2 monitor on cluster01
+ * Resource action: Public-IP monitor on cluster02
+ * Resource action: Public-IP monitor on cluster01
+ * Resource action: Email monitor on cluster02
+ * Resource action: Email monitor on cluster01
+ * Resource action: mysql-proxy:0 monitor on cluster02
+ * Resource action: mysql-proxy:0 monitor on cluster01
+
+Revised cluster status:
+Online: [ cluster01 cluster02 ]
+
+ Clone Set: ping-clone [ping]
+ Stopped: [ cluster01 cluster02 ]
+ Fencing (stonith:fence_xvm): Stopped
+ dummy (ocf::pacemaker:Dummy): Stopped
+ Clone Set: inactive-clone [inactive-dhcpd]
+ Stopped (disabled): [ cluster01 cluster02 ]
+ Resource Group: inactive-group
+ inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled)
+ inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled)
+ Container bundle set: httpd-bundle [pcmk:http]
+ httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
+ httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
+ Resource Group: exim-group
+ Public-IP (ocf::heartbeat:IPaddr): Stopped
+ Email (lsb:exim): Stopped
+ Clone Set: mysql-clone-group [mysql-group]
+ Stopped: [ cluster01 cluster02 ]
+
diff --git a/cts/scheduler/stop-all-resources.xml b/cts/scheduler/stop-all-resources.xml
new file mode 100644
index 0000000000..6ecd4d6d73
--- /dev/null
+++ b/cts/scheduler/stop-all-resources.xml
@@ -0,0 +1,107 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="1" num_updates="1" admin_epoch="1" cib-last-written="Tue May 5 12:04:36 2020" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
+ <nvpair id="cib-bootstrap-options-stop-all-resources" name="stop-all-resources" value="true"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="cluster01"/>
+ <node id="2" uname="cluster02"/>
+ </nodes>
+ <resources>
+ <clone id="ping-clone">
+ <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
+ <instance_attributes id="ping-instance_attributes">
+ <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
+ <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
+ <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
+ </instance_attributes>
+ <operations>
+ <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
+ <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
+ <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </clone>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
+ <instance_attributes id="dummy-instance_attributes">
+ <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
+ </instance_attributes>
+ <operations>
+ <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
+ <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <clone id="inactive-clone">
+ <meta_attributes id="inactive-clone-meta_attributes">
+ <nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
+ </meta_attributes>
+ <primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
+ </clone>
+ <group id="inactive-group">
+ <meta_attributes id="inactive-group-meta_attributes">
+ <nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
+ </meta_attributes>
+ <primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
+ <primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
+ </group>
+ <bundle id="httpd-bundle">
+ <docker image="pcmk:http" replicas="3"/>
+ <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
+ <port-mapping id="httpd-port" port="80"/>
+ </network>
+ <storage>
+ <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
+ <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
+ <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
+ </storage>
+ <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
+ <meta_attributes id="bundle-meta_attributes">
+ <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
+ </meta_attributes>
+ </bundle>
+ <group id="exim-group">
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
+ </instance_attributes>
+ </primitive>
+ <primitive id="Email" class="lsb" type="exim"/>
+ </group>
+ <clone id="mysql-clone-group">
+ <group id="mysql-group">
+ <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
+ <operations>
+ <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ </configuration>
+ <status>
+ <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
+ </status>
+</cib>
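The property under test lives in the cib-bootstrap-options set above. A minimal sketch of pulling it out of this input file with the standard library (the relative path is an assumption for the example):

import xml.etree.ElementTree as ET

cib = ET.parse("cts/scheduler/stop-all-resources.xml").getroot()
for nvpair in cib.iter("nvpair"):
    if nvpair.get("name") == "stop-all-resources":
        print(nvpair.get("value"))  # prints "true" for this test input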