
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 50c32f6c7c..278b9e0799 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1714 +1,1708 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
-__copyright__ = "Copyright 2004-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2004-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
import tempfile
# These imports allow running from a source checkout after running `make`.
# Note that this doesn't necessarily mean the tests will run successfully,
# but being able to see --help output can be useful.
if os.path.exists("@abs_top_srcdir@/python"):
sys.path.insert(0, "@abs_top_srcdir@/python")
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
sys.path.insert(0, "@abs_top_builddir@/python")
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
DESC = """Regression tests for Pacemaker's scheduler"""
+class SchedulerTest:
+ def __init__(self, name, desc, args=None):
+ self.name = name
+ self.desc = desc
+
+ if args is None:
+ self.args = []
+ else:
+ self.args = args
+
+class SchedulerTestGroup:
+ def __init__(self, tests):
+ self.tests = tests
+
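As a quick illustration of the new structure (a minimal sketch using only the SchedulerTest and SchedulerTestGroup definitions added above; the test names below are hypothetical, not real tests):

    example_group = SchedulerTestGroup([
        SchedulerTest("example-basic", "A test with no extra arguments"),
        # Extra scheduler arguments are passed as a list, e.g. a fixed date:
        SchedulerTest("example-dated", "A date-pinned test", ["-t", "2024-001"]),
    ])

    # Each test carries a base name, a description, and an argument list;
    # a runner would walk TESTS one group at a time in this fashion.
    for test in example_group.tests:
        print(test.name, "-", test.desc, test.args)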
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
TESTS = [
- [
- [ "simple1", "Offline" ],
- [ "simple2", "Start" ],
- [ "simple3", "Start 2" ],
- [ "simple4", "Start Failed" ],
- [ "simple6", "Stop Start" ],
- [ "simple7", "Shutdown" ],
- #[ "simple8", "Stonith" ],
- #[ "simple9", "Lower version" ],
- #[ "simple10", "Higher version" ],
- [ "simple11", "Priority (ne)" ],
- [ "simple12", "Priority (eq)" ],
- [ "simple8", "Stickiness" ],
- ],
- [
- [ "group1", "Group" ],
- [ "group2", "Group + Native" ],
- [ "group3", "Group + Group" ],
- [ "group4", "Group + Native (nothing)" ],
- [ "group5", "Group + Native (move)" ],
- [ "group6", "Group + Group (move)" ],
- [ "group7", "Group colocation" ],
- [ "group13", "Group colocation (cant run)" ],
- [ "group8", "Group anti-colocation" ],
- [ "group9", "Group recovery" ],
- [ "group10", "Group partial recovery" ],
- [ "group11", "Group target_role" ],
- [ "group14", "Group stop (graph terminated)" ],
- [ "group15", "Negative group colocation" ],
- [ "bug-1573", "Partial stop of a group with two children" ],
- [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
- [ "failed-sticky-group", "Move group on last member failure despite infinite stickiness" ],
- [ "failed-sticky-anticolocated-group",
- "Move group on last member failure despite infinite stickiness and optional anti-colocation"
- ],
- [ "bug-lf-2619", "Move group on clone failure" ],
- [ "group-fail", "Ensure stop order is preserved for partially active groups" ],
- [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
- [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
- [ "partial-unmanaged-group",
- "New member in partially unmanaged group"
- ],
- [ "group-dependents", "Account for the location preferences of things colocated with a group" ],
- [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
- [ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ],
- [
- "coloc-with-inner-group-member",
- "Consider explicit colocations with inner group members"
- ],
- ],
- [
- [ "rsc_dep1", "Must not" ],
- [ "rsc_dep3", "Must" ],
- [ "rsc_dep5", "Must not 3" ],
- [ "rsc_dep7", "Must 3" ],
- [ "rsc_dep10", "Must (but cant)" ],
- [ "rsc_dep2", "Must (running)" ],
- [ "rsc_dep8", "Must (running : alt)" ],
- [ "rsc_dep4", "Must (running + move)" ],
- [ "asymmetric", "Asymmetric - require explicit location constraints" ],
- ],
- [
- [ "orphan-0", "Orphan ignore" ],
- [ "orphan-1", "Orphan stop" ],
- [ "orphan-2", "Orphan stop, remove failcount" ],
- ],
- [
- [ "params-0", "Params: No change" ],
- [ "params-1", "Params: Changed" ],
- [ "params-2", "Params: Resource definition" ],
- [ "params-3", "Params: Restart instead of reload if start pending" ],
- [ "params-4", "Params: Reload" ],
- [ "params-5", "Params: Restart based on probe digest" ],
- [ "novell-251689", "Resource definition change + target_role=stopped" ],
- [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
- [ "params-6", "Params: Detect reload in previously migrated resource" ],
- [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
- [ "not-reschedule-unneeded-monitor",
- "Do not reschedule unneeded monitors while resource definitions have changed" ],
- [ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
- [ "restart-with-extra-op-params", "Restart if with extra operation parameters upon changes of any" ],
- ],
- [
- [ "target-0", "Target Role : baseline" ],
- [ "target-1", "Target Role : promoted" ],
- [ "target-2", "Target Role : invalid" ],
- ],
- [
- [ "base-score", "Set a node's default score for all nodes" ],
- ],
- [
- [ "date-1", "Dates", [ "-t", "2005-020" ] ],
- [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
- [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
- [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
- [ "probe-0", "Probe (anon clone)" ],
- [ "probe-1", "Pending Probe" ],
- [ "probe-2", "Correctly re-probe cloned groups" ],
- [ "probe-3", "Probe (pending node)" ],
- [ "probe-4", "Probe (pending node + stopped resource)" ],
- [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ],
- [ "failed-probe-primitive", "Maskable vs. unmaskable probe failures on primitive resources" ],
- [ "failed-probe-clone", "Maskable vs. unmaskable probe failures on cloned resources" ],
- [ "expired-failed-probe-primitive", "Maskable, expired probe failure on primitive resources" ],
- [ "standby", "Standby" ],
- [ "comments", "Comments" ],
- ],
- [
- [ "one-or-more-0", "Everything starts" ],
- [ "one-or-more-1", "Nothing starts because of A" ],
- [ "one-or-more-2", "D can start because of C" ],
- [ "one-or-more-3", "D cannot start because of B and C" ],
- [ "one-or-more-4", "D cannot start because of target-role" ],
- [ "one-or-more-5", "Start A and F even though C and D are stopped" ],
- [ "one-or-more-6", "Leave A running even though B is stopped" ],
- [ "one-or-more-7", "Leave A running even though C is stopped" ],
- [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
- [ "clone-require-all-1", "clone B starts node 3 and 4" ],
- [ "clone-require-all-2", "clone B remains stopped everywhere" ],
- [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
- [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
- [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
- [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
- [ "clone-require-all-7",
- "clone A and B both start at the same time. all instances of A start before B" ],
- [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
- [ "clone-require-all-no-interleave-2",
- "C starts on nodes 1, 2, and 4 with only one active instance of B" ],
- [ "clone-require-all-no-interleave-3",
- "C remains active when instance of B is stopped on one node and started on another" ],
- [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
- ],
- [
- [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
- [ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
- [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
- [ "value-source", "Use location constraints with node attribute expressions using value-source" ],
- [ "rule-dbl-as-auto-number-match",
- "Floating-point rule values default to number comparison: match" ],
- [ "rule-dbl-as-auto-number-no-match",
- "Floating-point rule values default to number comparison: no "
- "match" ],
- [ "rule-dbl-as-integer-match",
- "Floating-point rule values set to integer comparison: match" ],
- [ "rule-dbl-as-integer-no-match",
- "Floating-point rule values set to integer comparison: no match" ],
- [ "rule-dbl-as-number-match",
- "Floating-point rule values set to number comparison: match" ],
- [ "rule-dbl-as-number-no-match",
- "Floating-point rule values set to number comparison: no match" ],
- [ "rule-dbl-parse-fail-default-str-match",
- "Floating-point rule values fail to parse, default to string "
- "comparison: match" ],
- [ "rule-dbl-parse-fail-default-str-no-match",
- "Floating-point rule values fail to parse, default to string "
- "comparison: no match" ],
- [ "rule-int-as-auto-integer-match",
- "Integer rule values default to integer comparison: match" ],
- [ "rule-int-as-auto-integer-no-match",
- "Integer rule values default to integer comparison: no match" ],
- [ "rule-int-as-integer-match",
- "Integer rule values set to integer comparison: match" ],
- [ "rule-int-as-integer-no-match",
- "Integer rule values set to integer comparison: no match" ],
- [ "rule-int-as-number-match",
- "Integer rule values set to number comparison: match" ],
- [ "rule-int-as-number-no-match",
- "Integer rule values set to number comparison: no match" ],
- [ "rule-int-parse-fail-default-str-match",
- "Integer rule values fail to parse, default to string "
- "comparison: match" ],
- [ "rule-int-parse-fail-default-str-no-match",
- "Integer rule values fail to parse, default to string "
- "comparison: no match" ],
- [ "timeout-by-node", "Start timeout varies by node" ],
- ],
- [
- [ "order1", "Order start 1" ],
- [ "order2", "Order start 2" ],
- [ "order3", "Order stop" ],
- [ "order4", "Order (multiple)" ],
- [ "order5", "Order (move)" ],
- [ "order6", "Order (move w/ restart)" ],
- [ "order7", "Order (mandatory)" ],
- [ "order-optional", "Order (score=0)" ],
- [ "order-required", "Order (score=INFINITY)" ],
- [ "bug-lf-2171", "Prevent group start when clone is stopped" ],
- [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
- [ "order-sets", "Ordering for resource sets" ],
- [ "order-serialize", "Serialize resources without inhibiting migration" ],
- [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
- [ "clone-order-primitive", "Order clone start after a primitive" ],
- [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
- [ "order-optional-keyword", "Order (optional keyword)" ],
- [ "order-mandatory", "Order (mandatory keyword)" ],
- [ "bug-lf-2493",
- "Don't imply colocation requirements when applying ordering constraints with clones" ],
- [ "ordered-set-basic-startup", "Constraint set with default order settings" ],
- [ "ordered-set-natural", "Allow natural set ordering" ],
- [ "order-wrong-kind", "Order (error)" ],
- ],
- [
- [ "coloc-loop", "Colocation - loop" ],
- [ "coloc-many-one", "Colocation - many-to-one" ],
- [ "coloc-list", "Colocation - many-to-one with list" ],
- [ "coloc-group", "Colocation - groups" ],
- [ "coloc-unpromoted-anti", "Anti-colocation with unpromoted shouldn't prevent promoted colocation" ],
- [ "coloc-attr", "Colocation based on node attributes" ],
- [ "coloc-negative-group", "Negative colocation with a group" ],
- [ "coloc-intra-set", "Intra-set colocation" ],
- [ "bug-lf-2435", "Colocation sets with a negative score" ],
- [ "coloc-clone-stays-active",
- "Ensure clones don't get stopped/demoted because a dependent must stop" ],
- [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
- [ "colo_promoted_w_native",
- "cl#5070 - Verify promotion order is affected when colocating promoted with primitive" ],
- [ "colo_unpromoted_w_native",
- "cl#5070 - Verify promotion order is affected when colocating unpromoted with primitive" ],
- [ "anti-colocation-order",
- "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
- [ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ],
- [ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ],
- [ "group-anticolocation", "Group with failed last member anti-colocated with another group" ],
- [ "group-anticolocation-2",
- "Group with failed last member anti-colocated with another sticky group"
- ],
- [ "group-anticolocation-3",
- "Group with failed last member mandatorily anti-colocated with another group"
- ],
- [ "group-anticolocation-4",
- "Group with failed last member anti-colocated without influence with another group"
- ],
- [ "group-anticolocation-5",
- "Group with failed last member anti-colocated with another group (third node allowed)"
- ],
- [ "group-colocation-failure",
- "Group with sole member failed, colocated with another group"
- ],
- [ "enforce-colo1", "Always enforce B with A INFINITY" ],
- [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
- [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
- [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
- [ "colocation-influence", "Respect colocation influence" ],
- [ "colocation-priority-group", "Apply group colocations in order of primary priority" ],
- [ "colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score" ],
- [ "promoted-with-blocked", "Promoted role colocated with a resource with blocked start" ],
- [ "primitive-with-group-with-clone",
- "Consider group dependent when colocating with clone"
- ],
- [ "primitive-with-group-with-promoted",
- "Consider group dependent when colocating with promoted role"
- ],
- [ "primitive-with-unrunnable-group",
- "Block primitive colocated with group that can't start",
- ],
- ],
- [
- [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
- [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
- [ "rsc-sets-clone", "Resource Sets - Clone" ],
- [ "rsc-sets-promoted", "Resource Sets - Promoted" ],
- [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
- ],
- [
- [ "attrs1", "string: eq (and)" ],
- [ "attrs2", "string: lt / gt (and)" ],
- [ "attrs3", "string: ne (or)" ],
- [ "attrs4", "string: exists" ],
- [ "attrs5", "string: not_exists" ],
- [ "attrs6", "is_dc: true" ],
- [ "attrs7", "is_dc: false" ],
- [ "attrs8", "score_attribute" ],
- [ "per-node-attrs", "Per node resource parameters" ],
- ],
- [
- [ "mon-rsc-1", "Schedule Monitor - start" ],
- [ "mon-rsc-2", "Schedule Monitor - move" ],
- [ "mon-rsc-3", "Schedule Monitor - pending start" ],
- [ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
- ],
- [
- [ "rec-rsc-0", "Resource Recover - no start" ],
- [ "rec-rsc-1", "Resource Recover - start" ],
- [ "rec-rsc-2", "Resource Recover - monitor" ],
- [ "rec-rsc-3", "Resource Recover - stop - ignore" ],
- [ "rec-rsc-4", "Resource Recover - stop - block" ],
- [ "rec-rsc-5", "Resource Recover - stop - fence" ],
- [ "rec-rsc-6", "Resource Recover - multiple - restart" ],
- [ "rec-rsc-7", "Resource Recover - multiple - stop" ],
- [ "rec-rsc-8", "Resource Recover - multiple - block" ],
- [ "rec-rsc-9", "Resource Recover - group/group" ],
- [ "stop-unexpected", "Recover multiply active group with stop_unexpected" ],
- [ "stop-unexpected-2", "Resource multiply active primitve with stop_unexpected" ],
- [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
- [ "stop-failure-no-quorum", "Stop failure without quorum" ],
- [ "stop-failure-no-fencing", "Stop failure without fencing available" ],
- [ "stop-failure-with-fencing", "Stop failure with fencing available" ],
- [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
- [ "multiple-monitor-one-failed",
- "Consider resource failed if any of the configured monitor operations failed" ],
- ],
- [
- [ "quorum-1", "No quorum - ignore" ],
- [ "quorum-2", "No quorum - freeze" ],
- [ "quorum-3", "No quorum - stop" ],
- [ "quorum-4", "No quorum - start anyway" ],
- [ "quorum-5", "No quorum - start anyway (group)" ],
- [ "quorum-6", "No quorum - start anyway (clone)" ],
- [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
- [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
- [ "suicide-not-needed-initial-quorum",
- "no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
- [ "suicide-not-needed-never-quorate",
- "no-quorum-policy=suicide: suicide not necessary if never quorate" ],
- [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
- ],
- [
- [ "rec-node-1", "Node Recover - Startup - no fence" ],
- [ "rec-node-2", "Node Recover - Startup - fence" ],
- [ "rec-node-3", "Node Recover - HA down - no fence" ],
- [ "rec-node-4", "Node Recover - HA down - fence" ],
- [ "rec-node-5", "Node Recover - CRM down - no fence" ],
- [ "rec-node-6", "Node Recover - CRM down - fence" ],
- [ "rec-node-7", "Node Recover - no quorum - ignore" ],
- [ "rec-node-8", "Node Recover - no quorum - freeze" ],
- [ "rec-node-9", "Node Recover - no quorum - stop" ],
- [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
- [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
- [ "rec-node-12", "Node Recover - nothing active - fence" ],
- [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
- [ "rec-node-15", "Node Recover - unknown lrm section" ],
- [ "rec-node-14", "Serialize all stonith's" ],
- ],
- [
- [ "multi1", "Multiple Active (stop/start)" ],
- ],
- [
- [ "migrate-begin", "Normal migration" ],
- [ "migrate-success", "Completed migration" ],
- [ "migrate-partial-1", "Completed migration, missing stop on source" ],
- [ "migrate-partial-2", "Successful migrate_to only" ],
- [ "migrate-partial-3", "Successful migrate_to only, target down" ],
- [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
- [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
- [ "migrate-fail-2", "Failed migrate_from" ],
- [ "migrate-fail-3", "Failed migrate_from + stop on source" ],
- [ "migrate-fail-4",
- "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
- [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
- [ "migrate-fail-6", "Failed migrate_to" ],
- [ "migrate-fail-7", "Failed migrate_to + stop on source" ],
- [ "migrate-fail-8",
- "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
- [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
- [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
- [ "migrate-stop", "Migration in a stopping stack" ],
- [ "migrate-start", "Migration in a starting stack" ],
- [ "migrate-stop_start", "Migration in a restarting stack" ],
- [ "migrate-stop-complex", "Migration in a complex stopping stack" ],
- [ "migrate-start-complex", "Migration in a complex starting stack" ],
- [ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
- [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
- [ "migrate-1", "Migrate (migrate)" ],
- [ "migrate-2", "Migrate (stable)" ],
- [ "migrate-3", "Migrate (failed migrate_to)" ],
- [ "migrate-4", "Migrate (failed migrate_from)" ],
- [ "novell-252693", "Migration in a stopping stack" ],
- [ "novell-252693-2", "Migration in a starting stack" ],
- [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
- [ "bug-1820", "Migration in a group" ],
- [ "bug-1820-1", "Non-migration in a group" ],
- [ "migrate-5", "Primitive migration with a clone" ],
- [ "migrate-fencing", "Migration after Fencing" ],
- [ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
- [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
- [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
- [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
- [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
- [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
- [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
- [ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
- [ "7-migrate-group-one-unmigratable",
- "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
- [ "8-am-then-bm-a-migrating-b-stopping",
- "Advanced migrate logic, A then B, A migrating, B stopping" ],
- [ "9-am-then-bm-b-migrating-a-stopping",
- "Advanced migrate logic, A then B, B migrate, A stopping" ],
- [ "10-a-then-bm-b-move-a-clone",
- "Advanced migrate logic, A clone then B, migrate B while stopping A" ],
- [ "11-a-then-bm-b-move-a-clone-starting",
- "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
- [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
- [ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
- [ "probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins" ],
- [ "probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed" ],
- [ "partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration" ],
- [ "migration-intermediary-cleaned",
- "Probe live-migration intermediary with no history"
- ],
- [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
- ],
- [
- [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
- [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
- [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
- [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
- [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
- [ "inc0", "Incarnation start" ],
- [ "inc1", "Incarnation start order" ],
- [ "inc2", "Incarnation silent restart, stop, move" ],
- [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
- [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
- [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
- [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
- [ "inc7", "Clone colocation" ],
- [ "inc8", "Clone anti-colocation" ],
- [ "inc9", "Non-unique clone" ],
- [ "inc10", "Non-unique clone (stop)" ],
- [ "inc11", "Primitive colocation with clones" ],
- [ "inc12", "Clone shutdown" ],
- [ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
- [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
- [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
- [ "clone-recover-no-shuffle-1",
- "Don't shuffle instances when starting a new primitive instance" ],
- [ "clone-recover-no-shuffle-2",
- "Don't shuffle instances when starting a new group instance" ],
- [ "clone-recover-no-shuffle-3",
- "Don't shuffle instances when starting a new bundle instance" ],
- [ "clone-recover-no-shuffle-4",
- "Don't shuffle instances when starting a new primitive instance with "
- "location preference "],
- [ "clone-recover-no-shuffle-5",
- "Don't shuffle instances when starting a new group instance with "
- "location preference" ],
- [ "clone-recover-no-shuffle-6",
- "Don't shuffle instances when starting a new bundle instance with "
- "location preference" ],
- [ "clone-recover-no-shuffle-7",
- "Don't shuffle instances when starting a new primitive instance that "
- "will be promoted" ],
- [ "clone-recover-no-shuffle-8",
- "Don't shuffle instances when starting a new group instance that "
- "will be promoted " ],
- [ "clone-recover-no-shuffle-9",
- "Don't shuffle instances when starting a new bundle instance that "
- "will be promoted " ],
- [ "clone-recover-no-shuffle-10",
- "Don't shuffle instances when starting a new primitive instance that "
- "won't be promoted" ],
- [ "clone-recover-no-shuffle-11",
- "Don't shuffle instances when starting a new group instance that "
- "won't be promoted " ],
- [ "clone-recover-no-shuffle-12",
- "Don't shuffle instances when starting a new bundle instance that "
- "won't be promoted " ],
- [ "clone-max-zero", "Orphan processing with clone-max=0" ],
- [ "clone-anon-dup",
- "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
- [ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
- [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
- [ "bug-lf-2153", "Clone ordering constraints" ],
- [ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
- [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
- [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
- [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
- [ "bug-lf-2544", "Balanced clone placement" ],
- [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
- [ "bug-lf-2574", "Avoid clone shuffle" ],
- [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
- [ "bug-cl-5168", "Don't shuffle clones" ],
- [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
- [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
- [ "clone-interleave-1",
- "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
- [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
- [ "clone-interleave-3",
- "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
- [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
- [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
- [ "clone-requires-quorum",
- "Clone with requires=quorum with presumed-inactive instance on failed node" ],
- ],
- [
- [ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
- [ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
- [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
- [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
- [ "clone_min_interleave_start_one",
- "order first clone then clone... first clone_min=2 and then has interleave=true" ],
- [ "clone_min_interleave_start_two",
- "order first clone then clone... first clone_min=2 and then has interleave=true" ],
- [ "clone_min_interleave_stop_one",
- "order first clone then clone... first clone_min=2 and then has interleave=true" ],
- [ "clone_min_interleave_stop_two",
- "order first clone then clone... first clone_min=2 and then has interleave=true" ],
- [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
- [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
- [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
- [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
- [ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
- ],
- [
- [ "unfence-startup", "Clean unfencing" ],
- [ "unfence-definition", "Unfencing when the agent changes" ],
- [ "unfence-parameters", "Unfencing when the agent parameters changes" ],
- [ "unfence-device", "Unfencing when a cluster has only fence devices" ],
- ],
- [
- [ "promoted-0", "Stopped -> Unpromoted" ],
- [ "promoted-1", "Stopped -> Promote" ],
- [ "promoted-2", "Stopped -> Promote : notify" ],
- [ "promoted-3", "Stopped -> Promote : promoted location" ],
- [ "promoted-4", "Started -> Promote : promoted location" ],
- [ "promoted-5", "Promoted -> Promoted" ],
- [ "promoted-6", "Promoted -> Promoted (2)" ],
- [ "promoted-7", "Promoted -> Fenced" ],
- [ "promoted-8", "Promoted -> Fenced -> Moved" ],
- [ "promoted-9", "Stopped + Promotable + No quorum" ],
- [ "promoted-10", "Stopped -> Promotable : notify with monitor" ],
- [ "promoted-11", "Stopped -> Promote : colocation" ],
- [ "novell-239082", "Demote/Promote ordering" ],
- [ "novell-239087", "Stable promoted placement" ],
- [ "promoted-12", "Promotion based solely on rsc_location constraints" ],
- [ "promoted-13", "Include preferences of colocated resources when placing promoted" ],
- [ "promoted-demote", "Ordering when actions depends on demoting an unpromoted resource" ],
- [ "promoted-ordering", "Prevent resources from starting that need a promoted" ],
- [ "bug-1765", "Verify promoted-with-promoted colocation does not stop unpromoted instances" ],
- [ "promoted-group", "Promotion of cloned groups" ],
- [ "bug-lf-1852", "Don't shuffle promotable instances unnecessarily" ],
- [ "promoted-failed-demote", "Don't retry failed demote actions" ],
- [ "promoted-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
- [ "promoted-depend",
- "Ensure resources that depend on promoted instance don't get allocated until that does" ],
- [ "promoted-reattach", "Re-attach to a running promoted" ],
- [ "promoted-allow-start", "Don't include promoted score if it would prevent allocation" ],
- [ "promoted-colocation",
- "Allow promoted instances placemaker to be influenced by colocation constraints" ],
- [ "promoted-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
- [ "promoted-role", "Prevent target-role from promoting more than promoted-max instances" ],
- [ "bug-lf-2358", "Anti-colocation of promoted instances" ],
- [ "promoted-promotion-constraint", "Mandatory promoted colocation constraints" ],
- [ "unmanaged-promoted", "Ensure role is preserved for unmanaged resources" ],
- [ "promoted-unmanaged-monitor", "Start correct monitor for unmanaged promoted instances" ],
- [ "promoted-demote-2", "Demote does not clear past failure" ],
- [ "promoted-move", "Move promoted based on failure of colocated group" ],
- [ "promoted-probed-score", "Observe the promotion score of probed resources" ],
- [ "colocation_constraint_stops_promoted",
- "cl#5054 - Ensure promoted is demoted when stopped by colocation constraint" ],
- [ "colocation_constraint_stops_unpromoted",
- "cl#5054 - Ensure unpromoted is not demoted when stopped by colocation constraint" ],
- [ "order_constraint_stops_promoted",
- "cl#5054 - Ensure promoted is demoted when stopped by order constraint" ],
- [ "order_constraint_stops_unpromoted",
- "cl#5054 - Ensure unpromoted is not demoted when stopped by order constraint" ],
- [ "promoted_monitor_restart", "cl#5072 - Ensure promoted monitor operation will start after promotion" ],
- [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
- [ "bug-5143-ms-shuffle", "Prevent promoted instance shuffling due to promotion score" ],
- [ "promoted-demote-block", "Block promotion if demote fails with on-fail=block" ],
- [ "promoted-dependent-ban",
- "Don't stop instances from being active because a dependent is banned from that host" ],
- [ "promoted-stop", "Stop instances due to location constraint with role=Started" ],
- [ "promoted-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
- [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
- [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
- [ "promoted-asymmetrical-order",
- "Fix the behaviors of multi-state resources with asymmetrical ordering" ],
- [ "promoted-notify", "Promotion with notifications" ],
- [ "promoted-score-startup", "Use permanent promoted scores without LRM history" ],
- [ "failed-demote-recovery", "Recover resource in unpromoted role after demote fails" ],
- [ "failed-demote-recovery-promoted", "Recover resource in promoted role after demote fails" ],
- [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
- [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
- [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
- [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
- [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
- [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ],
- [ "leftover-pending-monitor", "Prevent a leftover pending monitor from causing unexpected stop of other instances" ],
- ],
- [
- [ "history-1", "Correctly parse stateful-1 resource state" ],
- ],
- [
- [ "managed-0", "Managed (reference)" ],
- [ "managed-1", "Not managed - down" ],
- [ "managed-2", "Not managed - up" ],
- [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
- [ "bug-5028-detach", "Ensure detach still works" ],
- [ "bug-5028-bottom",
- "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
- [ "unmanaged-stop-1",
- "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
- [ "unmanaged-stop-2",
- "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
- [ "unmanaged-stop-3",
- "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
- [ "unmanaged-stop-4",
- "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
- [ "unmanaged-block-restart",
- "Block restart of resources if any dependent resource in a group is unmanaged" ],
- ],
- [
- [ "interleave-0", "Interleave (reference)" ],
- [ "interleave-1", "coloc - not interleaved" ],
- [ "interleave-2", "coloc - interleaved" ],
- [ "interleave-3", "coloc - interleaved (2)" ],
- [ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
- [ "interleave-stop", "Interleaved clone during stop" ],
- [ "interleave-restart", "Interleaved clone during dependency restart" ],
- ],
- [
- [ "notify-0", "Notify reference" ],
- [ "notify-1", "Notify simple" ],
- [ "notify-2", "Notify simple, confirm" ],
- [ "notify-3", "Notify move, confirm" ],
- [ "novell-239079", "Notification priority" ],
- #[ "notify-2", "Notify - 764" ],
- [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
- [ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
- [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
- ],
- [
- [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
- [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
- [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
- [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
- [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
- [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
- [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
- [ "829", "OSDL #829" ],
- [ "994",
- "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
- [ "994-2", "OSDL #994 - with a dependent resource" ],
- [ "1360", "OSDL #1360 - Clone stickiness" ],
- [ "1484", "OSDL #1484 - on_fail=stop" ],
- [ "1494", "OSDL #1494 - Clone stability" ],
- [ "unrunnable-1", "Unrunnable" ],
- [ "unrunnable-2", "Unrunnable 2" ],
- [ "stonith-0", "Stonith loop - 1" ],
- [ "stonith-1", "Stonith loop - 2" ],
- [ "stonith-2", "Stonith loop - 3" ],
- [ "stonith-3", "Stonith startup" ],
- [ "stonith-4", "Stonith node state" ],
- [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
- [ "bug-1572-1", "Recovery of groups depending on promotable role" ],
- [ "bug-1572-2", "Recovery of groups depending on promotable role when promoted is not re-promoted" ],
- [ "bug-1685", "Depends-on-promoted ordering" ],
- [ "bug-1822", "Don't promote partially active groups" ],
- [ "bug-pm-11", "New resource added to a m/s group" ],
- [ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
- [ "bug-n-387749", "Don't shuffle clone instances" ],
- [ "bug-n-385265",
- "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
- [ "bug-n-385265-2",
- "Ensure groups are migrated instead of remaining partially active on the current node" ],
- [ "bug-lf-1920", "Correctly handle probes that find active resources" ],
- [ "bnc-515172", "Location constraint with multiple expressions" ],
- [ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
- [ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
- [ "bug-lf-2551", "STONITH ordering for stop" ],
- [ "bug-lf-2606", "Stonith implies demote" ],
- [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
- [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
- [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
- [ "bug-5014-A-stop-B-started",
- "Verify when A stops B does not stop if it has already started using symmetric=false" ],
- [ "bug-5014-A-stopped-B-stopped",
- "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
- [ "bug-5014-CthenAthenB-C-stopped",
- "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
- [ "bug-5014-CLONE-A-start-B-start",
- "Verify when A starts B starts using clone resources with symmetric=false" ],
- [ "bug-5014-CLONE-A-stop-B-started",
- "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
- [ "bug-5014-GROUP-A-start-B-start",
- "Verify when A starts B starts when using group resources with symmetric=false" ],
- [ "bug-5014-GROUP-A-stopped-B-started",
- "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
- [ "bug-5014-GROUP-A-stopped-B-stopped",
- "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
- [ "bug-5014-ordered-set-symmetrical-false",
- "Verify ordered sets work with symmetrical=false" ],
- [ "bug-5014-ordered-set-symmetrical-true",
- "Verify ordered sets work with symmetrical=true" ],
- [ "clbz5007-promotable-colocation",
- "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
- [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
- [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
- [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
- [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
- [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
- [ "failcount", "Ensure failcounts are correctly expired" ],
- [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
- [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
- [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
- [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
- [ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
- [ "bug-5059", "No need to restart p_stateful1:*" ],
- [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
- [ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
- [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
- [ "expire-non-blocked-failure",
- "Ignore failure-timeout only if the failed operation has on-fail=block" ],
- [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
- [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
- [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
- [ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
- [ "expired-stop-1", "Expired stop failure should not block resource" ],
-
- [ "ignore_stonith_rsc_order1",
- "cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
- [ "ignore_stonith_rsc_order2",
- "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
- [ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
- [ "ignore_stonith_rsc_order4",
- "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
- [ "honor_stonith_rsc_order1",
- "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
- [ "honor_stonith_rsc_order2",
- "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
- [ "honor_stonith_rsc_order3",
- "cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
- [ "honor_stonith_rsc_order4",
- "cl#5056- Honor order constraint, between two native stonith rscs" ],
- [ "multiply-active-stonith", "Multiply active stonith" ],
- [ "probe-timeout", "cl#5099 - Default probe timeout" ],
- [ "order-first-probes",
- "cl#5301 - respect order constraints when relevant resources are being probed" ],
- [ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
- [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
- [ "pending-node-no-uname", "Do not fence a pending node that doesn't have an uname in node state yet" ],
- [ "node-pending-timeout", "Fence a pending node that has reached `node-pending-timeout`" ],
- ],
- [
- [ "systemhealth1", "System Health () #1" ],
- [ "systemhealth2", "System Health () #2" ],
- [ "systemhealth3", "System Health () #3" ],
- [ "systemhealthn1", "System Health (None) #1" ],
- [ "systemhealthn2", "System Health (None) #2" ],
- [ "systemhealthn3", "System Health (None) #3" ],
- [ "systemhealthm1", "System Health (Migrate On Red) #1" ],
- [ "systemhealthm2", "System Health (Migrate On Red) #2" ],
- [ "systemhealthm3", "System Health (Migrate On Red) #3" ],
- [ "systemhealtho1", "System Health (Only Green) #1" ],
- [ "systemhealtho2", "System Health (Only Green) #2" ],
- [ "systemhealtho3", "System Health (Only Green) #3" ],
- [ "systemhealthp1", "System Health (Progessive) #1" ],
- [ "systemhealthp2", "System Health (Progessive) #2" ],
- [ "systemhealthp3", "System Health (Progessive) #3" ],
- [ "allow-unhealthy-nodes", "System Health (migrate-on-red + allow-unhealth-nodes)" ],
- ],
- [
- [ "utilization", "Placement Strategy - utilization" ],
- [ "minimal", "Placement Strategy - minimal" ],
- [ "balanced", "Placement Strategy - balanced" ],
- ],
- [
- [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
- [ "placement-priority", "Optimized Placement Strategy - priority" ],
- [ "placement-location", "Optimized Placement Strategy - location" ],
- [ "placement-capacity", "Optimized Placement Strategy - capacity" ],
- ],
- [
- [ "utilization-order1", "Utilization Order - Simple" ],
- [ "utilization-order2", "Utilization Order - Complex" ],
- [ "utilization-order3", "Utilization Order - Migrate" ],
- [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
- [ "utilization-complex", "Utilization with complex relationships" ],
- [ "utilization-shuffle",
- "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
- [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
- [ "load-stopped-loop-2",
- "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
- ],
- [
- [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
- [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
- [ "colocated-utilization-group", "Colocated Utilization - Group" ],
- [ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
- [ "utilization-check-allowed-nodes",
- "Only check the capacities of the nodes that can run the resource" ],
- ],
- [
- [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
- [ "node-maintenance-1", "cl#5128 - Node maintenance" ],
- [ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
- [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
- [ "rsc-maintenance", "Per-resource maintenance" ],
- ],
- [
- [ "not-installed-agent", "The resource agent is missing" ],
- [ "not-installed-tools", "Something the resource agent needs is missing" ],
- ],
- [
- [ "stopped-monitor-00", "Stopped Monitor - initial start" ],
- [ "stopped-monitor-01", "Stopped Monitor - failed started" ],
- [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
- [ "stopped-monitor-03", "Stopped Monitor - stop started" ],
- [ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
- [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
- [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
- [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
- [ "stopped-monitor-08", "Stopped Monitor - migrate" ],
- [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
- [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
- [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
- [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
- [ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
- [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
- [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
- [ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
- [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
- [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
- [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
- [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
- [ "stopped-monitor-30", "Stopped Monitor - new node started" ],
- [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
- ],
- [
+ SchedulerTestGroup([
+ SchedulerTest("simple1", "Offline"),
+ SchedulerTest("simple2", "Start"),
+ SchedulerTest("simple3", "Start 2"),
+ SchedulerTest("simple4", "Start Failed"),
+ SchedulerTest("simple6", "Stop Start"),
+ SchedulerTest("simple7", "Shutdown"),
+ # SchedulerTest("simple8", "Stonith"),
+ # SchedulerTest("simple9", "Lower version"),
+ # SchedulerTest("simple10", "Higher version"),
+ SchedulerTest("simple11", "Priority (ne)"),
+ SchedulerTest("simple12", "Priority (eq)"),
+ SchedulerTest("simple8", "Stickiness"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("group1", "Group"),
+ SchedulerTest("group2", "Group + Native"),
+ SchedulerTest("group3", "Group + Group"),
+ SchedulerTest("group4", "Group + Native (nothing)"),
+ SchedulerTest("group5", "Group + Native (move)"),
+ SchedulerTest("group6", "Group + Group (move)"),
+ SchedulerTest("group7", "Group colocation"),
+ SchedulerTest("group13", "Group colocation (cant run)"),
+ SchedulerTest("group8", "Group anti-colocation"),
+ SchedulerTest("group9", "Group recovery"),
+ SchedulerTest("group10", "Group partial recovery"),
+ SchedulerTest("group11", "Group target_role"),
+ SchedulerTest("group14", "Group stop (graph terminated)"),
+ SchedulerTest("group15", "Negative group colocation"),
+ SchedulerTest("bug-1573", "Partial stop of a group with two children"),
+ SchedulerTest("bug-1718", "Mandatory group ordering - Stop group_FUN"),
+ SchedulerTest("failed-sticky-group", "Move group on last member failure despite infinite stickiness"),
+ SchedulerTest("failed-sticky-anticolocated-group",
+ "Move group on last member failure despite infinite stickiness and optional anti-colocation"),
+ SchedulerTest("bug-lf-2619", "Move group on clone failure"),
+ SchedulerTest("group-fail", "Ensure stop order is preserved for partially active groups"),
+ SchedulerTest("group-unmanaged", "No need to restart r115 because r114 is unmanaged"),
+ SchedulerTest("group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails"),
+ SchedulerTest("partial-unmanaged-group", "New member in partially unmanaged group"),
+ SchedulerTest("group-dependents", "Account for the location preferences of things colocated with a group"),
+ SchedulerTest("group-stop-ordering", "Ensure blocked group member stop does not force other member stops"),
+ SchedulerTest("colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged"),
+ SchedulerTest("coloc-with-inner-group-member", "Consider explicit colocations with inner group members"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("rsc_dep1", "Must not"),
+ SchedulerTest("rsc_dep3", "Must"),
+ SchedulerTest("rsc_dep5", "Must not 3"),
+ SchedulerTest("rsc_dep7", "Must 3"),
+ SchedulerTest("rsc_dep10", "Must (but cant)"),
+ SchedulerTest("rsc_dep2", "Must (running)"),
+ SchedulerTest("rsc_dep8", "Must (running : alt)"),
+ SchedulerTest("rsc_dep4", "Must (running + move)"),
+ SchedulerTest("asymmetric", "Asymmetric - require explicit location constraints"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("orphan-0", "Orphan ignore"),
+ SchedulerTest("orphan-1", "Orphan stop"),
+ SchedulerTest("orphan-2", "Orphan stop, remove failcount"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("params-0", "Params: No change"),
+ SchedulerTest("params-1", "Params: Changed"),
+ SchedulerTest("params-2", "Params: Resource definition"),
+ SchedulerTest("params-3", "Params: Restart instead of reload if start pending"),
+ SchedulerTest("params-4", "Params: Reload"),
+ SchedulerTest("params-5", "Params: Restart based on probe digest"),
+ SchedulerTest("novell-251689", "Resource definition change + target_role=stopped"),
+ SchedulerTest("bug-lf-2106", "Restart all anonymous clone instances after config change"),
+ SchedulerTest("params-6", "Params: Detect reload in previously migrated resource"),
+ SchedulerTest("nvpair-id-ref", "Support id-ref in nvpair with optional name"),
+ SchedulerTest("not-reschedule-unneeded-monitor",
+ "Do not reschedule unneeded monitors while resource definitions have changed"),
+ SchedulerTest("reload-becomes-restart", "Cancel reload if restart becomes required"),
+ SchedulerTest("restart-with-extra-op-params", "Restart if with extra operation parameters upon changes of any"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("target-0", "Target Role : baseline"),
+ SchedulerTest("target-1", "Target Role : promoted"),
+ SchedulerTest("target-2", "Target Role : invalid"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("base-score", "Set a node's default score for all nodes"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("date-1", "Dates", ["-t", "2005-020"]),
+ SchedulerTest("date-2", "Date Spec - Pass", ["-t", "2005-020T12:30"]),
+ SchedulerTest("date-3", "Date Spec - Fail", ["-t", "2005-020T11:30"]),
+ SchedulerTest("origin", "Timing of recurring operations", ["-t", "2014-05-07 00:28:00"]),
+ SchedulerTest("probe-0", "Probe (anon clone)"),
+ SchedulerTest("probe-1", "Pending Probe"),
+ SchedulerTest("probe-2", "Correctly re-probe cloned groups"),
+ SchedulerTest("probe-3", "Probe (pending node)"),
+ SchedulerTest("probe-4", "Probe (pending node + stopped resource)"),
+ SchedulerTest("probe-pending-node", "Probe (pending node + unmanaged resource)"),
+ SchedulerTest("failed-probe-primitive", "Maskable vs. unmaskable probe failures on primitive resources"),
+ SchedulerTest("failed-probe-clone", "Maskable vs. unmaskable probe failures on cloned resources"),
+ SchedulerTest("expired-failed-probe-primitive", "Maskable, expired probe failure on primitive resources"),
+ SchedulerTest("standby", "Standby"),
+ SchedulerTest("comments", "Comments"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("one-or-more-0", "Everything starts"),
+ SchedulerTest("one-or-more-1", "Nothing starts because of A"),
+ SchedulerTest("one-or-more-2", "D can start because of C"),
+ SchedulerTest("one-or-more-3", "D cannot start because of B and C"),
+ SchedulerTest("one-or-more-4", "D cannot start because of target-role"),
+ SchedulerTest("one-or-more-5", "Start A and F even though C and D are stopped"),
+ SchedulerTest("one-or-more-6", "Leave A running even though B is stopped"),
+ SchedulerTest("one-or-more-7", "Leave A running even though C is stopped"),
+ SchedulerTest("bug-5140-require-all-false", "Allow basegrp:0 to stop"),
+ SchedulerTest("clone-require-all-1", "clone B starts node 3 and 4"),
+ SchedulerTest("clone-require-all-2", "clone B remains stopped everywhere"),
+ SchedulerTest("clone-require-all-3", "clone B stops everywhere because A stops everywhere"),
+ SchedulerTest("clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining"),
+ SchedulerTest("clone-require-all-5", "clone B starts on node 1 3 and 4"),
+ SchedulerTest("clone-require-all-6", "clone B remains active after shutting down instances of A"),
+ SchedulerTest("clone-require-all-7",
+ "clone A and B both start at the same time. all instances of A start before B"),
+ SchedulerTest("clone-require-all-no-interleave-1", "C starts everywhere after A and B"),
+ SchedulerTest("clone-require-all-no-interleave-2",
+ "C starts on nodes 1, 2, and 4 with only one active instance of B"),
+ SchedulerTest("clone-require-all-no-interleave-3",
+ "C remains active when instance of B is stopped on one node and started on another"),
+ SchedulerTest("one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("location-date-rules-1", "Use location constraints with ineffective date-based rules"),
+ SchedulerTest("location-date-rules-2", "Use location constraints with effective date-based rules"),
+ SchedulerTest("nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules"),
+ SchedulerTest("value-source", "Use location constraints with node attribute expressions using value-source"),
+ SchedulerTest("rule-dbl-as-auto-number-match",
+ "Floating-point rule values default to number comparison: match"),
+ SchedulerTest("rule-dbl-as-auto-number-no-match",
+ "Floating-point rule values default to number comparison: no match"),
+ SchedulerTest("rule-dbl-as-integer-match",
+ "Floating-point rule values set to integer comparison: match"),
+ SchedulerTest("rule-dbl-as-integer-no-match",
+ "Floating-point rule values set to integer comparison: no match"),
+ SchedulerTest("rule-dbl-as-number-match",
+ "Floating-point rule values set to number comparison: match"),
+ SchedulerTest("rule-dbl-as-number-no-match",
+ "Floating-point rule values set to number comparison: no match"),
+ SchedulerTest("rule-dbl-parse-fail-default-str-match",
+ "Floating-point rule values fail to parse, default to string "
+ "comparison: match"),
+ SchedulerTest("rule-dbl-parse-fail-default-str-no-match",
+ "Floating-point rule values fail to parse, default to string "
+ "comparison: no match"),
+ SchedulerTest("rule-int-as-auto-integer-match",
+ "Integer rule values default to integer comparison: match"),
+ SchedulerTest("rule-int-as-auto-integer-no-match",
+ "Integer rule values default to integer comparison: no match"),
+ SchedulerTest("rule-int-as-integer-match",
+ "Integer rule values set to integer comparison: match"),
+ SchedulerTest("rule-int-as-integer-no-match",
+ "Integer rule values set to integer comparison: no match"),
+ SchedulerTest("rule-int-as-number-match",
+ "Integer rule values set to number comparison: match"),
+ SchedulerTest("rule-int-as-number-no-match",
+ "Integer rule values set to number comparison: no match"),
+ SchedulerTest("rule-int-parse-fail-default-str-match",
+ "Integer rule values fail to parse, default to string "
+ "comparison: match"),
+ SchedulerTest("rule-int-parse-fail-default-str-no-match",
+ "Integer rule values fail to parse, default to string "
+ "comparison: no match"),
+ SchedulerTest("timeout-by-node", "Start timeout varies by node"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("order1", "Order start 1"),
+ SchedulerTest("order2", "Order start 2"),
+ SchedulerTest("order3", "Order stop"),
+ SchedulerTest("order4", "Order (multiple)"),
+ SchedulerTest("order5", "Order (move)"),
+ SchedulerTest("order6", "Order (move w/ restart)"),
+ SchedulerTest("order7", "Order (mandatory)"),
+ SchedulerTest("order-optional", "Order (score=0)"),
+ SchedulerTest("order-required", "Order (score=INFINITY)"),
+ SchedulerTest("bug-lf-2171", "Prevent group start when clone is stopped"),
+ SchedulerTest("order-clone", "Clone ordering should be able to prevent startup of dependent clones"),
+ SchedulerTest("order-sets", "Ordering for resource sets"),
+ SchedulerTest("order-serialize", "Serialize resources without inhibiting migration"),
+ SchedulerTest("order-serialize-set", "Serialize a set of resources without inhibiting migration"),
+ SchedulerTest("clone-order-primitive", "Order clone start after a primitive"),
+ SchedulerTest("clone-order-16instances", "Verify ordering of 16 cloned resources"),
+ SchedulerTest("order-optional-keyword", "Order (optional keyword)"),
+ SchedulerTest("order-mandatory", "Order (mandatory keyword)"),
+ SchedulerTest("bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones"),
+ SchedulerTest("ordered-set-basic-startup", "Constraint set with default order settings"),
+ SchedulerTest("ordered-set-natural", "Allow natural set ordering"),
+ SchedulerTest("order-wrong-kind", "Order (error)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("coloc-loop", "Colocation - loop"),
+ SchedulerTest("coloc-many-one", "Colocation - many-to-one"),
+ SchedulerTest("coloc-list", "Colocation - many-to-one with list"),
+ SchedulerTest("coloc-group", "Colocation - groups"),
+ SchedulerTest("coloc-unpromoted-anti", "Anti-colocation with unpromoted shouldn't prevent promoted colocation"),
+ SchedulerTest("coloc-attr", "Colocation based on node attributes"),
+ SchedulerTest("coloc-negative-group", "Negative colocation with a group"),
+ SchedulerTest("coloc-intra-set", "Intra-set colocation"),
+ SchedulerTest("bug-lf-2435", "Colocation sets with a negative score"),
+ SchedulerTest("coloc-clone-stays-active",
+ "Ensure clones don't get stopped/demoted because a dependent must stop"),
+ SchedulerTest("coloc_fp_logic", "Verify floating point calculations in colocation are working"),
+ SchedulerTest("colo_promoted_w_native",
+ "cl#5070 - Verify promotion order is affected when colocating promoted with primitive"),
+ SchedulerTest("colo_unpromoted_w_native",
+ "cl#5070 - Verify promotion order is affected when colocating unpromoted with primitive"),
+ SchedulerTest("anti-colocation-order",
+ "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node"),
+ SchedulerTest("anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations"),
+ SchedulerTest("anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations"),
+ SchedulerTest("group-anticolocation", "Group with failed last member anti-colocated with another group"),
+ SchedulerTest("group-anticolocation-2",
+ "Group with failed last member anti-colocated with another sticky group"),
+ SchedulerTest("group-anticolocation-3",
+ "Group with failed last member mandatorily anti-colocated with another group"),
+ SchedulerTest("group-anticolocation-4",
+ "Group with failed last member anti-colocated without influence with another group"),
+ SchedulerTest("group-anticolocation-5",
+ "Group with failed last member anti-colocated with another group (third node allowed)"),
+ SchedulerTest("group-colocation-failure",
+ "Group with sole member failed, colocated with another group"),
+ SchedulerTest("enforce-colo1", "Always enforce B with A INFINITY"),
+ SchedulerTest("complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)"),
+ SchedulerTest("coloc-dependee-should-stay", "Stickiness outweighs group colocation"),
+ SchedulerTest("coloc-dependee-should-move", "Group colocation outweighs stickiness"),
+ SchedulerTest("colocation-influence", "Respect colocation influence"),
+ SchedulerTest("colocation-priority-group", "Apply group colocations in order of primary priority"),
+ SchedulerTest("colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score"),
+ SchedulerTest("promoted-with-blocked", "Promoted role colocated with a resource with blocked start"),
+ SchedulerTest("primitive-with-group-with-clone",
+ "Consider group dependent when colocating with clone"),
+ SchedulerTest("primitive-with-group-with-promoted",
+ "Consider group dependent when colocating with promoted role"),
+ SchedulerTest("primitive-with-unrunnable-group",
+ "Block primitive colocated with group that can't start"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("rsc-sets-seq-true", "Resource Sets - sequential=false"),
+ SchedulerTest("rsc-sets-seq-false", "Resource Sets - sequential=true"),
+ SchedulerTest("rsc-sets-clone", "Resource Sets - Clone"),
+ SchedulerTest("rsc-sets-promoted", "Resource Sets - Promoted"),
+ SchedulerTest("rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("attrs1", "string: eq (and)"),
+ SchedulerTest("attrs2", "string: lt / gt (and)"),
+ SchedulerTest("attrs3", "string: ne (or)"),
+ SchedulerTest("attrs4", "string: exists"),
+ SchedulerTest("attrs5", "string: not_exists"),
+ SchedulerTest("attrs6", "is_dc: true"),
+ SchedulerTest("attrs7", "is_dc: false"),
+ SchedulerTest("attrs8", "score_attribute"),
+ SchedulerTest("per-node-attrs", "Per node resource parameters"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("mon-rsc-1", "Schedule Monitor - start"),
+ SchedulerTest("mon-rsc-2", "Schedule Monitor - move"),
+ SchedulerTest("mon-rsc-3", "Schedule Monitor - pending start"),
+ SchedulerTest("mon-rsc-4", "Schedule Monitor - move/pending start"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("rec-rsc-0", "Resource Recover - no start"),
+ SchedulerTest("rec-rsc-1", "Resource Recover - start"),
+ SchedulerTest("rec-rsc-2", "Resource Recover - monitor"),
+ SchedulerTest("rec-rsc-3", "Resource Recover - stop - ignore"),
+ SchedulerTest("rec-rsc-4", "Resource Recover - stop - block"),
+ SchedulerTest("rec-rsc-5", "Resource Recover - stop - fence"),
+ SchedulerTest("rec-rsc-6", "Resource Recover - multiple - restart"),
+ SchedulerTest("rec-rsc-7", "Resource Recover - multiple - stop"),
+ SchedulerTest("rec-rsc-8", "Resource Recover - multiple - block"),
+ SchedulerTest("rec-rsc-9", "Resource Recover - group/group"),
+ SchedulerTest("stop-unexpected", "Recover multiply active group with stop_unexpected"),
+ SchedulerTest("stop-unexpected-2", "Resource multiply active primitve with stop_unexpected"),
+ SchedulerTest("monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor"),
+ SchedulerTest("stop-failure-no-quorum", "Stop failure without quorum"),
+ SchedulerTest("stop-failure-no-fencing", "Stop failure without fencing available"),
+ SchedulerTest("stop-failure-with-fencing", "Stop failure with fencing available"),
+ SchedulerTest("multiple-active-block-group", "Support of multiple-active=block for resource groups"),
+ SchedulerTest("multiple-monitor-one-failed",
+ "Consider resource failed if any of the configured monitor operations failed"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("quorum-1", "No quorum - ignore"),
+ SchedulerTest("quorum-2", "No quorum - freeze"),
+ SchedulerTest("quorum-3", "No quorum - stop"),
+ SchedulerTest("quorum-4", "No quorum - start anyway"),
+ SchedulerTest("quorum-5", "No quorum - start anyway (group)"),
+ SchedulerTest("quorum-6", "No quorum - start anyway (clone)"),
+ SchedulerTest("bug-cl-5212", "No promotion with no-quorum-policy=freeze"),
+ SchedulerTest("suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary"),
+ SchedulerTest("suicide-not-needed-initial-quorum",
+ "no-quorum-policy=suicide: suicide not necessary at initial quorum"),
+ SchedulerTest("suicide-not-needed-never-quorate",
+ "no-quorum-policy=suicide: suicide not necessary if never quorate"),
+ SchedulerTest("suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("rec-node-1", "Node Recover - Startup - no fence"),
+ SchedulerTest("rec-node-2", "Node Recover - Startup - fence"),
+ SchedulerTest("rec-node-3", "Node Recover - HA down - no fence"),
+ SchedulerTest("rec-node-4", "Node Recover - HA down - fence"),
+ SchedulerTest("rec-node-5", "Node Recover - CRM down - no fence"),
+ SchedulerTest("rec-node-6", "Node Recover - CRM down - fence"),
+ SchedulerTest("rec-node-7", "Node Recover - no quorum - ignore"),
+ SchedulerTest("rec-node-8", "Node Recover - no quorum - freeze"),
+ SchedulerTest("rec-node-9", "Node Recover - no quorum - stop"),
+ SchedulerTest("rec-node-10", "Node Recover - no quorum - stop w/fence"),
+ SchedulerTest("rec-node-11", "Node Recover - CRM down w/ group - fence"),
+ SchedulerTest("rec-node-12", "Node Recover - nothing active - fence"),
+ SchedulerTest("rec-node-13", "Node Recover - failed resource + shutdown - fence"),
+ SchedulerTest("rec-node-15", "Node Recover - unknown lrm section"),
+ SchedulerTest("rec-node-14", "Serialize all stonith's"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("multi1", "Multiple Active (stop/start)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("migrate-begin", "Normal migration"),
+ SchedulerTest("migrate-success", "Completed migration"),
+ SchedulerTest("migrate-partial-1", "Completed migration, missing stop on source"),
+ SchedulerTest("migrate-partial-2", "Successful migrate_to only"),
+ SchedulerTest("migrate-partial-3", "Successful migrate_to only, target down"),
+ SchedulerTest("migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from"),
+ SchedulerTest("bug-5186-partial-migrate", "Handle partial migration when src node loses membership"),
+ SchedulerTest("migrate-fail-2", "Failed migrate_from"),
+ SchedulerTest("migrate-fail-3", "Failed migrate_from + stop on source"),
+ SchedulerTest("migrate-fail-4",
+ "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"),
+ SchedulerTest("migrate-fail-5", "Failed migrate_from + stop on source and target"),
+ SchedulerTest("migrate-fail-6", "Failed migrate_to"),
+ SchedulerTest("migrate-fail-7", "Failed migrate_to + stop on source"),
+ SchedulerTest("migrate-fail-8",
+ "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"),
+ SchedulerTest("migrate-fail-9", "Failed migrate_to + stop on source and target"),
+ SchedulerTest("migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node"),
+ SchedulerTest("migrate-stop", "Migration in a stopping stack"),
+ SchedulerTest("migrate-start", "Migration in a starting stack"),
+ SchedulerTest("migrate-stop_start", "Migration in a restarting stack"),
+ SchedulerTest("migrate-stop-complex", "Migration in a complex stopping stack"),
+ SchedulerTest("migrate-start-complex", "Migration in a complex starting stack"),
+ SchedulerTest("migrate-stop-start-complex", "Migration in a complex moving stack"),
+ SchedulerTest("migrate-shutdown", "Order the post-migration 'stop' before node shutdown"),
+ SchedulerTest("migrate-1", "Migrate (migrate)"),
+ SchedulerTest("migrate-2", "Migrate (stable)"),
+ SchedulerTest("migrate-3", "Migrate (failed migrate_to)"),
+ SchedulerTest("migrate-4", "Migrate (failed migrate_from)"),
+ SchedulerTest("novell-252693", "Migration in a stopping stack"),
+ SchedulerTest("novell-252693-2", "Migration in a starting stack"),
+ SchedulerTest("novell-252693-3", "Non-Migration in a starting and stopping stack"),
+ SchedulerTest("bug-1820", "Migration in a group"),
+ SchedulerTest("bug-1820-1", "Non-migration in a group"),
+ SchedulerTest("migrate-5", "Primitive migration with a clone"),
+ SchedulerTest("migrate-fencing", "Migration after Fencing"),
+ SchedulerTest("migrate-both-vms", "Migrate two VMs that have no colocation"),
+ SchedulerTest("migration-behind-migrating-remote", "Migrate resource behind migrating remote connection"),
+ SchedulerTest("1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B"),
+ SchedulerTest("2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B"),
+ SchedulerTest("3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both"),
+ SchedulerTest("4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable"),
+ SchedulerTest("5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable"),
+ SchedulerTest("6-migrate-group", "Advanced migrate logic, migrate a group"),
+ SchedulerTest("7-migrate-group-one-unmigratable",
+ "Advanced migrate logic, migrate group mixed with allow-migrate true/false"),
+ SchedulerTest("8-am-then-bm-a-migrating-b-stopping",
+ "Advanced migrate logic, A then B, A migrating, B stopping"),
+ SchedulerTest("9-am-then-bm-b-migrating-a-stopping",
+ "Advanced migrate logic, A then B, B migrate, A stopping"),
+ SchedulerTest("10-a-then-bm-b-move-a-clone",
+ "Advanced migrate logic, A clone then B, migrate B while stopping A"),
+ SchedulerTest("11-a-then-bm-b-move-a-clone-starting",
+ "Advanced migrate logic, A clone then B, B moving while A is start/stopping"),
+ SchedulerTest("a-promote-then-b-migrate", "A promote then B start. migrate B"),
+ SchedulerTest("a-demote-then-b-migrate", "A demote then B stop. migrate B"),
+ SchedulerTest("probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins"),
+ SchedulerTest("probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed"),
+ SchedulerTest("partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration"),
+ SchedulerTest("migration-intermediary-cleaned",
+ "Probe live-migration intermediary with no history"),
+ SchedulerTest("bug-lf-2422", "Dependency on partially active group - stop ocfs:*"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node"),
+ SchedulerTest("clone-anon-probe-2", "Avoid needless re-probing of anonymous clones"),
+ SchedulerTest("clone-anon-failcount", "Merge failcounts for anonymous clones"),
+ SchedulerTest("force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous"),
+ SchedulerTest("anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending"),
+ SchedulerTest("inc0", "Incarnation start"),
+ SchedulerTest("inc1", "Incarnation start order"),
+ SchedulerTest("inc2", "Incarnation silent restart, stop, move"),
+ SchedulerTest("inc3", "Inter-incarnation ordering, silent restart, stop, move"),
+ SchedulerTest("inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)"),
+ SchedulerTest("inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)"),
+ SchedulerTest("inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)"),
+ SchedulerTest("inc7", "Clone colocation"),
+ SchedulerTest("inc8", "Clone anti-colocation"),
+ SchedulerTest("inc9", "Non-unique clone"),
+ SchedulerTest("inc10", "Non-unique clone (stop)"),
+ SchedulerTest("inc11", "Primitive colocation with clones"),
+ SchedulerTest("inc12", "Clone shutdown"),
+ SchedulerTest("cloned-group", "Make sure only the correct number of cloned groups are started"),
+ SchedulerTest("cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder"),
+ SchedulerTest("clone-no-shuffle", "Don't prioritize allocation of instances that must be moved"),
+ SchedulerTest("clone-recover-no-shuffle-1",
+ "Don't shuffle instances when starting a new primitive instance"),
+ SchedulerTest("clone-recover-no-shuffle-2",
+ "Don't shuffle instances when starting a new group instance"),
+ SchedulerTest("clone-recover-no-shuffle-3",
+ "Don't shuffle instances when starting a new bundle instance"),
+ SchedulerTest("clone-recover-no-shuffle-4",
+ "Don't shuffle instances when starting a new primitive instance with "
+ "location preference "),
+ SchedulerTest("clone-recover-no-shuffle-5",
+ "Don't shuffle instances when starting a new group instance with "
+ "location preference"),
+ SchedulerTest("clone-recover-no-shuffle-6",
+ "Don't shuffle instances when starting a new bundle instance with "
+ "location preference"),
+ SchedulerTest("clone-recover-no-shuffle-7",
+ "Don't shuffle instances when starting a new primitive instance that "
+ "will be promoted"),
+ SchedulerTest("clone-recover-no-shuffle-8",
+ "Don't shuffle instances when starting a new group instance that "
+ "will be promoted "),
+ SchedulerTest("clone-recover-no-shuffle-9",
+ "Don't shuffle instances when starting a new bundle instance that "
+ "will be promoted "),
+ SchedulerTest("clone-recover-no-shuffle-10",
+ "Don't shuffle instances when starting a new primitive instance that "
+ "won't be promoted"),
+ SchedulerTest("clone-recover-no-shuffle-11",
+ "Don't shuffle instances when starting a new group instance that "
+ "won't be promoted "),
+ SchedulerTest("clone-recover-no-shuffle-12",
+ "Don't shuffle instances when starting a new bundle instance that "
+ "won't be promoted "),
+ SchedulerTest("clone-max-zero", "Orphan processing with clone-max=0"),
+ SchedulerTest("clone-anon-dup",
+ "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"),
+ SchedulerTest("bug-lf-2160", "Don't shuffle clones due to colocation"),
+ SchedulerTest("bug-lf-2213", "clone-node-max enforcement for cloned groups"),
+ SchedulerTest("bug-lf-2153", "Clone ordering constraints"),
+ SchedulerTest("bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"),
+ SchedulerTest("bug-lf-2317", "Avoid needless restart of primitive depending on a clone"),
+ SchedulerTest("bug-lf-2453", "Enforce mandatory clone ordering without colocation"),
+ SchedulerTest("bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups"),
+ SchedulerTest("bug-lf-2544", "Balanced clone placement"),
+ SchedulerTest("bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0"),
+ SchedulerTest("bug-lf-2574", "Avoid clone shuffle"),
+ SchedulerTest("bug-lf-2581", "Avoid group restart due to unrelated clone (re)start"),
+ SchedulerTest("bug-cl-5168", "Don't shuffle clones"),
+ SchedulerTest("bug-cl-5170", "Prevent clone from starting with on-fail=block"),
+ SchedulerTest("clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block"),
+ SchedulerTest("clone-interleave-1",
+ "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)"),
+ SchedulerTest("clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)"),
+ SchedulerTest("clone-interleave-3",
+ "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"),
+ SchedulerTest("rebalance-unique-clones", "Rebalance unique clone instances with no stickiness"),
+ SchedulerTest("clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery"),
+ SchedulerTest("clone-requires-quorum",
+ "Clone with requires=quorum with presumed-inactive instance on failed node"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("cloned_start_one", "order first clone then clone... first clone_min=2"),
+ SchedulerTest("cloned_start_two", "order first clone then clone... first clone_min=2"),
+ SchedulerTest("cloned_stop_one", "order first clone then clone... first clone_min=2"),
+ SchedulerTest("cloned_stop_two", "order first clone then clone... first clone_min=2"),
+ SchedulerTest("clone_min_interleave_start_one",
+ "order first clone then clone... first clone_min=2 and then has interleave=true"),
+ SchedulerTest("clone_min_interleave_start_two",
+ "order first clone then clone... first clone_min=2 and then has interleave=true"),
+ SchedulerTest("clone_min_interleave_stop_one",
+ "order first clone then clone... first clone_min=2 and then has interleave=true"),
+ SchedulerTest("clone_min_interleave_stop_two",
+ "order first clone then clone... first clone_min=2 and then has interleave=true"),
+ SchedulerTest("clone_min_start_one", "order first clone then primitive... first clone_min=2"),
+ SchedulerTest("clone_min_start_two", "order first clone then primitive... first clone_min=2"),
+ SchedulerTest("clone_min_stop_all", "order first clone then primitive... first clone_min=2"),
+ SchedulerTest("clone_min_stop_one", "order first clone then primitive... first clone_min=2"),
+ SchedulerTest("clone_min_stop_two", "order first clone then primitive... first clone_min=2"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("unfence-startup", "Clean unfencing"),
+ SchedulerTest("unfence-definition", "Unfencing when the agent changes"),
+ SchedulerTest("unfence-parameters", "Unfencing when the agent parameters changes"),
+ SchedulerTest("unfence-device", "Unfencing when a cluster has only fence devices"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("promoted-0", "Stopped -> Unpromoted"),
+ SchedulerTest("promoted-1", "Stopped -> Promote"),
+ SchedulerTest("promoted-2", "Stopped -> Promote : notify"),
+ SchedulerTest("promoted-3", "Stopped -> Promote : promoted location"),
+ SchedulerTest("promoted-4", "Started -> Promote : promoted location"),
+ SchedulerTest("promoted-5", "Promoted -> Promoted"),
+ SchedulerTest("promoted-6", "Promoted -> Promoted (2)"),
+ SchedulerTest("promoted-7", "Promoted -> Fenced"),
+ SchedulerTest("promoted-8", "Promoted -> Fenced -> Moved"),
+ SchedulerTest("promoted-9", "Stopped + Promotable + No quorum"),
+ SchedulerTest("promoted-10", "Stopped -> Promotable : notify with monitor"),
+ SchedulerTest("promoted-11", "Stopped -> Promote : colocation"),
+ SchedulerTest("novell-239082", "Demote/Promote ordering"),
+ SchedulerTest("novell-239087", "Stable promoted placement"),
+ SchedulerTest("promoted-12", "Promotion based solely on rsc_location constraints"),
+ SchedulerTest("promoted-13", "Include preferences of colocated resources when placing promoted"),
+ SchedulerTest("promoted-demote", "Ordering when actions depends on demoting an unpromoted resource"),
+ SchedulerTest("promoted-ordering", "Prevent resources from starting that need a promoted"),
+ SchedulerTest("bug-1765", "Verify promoted-with-promoted colocation does not stop unpromoted instances"),
+ SchedulerTest("promoted-group", "Promotion of cloned groups"),
+ SchedulerTest("bug-lf-1852", "Don't shuffle promotable instances unnecessarily"),
+ SchedulerTest("promoted-failed-demote", "Don't retry failed demote actions"),
+ SchedulerTest("promoted-failed-demote-2", "Don't retry failed demote actions (notify=false)"),
+ SchedulerTest("promoted-depend",
+ "Ensure resources that depend on promoted instance don't get allocated until that does"),
+ SchedulerTest("promoted-reattach", "Re-attach to a running promoted"),
+ SchedulerTest("promoted-allow-start", "Don't include promoted score if it would prevent allocation"),
+ SchedulerTest("promoted-colocation",
+ "Allow promoted instances placemaker to be influenced by colocation constraints"),
+ SchedulerTest("promoted-pseudo", "Make sure promote/demote pseudo actions are created correctly"),
+ SchedulerTest("promoted-role", "Prevent target-role from promoting more than promoted-max instances"),
+ SchedulerTest("bug-lf-2358", "Anti-colocation of promoted instances"),
+ SchedulerTest("promoted-promotion-constraint", "Mandatory promoted colocation constraints"),
+ SchedulerTest("unmanaged-promoted", "Ensure role is preserved for unmanaged resources"),
+ SchedulerTest("promoted-unmanaged-monitor", "Start correct monitor for unmanaged promoted instances"),
+ SchedulerTest("promoted-demote-2", "Demote does not clear past failure"),
+ SchedulerTest("promoted-move", "Move promoted based on failure of colocated group"),
+ SchedulerTest("promoted-probed-score", "Observe the promotion score of probed resources"),
+ SchedulerTest("colocation_constraint_stops_promoted",
+ "cl#5054 - Ensure promoted is demoted when stopped by colocation constraint"),
+ SchedulerTest("colocation_constraint_stops_unpromoted",
+ "cl#5054 - Ensure unpromoted is not demoted when stopped by colocation constraint"),
+ SchedulerTest("order_constraint_stops_promoted",
+ "cl#5054 - Ensure promoted is demoted when stopped by order constraint"),
+ SchedulerTest("order_constraint_stops_unpromoted",
+ "cl#5054 - Ensure unpromoted is not demoted when stopped by order constraint"),
+ SchedulerTest("promoted_monitor_restart", "cl#5072 - Ensure promoted monitor operation will start after promotion"),
+ SchedulerTest("bug-rh-880249", "Handle replacement of an m/s resource with a primitive"),
+ SchedulerTest("bug-5143-ms-shuffle", "Prevent promoted instance shuffling due to promotion score"),
+ SchedulerTest("promoted-demote-block", "Block promotion if demote fails with on-fail=block"),
+ SchedulerTest("promoted-dependent-ban",
+ "Don't stop instances from being active because a dependent is banned from that host"),
+ SchedulerTest("promoted-stop", "Stop instances due to location constraint with role=Started"),
+ SchedulerTest("promoted-partially-demoted-group", "Allow partially demoted group to finish demoting"),
+ SchedulerTest("bug-cl-5213", "Ensure role colocation with -INFINITY is enforced"),
+ SchedulerTest("bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted"),
+ SchedulerTest("promoted-asymmetrical-order",
+ "Fix the behaviors of multi-state resources with asymmetrical ordering"),
+ SchedulerTest("promoted-notify", "Promotion with notifications"),
+ SchedulerTest("promoted-score-startup", "Use permanent promoted scores without LRM history"),
+ SchedulerTest("failed-demote-recovery", "Recover resource in unpromoted role after demote fails"),
+ SchedulerTest("failed-demote-recovery-promoted", "Recover resource in promoted role after demote fails"),
+ SchedulerTest("on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes"),
+ SchedulerTest("on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node"),
+ SchedulerTest("on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion"),
+ SchedulerTest("on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes"),
+ SchedulerTest("no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\""),
+ SchedulerTest("no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run"),
+ SchedulerTest("leftover-pending-monitor", "Prevent a leftover pending monitor from causing unexpected stop of other instances"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("history-1", "Correctly parse stateful-1 resource state"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("managed-0", "Managed (reference)"),
+ SchedulerTest("managed-1", "Not managed - down"),
+ SchedulerTest("managed-2", "Not managed - up"),
+ SchedulerTest("bug-5028", "Shutdown should block if anything depends on an unmanaged resource"),
+ SchedulerTest("bug-5028-detach", "Ensure detach still works"),
+ SchedulerTest("bug-5028-bottom",
+ "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"),
+ SchedulerTest("unmanaged-stop-1",
+ "cl#5155 - Block the stop of resources if any depending resource is unmanaged"),
+ SchedulerTest("unmanaged-stop-2",
+ "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged"),
+ SchedulerTest("unmanaged-stop-3",
+ "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged"),
+ SchedulerTest("unmanaged-stop-4",
+ "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged"),
+ SchedulerTest("unmanaged-block-restart",
+ "Block restart of resources if any dependent resource in a group is unmanaged"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("interleave-0", "Interleave (reference)"),
+ SchedulerTest("interleave-1", "coloc - not interleaved"),
+ SchedulerTest("interleave-2", "coloc - interleaved"),
+ SchedulerTest("interleave-3", "coloc - interleaved (2)"),
+ SchedulerTest("interleave-pseudo-stop", "Interleaved clone during stonith"),
+ SchedulerTest("interleave-stop", "Interleaved clone during stop"),
+ SchedulerTest("interleave-restart", "Interleaved clone during dependency restart"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("notify-0", "Notify reference"),
+ SchedulerTest("notify-1", "Notify simple"),
+ SchedulerTest("notify-2", "Notify simple, confirm"),
+ SchedulerTest("notify-3", "Notify move, confirm"),
+ SchedulerTest("novell-239079", "Notification priority"),
+ # SchedulerTest("notify-2", "Notify - 764"),
+ SchedulerTest("notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action"),
+ SchedulerTest("route-remote-notify", "Route remote notify actions through correct cluster node"),
+ SchedulerTest("notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("594", "OSDL #594 - Unrunnable actions scheduled in transition"),
+ SchedulerTest("662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"),
+ SchedulerTest("696", "OSDL #696 - CRM starts stonith RA without monitor"),
+ SchedulerTest("726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"),
+ SchedulerTest("735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"),
+ SchedulerTest("764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"),
+ SchedulerTest("797", "OSDL #797 - Assert triggered: task_id_i > max_call_id"),
+ SchedulerTest("829", "OSDL #829"),
+ SchedulerTest("994",
+ "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"),
+ SchedulerTest("994-2", "OSDL #994 - with a dependent resource"),
+ SchedulerTest("1360", "OSDL #1360 - Clone stickiness"),
+ SchedulerTest("1484", "OSDL #1484 - on_fail=stop"),
+ SchedulerTest("1494", "OSDL #1494 - Clone stability"),
+ SchedulerTest("unrunnable-1", "Unrunnable"),
+ SchedulerTest("unrunnable-2", "Unrunnable 2"),
+ SchedulerTest("stonith-0", "Stonith loop - 1"),
+ SchedulerTest("stonith-1", "Stonith loop - 2"),
+ SchedulerTest("stonith-2", "Stonith loop - 3"),
+ SchedulerTest("stonith-3", "Stonith startup"),
+ SchedulerTest("stonith-4", "Stonith node state"),
+ SchedulerTest("dc-fence-ordering", "DC needs fencing while other nodes are shutting down"),
+ SchedulerTest("bug-1572-1", "Recovery of groups depending on promotable role"),
+ SchedulerTest("bug-1572-2", "Recovery of groups depending on promotable role when promoted is not re-promoted"),
+ SchedulerTest("bug-1685", "Depends-on-promoted ordering"),
+ SchedulerTest("bug-1822", "Don't promote partially active groups"),
+ SchedulerTest("bug-pm-11", "New resource added to a m/s group"),
+ SchedulerTest("bug-pm-12", "Recover only the failed portion of a cloned group"),
+ SchedulerTest("bug-n-387749", "Don't shuffle clone instances"),
+ SchedulerTest("bug-n-385265",
+ "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"),
+ SchedulerTest("bug-n-385265-2",
+ "Ensure groups are migrated instead of remaining partially active on the current node"),
+ SchedulerTest("bug-lf-1920", "Correctly handle probes that find active resources"),
+ SchedulerTest("bnc-515172", "Location constraint with multiple expressions"),
+ SchedulerTest("colocate-primitive-with-clone", "Optional colocation with a clone"),
+ SchedulerTest("use-after-free-merge", "Use-after-free in native_merge_weights"),
+ SchedulerTest("bug-lf-2551", "STONITH ordering for stop"),
+ SchedulerTest("bug-lf-2606", "Stonith implies demote"),
+ SchedulerTest("bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults"),
+ SchedulerTest("bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering"),
+ SchedulerTest("bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false"),
+ SchedulerTest("bug-5014-A-stop-B-started",
+ "Verify when A stops B does not stop if it has already started using symmetric=false"),
+ SchedulerTest("bug-5014-A-stopped-B-stopped",
+ "Verify when A is stopped and B has not started, B does not start before A using symmetric=false"),
+ SchedulerTest("bug-5014-CthenAthenB-C-stopped",
+ "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts"),
+ SchedulerTest("bug-5014-CLONE-A-start-B-start",
+ "Verify when A starts B starts using clone resources with symmetric=false"),
+ SchedulerTest("bug-5014-CLONE-A-stop-B-started",
+ "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false"),
+ SchedulerTest("bug-5014-GROUP-A-start-B-start",
+ "Verify when A starts B starts when using group resources with symmetric=false"),
+ SchedulerTest("bug-5014-GROUP-A-stopped-B-started",
+ "Verify when A stops B does not stop if it has already started using group resources with symmetric=false"),
+ SchedulerTest("bug-5014-GROUP-A-stopped-B-stopped",
+ "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false"),
+ SchedulerTest("bug-5014-ordered-set-symmetrical-false",
+ "Verify ordered sets work with symmetrical=false"),
+ SchedulerTest("bug-5014-ordered-set-symmetrical-true",
+ "Verify ordered sets work with symmetrical=true"),
+ SchedulerTest("clbz5007-promotable-colocation",
+ "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources"),
+ SchedulerTest("bug-5038", "Prevent restart of anonymous clones when clone-max decreases"),
+ SchedulerTest("bug-5025-1", "Automatically clean up failcount after resource config change with reload"),
+ SchedulerTest("bug-5025-2", "Make sure clear failcount action isn't set when config does not change"),
+ SchedulerTest("bug-5025-3", "Automatically clean up failcount after resource config change with restart"),
+ SchedulerTest("bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed"),
+ SchedulerTest("failcount", "Ensure failcounts are correctly expired"),
+ SchedulerTest("failcount-block", "Ensure failcounts are not expired when on-fail=block is present"),
+ SchedulerTest("per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent"),
+ SchedulerTest("on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold"),
+ SchedulerTest("monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart"),
+ SchedulerTest("monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop"),
+ SchedulerTest("bug-5059", "No need to restart p_stateful1:*"),
+ SchedulerTest("bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled"),
+ SchedulerTest("bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled"),
+ SchedulerTest("obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections"),
+ SchedulerTest("expire-non-blocked-failure",
+ "Ignore failure-timeout only if the failed operation has on-fail=block"),
+ SchedulerTest("asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources"),
+ SchedulerTest("asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource"),
+ SchedulerTest("start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing"),
+ SchedulerTest("order-expired-failure", "Order failcount cleanup after remote fencing"),
+ SchedulerTest("expired-stop-1", "Expired stop failure should not block resource"),
+ SchedulerTest("ignore_stonith_rsc_order1",
+ "cl#5056- Ignore order constraint between stonith and non-stonith rsc"),
+ SchedulerTest("ignore_stonith_rsc_order2",
+ "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith"),
+ SchedulerTest("ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group"),
+ SchedulerTest("ignore_stonith_rsc_order4",
+ "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group"),
+ SchedulerTest("honor_stonith_rsc_order1",
+ "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)"),
+ SchedulerTest("honor_stonith_rsc_order2",
+ "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)"),
+ SchedulerTest("honor_stonith_rsc_order3",
+ "cl#5056- Honor order constraint, stonith clones with nested pure stonith group"),
+ SchedulerTest("honor_stonith_rsc_order4",
+ "cl#5056- Honor order constraint, between two native stonith rscs"),
+ SchedulerTest("multiply-active-stonith", "Multiply active stonith"),
+ SchedulerTest("probe-timeout", "cl#5099 - Default probe timeout"),
+ SchedulerTest("order-first-probes",
+ "cl#5301 - respect order constraints when relevant resources are being probed"),
+ SchedulerTest("concurrent-fencing", "Allow performing fencing operations in parallel"),
+ SchedulerTest("priority-fencing-delay", "Delay fencing targeting the more significant node"),
+ SchedulerTest("pending-node-no-uname", "Do not fence a pending node that doesn't have an uname in node state yet"),
+ SchedulerTest("node-pending-timeout", "Fence a pending node that has reached `node-pending-timeout`"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("systemhealth1", "System Health () #1"),
+ SchedulerTest("systemhealth2", "System Health () #2"),
+ SchedulerTest("systemhealth3", "System Health () #3"),
+ SchedulerTest("systemhealthn1", "System Health (None) #1"),
+ SchedulerTest("systemhealthn2", "System Health (None) #2"),
+ SchedulerTest("systemhealthn3", "System Health (None) #3"),
+ SchedulerTest("systemhealthm1", "System Health (Migrate On Red) #1"),
+ SchedulerTest("systemhealthm2", "System Health (Migrate On Red) #2"),
+ SchedulerTest("systemhealthm3", "System Health (Migrate On Red) #3"),
+ SchedulerTest("systemhealtho1", "System Health (Only Green) #1"),
+ SchedulerTest("systemhealtho2", "System Health (Only Green) #2"),
+ SchedulerTest("systemhealtho3", "System Health (Only Green) #3"),
+ SchedulerTest("systemhealthp1", "System Health (Progessive) #1"),
+ SchedulerTest("systemhealthp2", "System Health (Progessive) #2"),
+ SchedulerTest("systemhealthp3", "System Health (Progessive) #3"),
+ SchedulerTest("allow-unhealthy-nodes", "System Health (migrate-on-red + allow-unhealth-nodes)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("utilization", "Placement Strategy - utilization"),
+ SchedulerTest("minimal", "Placement Strategy - minimal"),
+ SchedulerTest("balanced", "Placement Strategy - balanced"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("placement-stickiness", "Optimized Placement Strategy - stickiness"),
+ SchedulerTest("placement-priority", "Optimized Placement Strategy - priority"),
+ SchedulerTest("placement-location", "Optimized Placement Strategy - location"),
+ SchedulerTest("placement-capacity", "Optimized Placement Strategy - capacity"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("utilization-order1", "Utilization Order - Simple"),
+ SchedulerTest("utilization-order2", "Utilization Order - Complex"),
+ SchedulerTest("utilization-order3", "Utilization Order - Migrate"),
+ SchedulerTest("utilization-order4", "Utilization Order - Live Migration (bnc#695440)"),
+ SchedulerTest("utilization-complex", "Utilization with complex relationships"),
+ SchedulerTest("utilization-shuffle",
+ "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"),
+ SchedulerTest("load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)"),
+ SchedulerTest("load-stopped-loop-2",
+ "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("colocated-utilization-primitive-1", "Colocated Utilization - Primitive"),
+ SchedulerTest("colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node"),
+ SchedulerTest("colocated-utilization-group", "Colocated Utilization - Group"),
+ SchedulerTest("colocated-utilization-clone", "Colocated Utilization - Clone"),
+ SchedulerTest("utilization-check-allowed-nodes",
+ "Only check the capacities of the nodes that can run the resource"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources"),
+ SchedulerTest("node-maintenance-1", "cl#5128 - Node maintenance"),
+ SchedulerTest("node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)"),
+ SchedulerTest("shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly"),
+ SchedulerTest("rsc-maintenance", "Per-resource maintenance"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("not-installed-agent", "The resource agent is missing"),
+ SchedulerTest("not-installed-tools", "Something the resource agent needs is missing"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("stopped-monitor-00", "Stopped Monitor - initial start"),
+ SchedulerTest("stopped-monitor-01", "Stopped Monitor - failed started"),
+ SchedulerTest("stopped-monitor-02", "Stopped Monitor - started multi-up"),
+ SchedulerTest("stopped-monitor-03", "Stopped Monitor - stop started"),
+ SchedulerTest("stopped-monitor-04", "Stopped Monitor - failed stop"),
+ SchedulerTest("stopped-monitor-05", "Stopped Monitor - start unmanaged"),
+ SchedulerTest("stopped-monitor-06", "Stopped Monitor - unmanaged multi-up"),
+ SchedulerTest("stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up"),
+ SchedulerTest("stopped-monitor-08", "Stopped Monitor - migrate"),
+ SchedulerTest("stopped-monitor-09", "Stopped Monitor - unmanage started"),
+ SchedulerTest("stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up"),
+ SchedulerTest("stopped-monitor-11", "Stopped Monitor - stop unmanaged started"),
+ SchedulerTest("stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)"),
+ SchedulerTest("stopped-monitor-20", "Stopped Monitor - initial stop"),
+ SchedulerTest("stopped-monitor-21", "Stopped Monitor - stopped single-up"),
+ SchedulerTest("stopped-monitor-22", "Stopped Monitor - stopped multi-up"),
+ SchedulerTest("stopped-monitor-23", "Stopped Monitor - start stopped"),
+ SchedulerTest("stopped-monitor-24", "Stopped Monitor - unmanage stopped"),
+ SchedulerTest("stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up"),
+ SchedulerTest("stopped-monitor-26", "Stopped Monitor - start unmanaged stopped"),
+ SchedulerTest("stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)"),
+ SchedulerTest("stopped-monitor-30", "Stopped Monitor - new node started"),
+ SchedulerTest("stopped-monitor-31", "Stopped Monitor - new node stopped"),
+ ]),
+ SchedulerTestGroup([
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
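# As a hypothetical illustration of the ISO8601 point above (the op ids and
# values here are invented for this comment, not taken from the test CIB),
# a recurring monitor may give its interval either as plain seconds or as
# an ISO 8601 duration:
# <op id="mon-10s" name="monitor" interval="10s" on-fail="block"/>
# <op id="mon-iso" name="monitor" interval="PT10S" on-fail="block"/>
# Both describe the same 10-second interval, and on-fail=block must be
# honored for either form.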
- [ "intervals", "Recurring monitor interval handling" ],
- ],
- [
- [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
- [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
- [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
- [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
- [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
- [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
- [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
- [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
- [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
- [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
- [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
- [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
- [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
- [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
- [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
- [ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
- [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
- [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
- [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
- [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
- [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
- [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
- [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
- [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
- ],
- [
- [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
- [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
- [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
- [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
- [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
- [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
- [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
- [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
- [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
- [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
- [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
- [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
- [ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
- [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
- [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
- [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
- [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
- [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
- [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
- [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
- [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
- [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
- [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
- [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
- ],
- [
- [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
- [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
- [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
- [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
- [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
- [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
- [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
- [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
- [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
- [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
- [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
- [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
- [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
- [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
- [ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
- [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
- [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
- [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
- [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
- [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
- [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
- [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
- [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
- [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
- ],
- [
- [ "ticket-promoted-1", "Ticket - Promoted (loss-policy=stop, initial)" ],
- [ "ticket-promoted-2", "Ticket - Promoted (loss-policy=stop, granted)" ],
- [ "ticket-promoted-3", "Ticket - Promoted (loss-policy-stop, revoked)" ],
- [ "ticket-promoted-4", "Ticket - Promoted (loss-policy=demote, initial)" ],
- [ "ticket-promoted-5", "Ticket - Promoted (loss-policy=demote, granted)" ],
- [ "ticket-promoted-6", "Ticket - Promoted (loss-policy=demote, revoked)" ],
- [ "ticket-promoted-7", "Ticket - Promoted (loss-policy=fence, initial)" ],
- [ "ticket-promoted-8", "Ticket - Promoted (loss-policy=fence, granted)" ],
- [ "ticket-promoted-9", "Ticket - Promoted (loss-policy=fence, revoked)" ],
- [ "ticket-promoted-10", "Ticket - Promoted (loss-policy=freeze, initial)" ],
- [ "ticket-promoted-11", "Ticket - Promoted (loss-policy=freeze, granted)" ],
- [ "ticket-promoted-12", "Ticket - Promoted (loss-policy=freeze, revoked)" ],
- [ "ticket-promoted-13", "Ticket - Promoted (loss-policy=stop, standby, granted)" ],
- [ "ticket-promoted-14", "Ticket - Promoted (loss-policy=stop, granted, standby)" ],
- [ "ticket-promoted-15", "Ticket - Promoted (loss-policy=stop, standby, revoked)" ],
- [ "ticket-promoted-16", "Ticket - Promoted (loss-policy=demote, standby, granted)" ],
- [ "ticket-promoted-17", "Ticket - Promoted (loss-policy=demote, granted, standby)" ],
- [ "ticket-promoted-18", "Ticket - Promoted (loss-policy=demote, standby, revoked)" ],
- [ "ticket-promoted-19", "Ticket - Promoted (loss-policy=fence, standby, granted)" ],
- [ "ticket-promoted-20", "Ticket - Promoted (loss-policy=fence, granted, standby)" ],
- [ "ticket-promoted-21", "Ticket - Promoted (loss-policy=fence, standby, revoked)" ],
- [ "ticket-promoted-22", "Ticket - Promoted (loss-policy=freeze, standby, granted)" ],
- [ "ticket-promoted-23", "Ticket - Promoted (loss-policy=freeze, granted, standby)" ],
- [ "ticket-promoted-24", "Ticket - Promoted (loss-policy=freeze, standby, revoked)" ],
- ],
- [
- [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
- [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
- [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
- [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
- [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
- [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
- [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
- [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
- [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
- [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
- [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
- [ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
- [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
- [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
- [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
- [ "site-specific-params", "Site-specific instance attributes based on rules" ],
- ],
- [
- [ "template-1", "Template - 1" ],
- [ "template-2", "Template - 2" ],
- [ "template-3", "Template - 3 (merge operations)" ],
- [ "template-coloc-1", "Template - Colocation 1" ],
- [ "template-coloc-2", "Template - Colocation 2" ],
- [ "template-coloc-3", "Template - Colocation 3" ],
- [ "template-order-1", "Template - Order 1" ],
- [ "template-order-2", "Template - Order 2" ],
- [ "template-order-3", "Template - Order 3" ],
- [ "template-ticket", "Template - Ticket" ],
- [ "template-rsc-sets-1", "Template - Resource Sets 1" ],
- [ "template-rsc-sets-2", "Template - Resource Sets 2" ],
- [ "template-rsc-sets-3", "Template - Resource Sets 3" ],
- [ "template-rsc-sets-4", "Template - Resource Sets 4" ],
- [ "template-clone-primitive", "Cloned primitive from template" ],
- [ "template-clone-group", "Cloned group from template" ],
- [ "location-sets-templates", "Resource sets and templates - Location" ],
- [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
- [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
- [ "tags-location", "Tags - Location" ],
- [ "tags-ticket", "Tags - Ticket" ],
- ],
- [
- [ "container-1", "Container - initial" ],
- [ "container-2", "Container - monitor failed" ],
- [ "container-3", "Container - stop failed" ],
- [ "container-4", "Container - reached migration-threshold" ],
- [ "container-group-1", "Container in group - initial" ],
- [ "container-group-2", "Container in group - monitor failed" ],
- [ "container-group-3", "Container in group - stop failed" ],
- [ "container-group-4", "Container in group - reached migration-threshold" ],
- [ "container-is-remote-node", "Place resource within container when container is remote-node" ],
- [ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
- [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
- [ "bundle-order-startup", "Bundle startup ordering" ],
- [ "bundle-order-partial-start",
- "Bundle startup ordering when some dependencies are already running" ],
- [ "bundle-order-partial-start-2",
- "Bundle startup ordering when some dependencies and the container are already running" ],
- [ "bundle-order-stop", "Bundle stop ordering" ],
- [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
- [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
- [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
- [ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
- [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
- [ "bundle-interleave-start", "Interleave bundle starts" ],
- [ "bundle-interleave-promote", "Interleave bundle promotes" ],
- [ "bundle-nested-colocation", "Colocation of nested connection resources" ],
- [ "bundle-order-fencing",
- "Order pseudo bundle fencing after parent node fencing if both are happening" ],
- [ "bundle-probe-order-1", "order 1" ],
- [ "bundle-probe-order-2", "order 2" ],
- [ "bundle-probe-order-3", "order 3" ],
- [ "bundle-probe-remotes", "Ensure remotes get probed too" ],
- [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
- [ "bundle-connection-with-container", "Don't move a container due to connection preferences" ],
- [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
- [ "bundle-promoted-location-1",
- "Promotable bundle, positive location" ],
- [ "bundle-promoted-location-2",
- "Promotable bundle, negative location" ],
- [ "bundle-promoted-location-3",
- "Promotable bundle, positive location for promoted role" ],
- [ "bundle-promoted-location-4",
- "Promotable bundle, negative location for promoted role" ],
- [ "bundle-promoted-location-5",
- "Promotable bundle, positive location for unpromoted role" ],
- [ "bundle-promoted-location-6",
- "Promotable bundle, negative location for unpromoted role" ],
- [ "bundle-promoted-colocation-1",
- "Primary promoted bundle, dependent primitive (mandatory coloc)" ],
- [ "bundle-promoted-colocation-2",
- "Primary promoted bundle, dependent primitive (optional coloc)" ],
- [ "bundle-promoted-colocation-3",
- "Dependent promoted bundle, primary primitive (mandatory coloc)" ],
- [ "bundle-promoted-colocation-4",
- "Dependent promoted bundle, primary primitive (optional coloc)" ],
- [ "bundle-promoted-colocation-5",
- "Primary and dependent promoted bundle instances (mandatory coloc)" ],
- [ "bundle-promoted-colocation-6",
- "Primary and dependent promoted bundle instances (optional coloc)" ],
- [ "bundle-promoted-anticolocation-1",
- "Primary promoted bundle, dependent primitive (mandatory anti)" ],
- [ "bundle-promoted-anticolocation-2",
- "Primary promoted bundle, dependent primitive (optional anti)" ],
- [ "bundle-promoted-anticolocation-3",
- "Dependent promoted bundle, primary primitive (mandatory anti)" ],
- [ "bundle-promoted-anticolocation-4",
- "Dependent promoted bundle, primary primitive (optional anti)" ],
- [ "bundle-promoted-anticolocation-5",
- "Primary and dependent promoted bundle instances (mandatory anti)" ],
- [ "bundle-promoted-anticolocation-6",
- "Primary and dependent promoted bundle instances (optional anti)" ],
- ],
- [
- [ "whitebox-fail1", "Fail whitebox container rsc" ],
- [ "whitebox-fail2", "Fail cluster connection to guest node" ],
- [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
- [ "whitebox-start", "Start whitebox container with resources assigned to it" ],
- [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
- [ "whitebox-move", "Move whitebox container with resources assigned to it" ],
- [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
- [ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
- [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
- [ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
- [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
- [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
- [ "whitebox-migrate1", "Migrate both container and connection resource" ],
- [ "whitebox-imply-stop-on-fence",
- "imply stop action on container node rsc when host node is fenced" ],
- [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
- [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
- [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
- [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
- ],
- [
- [ "remote-startup-probes", "Baremetal remote-node startup probes" ],
- [ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
- [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
- [ "remote-fence-unclean2",
- "Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
- [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
- [ "remote-move", "Move remote-node connection resource" ],
- [ "remote-disable", "Disable a baremetal remote-node" ],
- [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
- [ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
- [ "remote-orphaned2",
- "verify we can handle orphaned remote connections with active resources on the remote" ],
- [ "remote-recover", "Recover connection resource after cluster-node fails" ],
- [ "remote-stale-node-entry",
- "Make sure we properly handle leftover remote-node entries in the node section" ],
- [ "remote-partial-migrate",
- "Make sure partial migrations are handled before ops on the remote node" ],
- [ "remote-partial-migrate2",
- "Make sure partial migration target is prefered for remote connection" ],
- [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
- [ "remote-start-fail",
- "Make sure a start failure does not result in fencing if no active resources are on remote" ],
- [ "remote-unclean2",
- "Make monitor failure always results in fencing, even if no rsc are active on remote" ],
- [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
- [ "remote-recovery", "Recover remote connections before attempting demotion" ],
- [ "remote-recover-connection", "Optimistically recovery of only the connection" ],
- [ "remote-recover-all", "Fencing when the connection has no home" ],
- [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
- [ "remote-recover-unknown",
- "Fencing when the connection has no home and the remote has no operation history" ],
- [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
- [ "remote-connection-unrecoverable",
- "Remote connection host must be fenced, with connection unrecoverable" ],
- [ "remote-connection-shutdown", "Remote connection shutdown" ],
- [ "cancel-behind-moving-remote",
- "Route recurring monitor cancellations through original node of a moving remote connection" ],
- ],
- [
- [ "resource-discovery", "Exercises resource-discovery location constraint option" ],
- [ "rsc-discovery-per-node", "Disable resource discovery per node" ],
- [ "shutdown-lock", "Ensure shutdown lock works properly" ],
- [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
- ],
- [
- [ "op-defaults", "Test op_defaults conditional expressions" ],
- [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
- [ "op-defaults-3", "Test op_defaults precedence" ],
- [ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
- [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
- ],
- [ [ "stop-all-resources", "Test stop-all-resources=true "],
- ],
- [ [ "ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK" ],
- [ "ocf_degraded_promoted-remap-ocf_ok", "Test degraded promoted remapped to OK"],
- ],
+ SchedulerTest("intervals", "Recurring monitor interval handling"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)"),
+ SchedulerTest("ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)"),
+ SchedulerTest("ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)"),
+ SchedulerTest("ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)"),
+ SchedulerTest("ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)"),
+ SchedulerTest("ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)"),
+ SchedulerTest("ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)"),
+ SchedulerTest("ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)"),
+ SchedulerTest("ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)"),
+ SchedulerTest("ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)"),
+ SchedulerTest("ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)"),
+ SchedulerTest("ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)"),
+ SchedulerTest("ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)"),
+ SchedulerTest("ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)"),
+ SchedulerTest("ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)"),
+ SchedulerTest("ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)"),
+ SchedulerTest("ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)"),
+ SchedulerTest("ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)"),
+ SchedulerTest("ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)"),
+ SchedulerTest("ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)"),
+ SchedulerTest("ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)"),
+ SchedulerTest("ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)"),
+ SchedulerTest("ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)"),
+ SchedulerTest("ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ticket-group-1", "Ticket - Group (loss-policy=stop, initial)"),
+ SchedulerTest("ticket-group-2", "Ticket - Group (loss-policy=stop, granted)"),
+ SchedulerTest("ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)"),
+ SchedulerTest("ticket-group-4", "Ticket - Group (loss-policy=demote, initial)"),
+ SchedulerTest("ticket-group-5", "Ticket - Group (loss-policy=demote, granted)"),
+ SchedulerTest("ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)"),
+ SchedulerTest("ticket-group-7", "Ticket - Group (loss-policy=fence, initial)"),
+ SchedulerTest("ticket-group-8", "Ticket - Group (loss-policy=fence, granted)"),
+ SchedulerTest("ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)"),
+ SchedulerTest("ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)"),
+ SchedulerTest("ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)"),
+ SchedulerTest("ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)"),
+ SchedulerTest("ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)"),
+ SchedulerTest("ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)"),
+ SchedulerTest("ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)"),
+ SchedulerTest("ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)"),
+ SchedulerTest("ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)"),
+ SchedulerTest("ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)"),
+ SchedulerTest("ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)"),
+ SchedulerTest("ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)"),
+ SchedulerTest("ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)"),
+ SchedulerTest("ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)"),
+ SchedulerTest("ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)"),
+ SchedulerTest("ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)"),
+ SchedulerTest("ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)"),
+ SchedulerTest("ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)"),
+ SchedulerTest("ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)"),
+ SchedulerTest("ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)"),
+ SchedulerTest("ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)"),
+ SchedulerTest("ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)"),
+ SchedulerTest("ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)"),
+ SchedulerTest("ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)"),
+ SchedulerTest("ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)"),
+ SchedulerTest("ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)"),
+ SchedulerTest("ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)"),
+ SchedulerTest("ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)"),
+ SchedulerTest("ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)"),
+ SchedulerTest("ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)"),
+ SchedulerTest("ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)"),
+ SchedulerTest("ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)"),
+ SchedulerTest("ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)"),
+ SchedulerTest("ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)"),
+ SchedulerTest("ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)"),
+ SchedulerTest("ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)"),
+ SchedulerTest("ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)"),
+ SchedulerTest("ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)"),
+ SchedulerTest("ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ticket-promoted-1", "Ticket - Promoted (loss-policy=stop, initial)"),
+ SchedulerTest("ticket-promoted-2", "Ticket - Promoted (loss-policy=stop, granted)"),
+ SchedulerTest("ticket-promoted-3", "Ticket - Promoted (loss-policy-stop, revoked)"),
+ SchedulerTest("ticket-promoted-4", "Ticket - Promoted (loss-policy=demote, initial)"),
+ SchedulerTest("ticket-promoted-5", "Ticket - Promoted (loss-policy=demote, granted)"),
+ SchedulerTest("ticket-promoted-6", "Ticket - Promoted (loss-policy=demote, revoked)"),
+ SchedulerTest("ticket-promoted-7", "Ticket - Promoted (loss-policy=fence, initial)"),
+ SchedulerTest("ticket-promoted-8", "Ticket - Promoted (loss-policy=fence, granted)"),
+ SchedulerTest("ticket-promoted-9", "Ticket - Promoted (loss-policy=fence, revoked)"),
+ SchedulerTest("ticket-promoted-10", "Ticket - Promoted (loss-policy=freeze, initial)"),
+ SchedulerTest("ticket-promoted-11", "Ticket - Promoted (loss-policy=freeze, granted)"),
+ SchedulerTest("ticket-promoted-12", "Ticket - Promoted (loss-policy=freeze, revoked)"),
+ SchedulerTest("ticket-promoted-13", "Ticket - Promoted (loss-policy=stop, standby, granted)"),
+ SchedulerTest("ticket-promoted-14", "Ticket - Promoted (loss-policy=stop, granted, standby)"),
+ SchedulerTest("ticket-promoted-15", "Ticket - Promoted (loss-policy=stop, standby, revoked)"),
+ SchedulerTest("ticket-promoted-16", "Ticket - Promoted (loss-policy=demote, standby, granted)"),
+ SchedulerTest("ticket-promoted-17", "Ticket - Promoted (loss-policy=demote, granted, standby)"),
+ SchedulerTest("ticket-promoted-18", "Ticket - Promoted (loss-policy=demote, standby, revoked)"),
+ SchedulerTest("ticket-promoted-19", "Ticket - Promoted (loss-policy=fence, standby, granted)"),
+ SchedulerTest("ticket-promoted-20", "Ticket - Promoted (loss-policy=fence, granted, standby)"),
+ SchedulerTest("ticket-promoted-21", "Ticket - Promoted (loss-policy=fence, standby, revoked)"),
+ SchedulerTest("ticket-promoted-22", "Ticket - Promoted (loss-policy=freeze, standby, granted)"),
+ SchedulerTest("ticket-promoted-23", "Ticket - Promoted (loss-policy=freeze, granted, standby)"),
+ SchedulerTest("ticket-promoted-24", "Ticket - Promoted (loss-policy=freeze, standby, revoked)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)"),
+ SchedulerTest("ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)"),
+ SchedulerTest("ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)"),
+ SchedulerTest("ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)"),
+ SchedulerTest("ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)"),
+ SchedulerTest("ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)"),
+ SchedulerTest("ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)"),
+ SchedulerTest("ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)"),
+ SchedulerTest("ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)"),
+ SchedulerTest("ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)"),
+ SchedulerTest("ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)"),
+ SchedulerTest("ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)"),
+ SchedulerTest("ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)"),
+ SchedulerTest("ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)"),
+ SchedulerTest("cluster-specific-params", "Cluster-specific instance attributes based on rules"),
+ SchedulerTest("site-specific-params", "Site-specific instance attributes based on rules"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("template-1", "Template - 1"),
+ SchedulerTest("template-2", "Template - 2"),
+ SchedulerTest("template-3", "Template - 3 (merge operations)"),
+ SchedulerTest("template-coloc-1", "Template - Colocation 1"),
+ SchedulerTest("template-coloc-2", "Template - Colocation 2"),
+ SchedulerTest("template-coloc-3", "Template - Colocation 3"),
+ SchedulerTest("template-order-1", "Template - Order 1"),
+ SchedulerTest("template-order-2", "Template - Order 2"),
+ SchedulerTest("template-order-3", "Template - Order 3"),
+ SchedulerTest("template-ticket", "Template - Ticket"),
+ SchedulerTest("template-rsc-sets-1", "Template - Resource Sets 1"),
+ SchedulerTest("template-rsc-sets-2", "Template - Resource Sets 2"),
+ SchedulerTest("template-rsc-sets-3", "Template - Resource Sets 3"),
+ SchedulerTest("template-rsc-sets-4", "Template - Resource Sets 4"),
+ SchedulerTest("template-clone-primitive", "Cloned primitive from template"),
+ SchedulerTest("template-clone-group", "Cloned group from template"),
+ SchedulerTest("location-sets-templates", "Resource sets and templates - Location"),
+ SchedulerTest("tags-coloc-order-1", "Tags - Colocation and Order (Simple)"),
+ SchedulerTest("tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)"),
+ SchedulerTest("tags-location", "Tags - Location"),
+ SchedulerTest("tags-ticket", "Tags - Ticket"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("container-1", "Container - initial"),
+ SchedulerTest("container-2", "Container - monitor failed"),
+ SchedulerTest("container-3", "Container - stop failed"),
+ SchedulerTest("container-4", "Container - reached migration-threshold"),
+ SchedulerTest("container-group-1", "Container in group - initial"),
+ SchedulerTest("container-group-2", "Container in group - monitor failed"),
+ SchedulerTest("container-group-3", "Container in group - stop failed"),
+ SchedulerTest("container-group-4", "Container in group - reached migration-threshold"),
+ SchedulerTest("container-is-remote-node", "Place resource within container when container is remote-node"),
+ SchedulerTest("bug-rh-1097457", "Kill user defined container/contents ordering"),
+ SchedulerTest("bug-cl-5247", "Graph loop when recovering m/s resource in a container"),
+ SchedulerTest("bundle-order-startup", "Bundle startup ordering"),
+ SchedulerTest("bundle-order-partial-start",
+ "Bundle startup ordering when some dependencies are already running"),
+ SchedulerTest("bundle-order-partial-start-2",
+ "Bundle startup ordering when some dependencies and the container are already running"),
+ SchedulerTest("bundle-order-stop", "Bundle stop ordering"),
+ SchedulerTest("bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped"),
+ SchedulerTest("bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection"),
+ SchedulerTest("bundle-order-startup-clone", "Prevent startup because bundle isn't promoted"),
+ SchedulerTest("bundle-order-startup-clone-2", "Bundle startup with clones"),
+ SchedulerTest("bundle-order-stop-clone", "Stop bundle because clone is stopping"),
+ SchedulerTest("bundle-interleave-start", "Interleave bundle starts"),
+ SchedulerTest("bundle-interleave-promote", "Interleave bundle promotes"),
+ SchedulerTest("bundle-nested-colocation", "Colocation of nested connection resources"),
+ SchedulerTest("bundle-order-fencing",
+ "Order pseudo bundle fencing after parent node fencing if both are happening"),
+ SchedulerTest("bundle-probe-order-1", "order 1"),
+ SchedulerTest("bundle-probe-order-2", "order 2"),
+ SchedulerTest("bundle-probe-order-3", "order 3"),
+ SchedulerTest("bundle-probe-remotes", "Ensure remotes get probed too"),
+ SchedulerTest("bundle-replicas-change", "Change bundle from 1 replica to multiple"),
+ SchedulerTest("bundle-connection-with-container", "Don't move a container due to connection preferences"),
+ SchedulerTest("nested-remote-recovery", "Recover bundle's container hosted on remote node"),
+ SchedulerTest("bundle-promoted-location-1",
+ "Promotable bundle, positive location"),
+ SchedulerTest("bundle-promoted-location-2",
+ "Promotable bundle, negative location"),
+ SchedulerTest("bundle-promoted-location-3",
+ "Promotable bundle, positive location for promoted role"),
+ SchedulerTest("bundle-promoted-location-4",
+ "Promotable bundle, negative location for promoted role"),
+ SchedulerTest("bundle-promoted-location-5",
+ "Promotable bundle, positive location for unpromoted role"),
+ SchedulerTest("bundle-promoted-location-6",
+ "Promotable bundle, negative location for unpromoted role"),
+ SchedulerTest("bundle-promoted-colocation-1",
+ "Primary promoted bundle, dependent primitive (mandatory coloc)"),
+ SchedulerTest("bundle-promoted-colocation-2",
+ "Primary promoted bundle, dependent primitive (optional coloc)"),
+ SchedulerTest("bundle-promoted-colocation-3",
+ "Dependent promoted bundle, primary primitive (mandatory coloc)"),
+ SchedulerTest("bundle-promoted-colocation-4",
+ "Dependent promoted bundle, primary primitive (optional coloc)"),
+ SchedulerTest("bundle-promoted-colocation-5",
+ "Primary and dependent promoted bundle instances (mandatory coloc)"),
+ SchedulerTest("bundle-promoted-colocation-6",
+ "Primary and dependent promoted bundle instances (optional coloc)"),
+ SchedulerTest("bundle-promoted-anticolocation-1",
+ "Primary promoted bundle, dependent primitive (mandatory anti)"),
+ SchedulerTest("bundle-promoted-anticolocation-2",
+ "Primary promoted bundle, dependent primitive (optional anti)"),
+ SchedulerTest("bundle-promoted-anticolocation-3",
+ "Dependent promoted bundle, primary primitive (mandatory anti)"),
+ SchedulerTest("bundle-promoted-anticolocation-4",
+ "Dependent promoted bundle, primary primitive (optional anti)"),
+ SchedulerTest("bundle-promoted-anticolocation-5",
+ "Primary and dependent promoted bundle instances (mandatory anti)"),
+ SchedulerTest("bundle-promoted-anticolocation-6",
+ "Primary and dependent promoted bundle instances (optional anti)"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("whitebox-fail1", "Fail whitebox container rsc"),
+ SchedulerTest("whitebox-fail2", "Fail cluster connection to guest node"),
+ SchedulerTest("whitebox-fail3", "Failed containers should not run nested on remote nodes"),
+ SchedulerTest("whitebox-start", "Start whitebox container with resources assigned to it"),
+ SchedulerTest("whitebox-stop", "Stop whitebox container with resources assigned to it"),
+ SchedulerTest("whitebox-move", "Move whitebox container with resources assigned to it"),
+ SchedulerTest("whitebox-asymmetric", "Verify connection rsc opts-in based on container resource"),
+ SchedulerTest("whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established"),
+ SchedulerTest("whitebox-ms-ordering-move", "Stop/Start cycle within a moving container"),
+ SchedulerTest("whitebox-orphaned", "Properly shutdown orphaned whitebox container"),
+ SchedulerTest("whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes"),
+ SchedulerTest("whitebox-unexpectedly-running", "Recover container nodes the cluster did not start"),
+ SchedulerTest("whitebox-migrate1", "Migrate both container and connection resource"),
+ SchedulerTest("whitebox-imply-stop-on-fence",
+ "imply stop action on container node rsc when host node is fenced"),
+ SchedulerTest("whitebox-nested-group", "Verify guest remote-node works nested in a group"),
+ SchedulerTest("guest-node-host-dies", "Verify guest node is recovered if host goes away"),
+ SchedulerTest("guest-node-cleanup", "Order guest node connection recovery after container probe"),
+ SchedulerTest("guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("remote-startup-probes", "Baremetal remote-node startup probes"),
+ SchedulerTest("remote-startup", "Startup a newly discovered remote-nodes with no status"),
+ SchedulerTest("remote-fence-unclean", "Fence unclean baremetal remote-node"),
+ SchedulerTest("remote-fence-unclean2",
+ "Fence baremetal remote-node after cluster node fails and connection can not be recovered"),
+ SchedulerTest("remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)"),
+ SchedulerTest("remote-move", "Move remote-node connection resource"),
+ SchedulerTest("remote-disable", "Disable a baremetal remote-node"),
+ SchedulerTest("remote-probe-disable", "Probe then stop a baremetal remote-node"),
+ SchedulerTest("remote-orphaned", "Properly shutdown orphaned connection resource"),
+ SchedulerTest("remote-orphaned2",
+ "verify we can handle orphaned remote connections with active resources on the remote"),
+ SchedulerTest("remote-recover", "Recover connection resource after cluster-node fails"),
+ SchedulerTest("remote-stale-node-entry",
+ "Make sure we properly handle leftover remote-node entries in the node section"),
+ SchedulerTest("remote-partial-migrate",
+ "Make sure partial migrations are handled before ops on the remote node"),
+ SchedulerTest("remote-partial-migrate2",
+ "Make sure partial migration target is prefered for remote connection"),
+ SchedulerTest("remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote"),
+ SchedulerTest("remote-start-fail",
+ "Make sure a start failure does not result in fencing if no active resources are on remote"),
+ SchedulerTest("remote-unclean2",
+ "Make monitor failure always results in fencing, even if no rsc are active on remote"),
+ SchedulerTest("remote-fence-before-reconnect", "Fence before clearing recurring monitor failure"),
+ SchedulerTest("remote-recovery", "Recover remote connections before attempting demotion"),
+ SchedulerTest("remote-recover-connection", "Optimistically recovery of only the connection"),
+ SchedulerTest("remote-recover-all", "Fencing when the connection has no home"),
+ SchedulerTest("remote-recover-no-resources", "Fencing when the connection has no home and no active resources"),
+ SchedulerTest("remote-recover-unknown",
+ "Fencing when the connection has no home and the remote has no operation history"),
+ SchedulerTest("remote-reconnect-delay", "Waiting for remote reconnect interval to expire"),
+ SchedulerTest("remote-connection-unrecoverable",
+ "Remote connection host must be fenced, with connection unrecoverable"),
+ SchedulerTest("remote-connection-shutdown", "Remote connection shutdown"),
+ SchedulerTest("cancel-behind-moving-remote",
+ "Route recurring monitor cancellations through original node of a moving remote connection"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("resource-discovery", "Exercises resource-discovery location constraint option"),
+ SchedulerTest("rsc-discovery-per-node", "Disable resource discovery per node"),
+ SchedulerTest("shutdown-lock", "Ensure shutdown lock works properly"),
+ SchedulerTest("shutdown-lock-expiration", "Ensure shutdown lock expiration works properly"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("op-defaults", "Test op_defaults conditional expressions"),
+ SchedulerTest("op-defaults-2", "Test op_defaults AND'ed conditional expressions"),
+ SchedulerTest("op-defaults-3", "Test op_defaults precedence"),
+ SchedulerTest("rsc-defaults", "Test rsc_defaults conditional expressions"),
+ SchedulerTest("rsc-defaults-2", "Test rsc_defaults conditional expressions without type"),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("stop-all-resources", "Test stop-all-resources=true "),
+ ]),
+ SchedulerTestGroup([
+ SchedulerTest("ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK"),
+ SchedulerTest("ocf_degraded_promoted-remap-ocf_ok", "Test degraded promoted remapped to OK"),
+ ]),
]
TESTS_64BIT = [
- [
- [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
- ],
+ SchedulerTestGroup([
+ SchedulerTest("year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC"),
+ ]),
]
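# Illustrative example (hypothetical entry, not part of the suite): a test
# that needs extra crm_simulate arguments can pass them via the optional
# third parameter of SchedulerTest:
#
#   SchedulerTestGroup([
#       SchedulerTest("hypothetical-test", "Description of hypothetical test",
#                     args=["--show-utilization"]),
#   ]),
#
# run_all() forwards test.args to run_one(), which appends them to the
# crm_simulate command line.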
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
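# Illustrative effect (attribute values are hypothetical): a line in the
# generated output such as
#   <transition_graph crm_feature_set="3.19.0" batch-limit="0" ...>
# has both attributes stripped, so feature-set or batch-limit differences
# between builds do not show up as spurious test failures.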
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so grab that
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
self.args = parser.parse_args(argv[1:])
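# Illustrative example (hypothetical invocation): given
#   cts-scheduler --run bundle-order-stop --show-scores
# argv is truncated to ['cts-scheduler', '--run', 'bundle-order-stop'] for
# argparse, while ['--show-scores'] is saved in self.single_test_args and
# later passed straight through to crm_simulate.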
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
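# Illustrative result: with --valgrind, run_one() ends up executing something
# along the lines of
#   valgrind -q --leak-check=full ... --log-file=<test>.valgrind \
#       crm_simulate -x <input>.xml -S
# (the per-test --log-file argument is appended in run_one()).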
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildOptions._BUILD_DIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildOptions.SBIN_DIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(ExitStatus.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildOptions._BUILD_DIR, "xml"),
BuildOptions.SCHEMA_DIR ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
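# Illustrative effect: in a source checkout this typically points
# PCMK_schema_directory at <builddir>/xml, while on an installed system it
# falls back to the packaged schema directory (BuildOptions.SCHEMA_DIR); a
# pre-set environment variable always wins.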
def __init__(self, argv=sys.argv):
# Ensure all command output is in portable locale for comparison
os.environ['LC_ALL'] = "C"
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
self.xml_input_dir = os.path.join(self.args.io_dir, "xml")
self.expected_dir = os.path.join(self.args.io_dir, "exp")
self.dot_expected_dir = os.path.join(self.args.io_dir, "dot")
self.scores_dir = os.path.join(self.args.io_dir, "scores")
self.summary_dir = os.path.join(self.args.io_dir, "summary")
self.stderr_expected_dir = os.path.join(self.args.io_dir, "stderr")
# Create a temporary directory to store diff file
self.failed_dir = tempfile.mkdtemp(prefix='cts-scheduler_')
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.failed_dir, "test-output.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, "test-output.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
self.outfile_out_dir = os.path.join(self.args.out_dir, "out")
self.dot_out_dir = os.path.join(self.args.out_dir, "dot")
self.scores_out_dir = os.path.join(self.args.out_dir, "scores")
self.summary_out_dir = os.path.join(self.args.out_dir, "summary")
self.stderr_out_dir = os.path.join(self.args.out_dir, "stderr")
self.valgrind_out_dir = os.path.join(self.args.out_dir, "valgrind")
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
# Ensure that the main output directory exists
# We don't want to create it with os.makedirs below
if not os.path.isdir(self.args.out_dir):
self._error("Output directory missing; can't create output files")
sys.exit(ExitStatus.CANTCREAT)
# Create output subdirectories if they don't exist
try:
os.makedirs(self.outfile_out_dir, 0o755, True)
os.makedirs(self.dot_out_dir, 0o755, True)
os.makedirs(self.scores_out_dir, 0o755, True)
os.makedirs(self.summary_out_dir, 0o755, True)
os.makedirs(self.stderr_out_dir, 0o755, True)
if self.valgrind_args:
os.makedirs(self.valgrind_out_dir, 0o755, True)
except OSError as ex:
self._error("Unable to create output subdirectory: %s" % ex)
remove_files([
self.outfile_out_dir,
self.dot_out_dir,
self.scores_out_dir,
self.summary_out_dir,
self.stderr_out_dir,
])
sys.exit(ExitStatus.CANTCREAT)
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL)
self.failed_file.write("\n")
return True
return False
- def run_one(self, test_name, test_desc, test_args=[]):
+ def run_one(self, test_name, test_desc, test_args):
""" Run one scheduler test """
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = os.path.join(
self.xml_input_dir, "%s.xml" % test_name)
expected_filename = os.path.join(
self.expected_dir, "%s.exp" % test_name)
dot_expected_filename = os.path.join(
self.dot_expected_dir, "%s.dot" % test_name)
scores_filename = os.path.join(
self.scores_dir, "%s.scores" % test_name)
summary_filename = os.path.join(
self.summary_dir, "%s.summary" % test_name)
stderr_expected_filename = os.path.join(
self.stderr_expected_dir, "%s.stderr" % test_name)
# (Intermediate) test outputs
output_filename = os.path.join(
self.outfile_out_dir, "%s.out" % test_name)
dot_output_filename = os.path.join(
self.dot_out_dir, "%s.dot.pe" % test_name)
score_output_filename = os.path.join(
self.scores_out_dir, "%s.scores.pe" % test_name)
summary_output_filename = os.path.join(
self.summary_out_dir, "%s.summary.pe" % test_name)
stderr_output_filename = os.path.join(
self.stderr_out_dir, "%s.stderr.pe" % test_name)
valgrind_output_filename = os.path.join(
self.valgrind_out_dir, "%s.valgrind" % test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return ExitStatus.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return ExitStatus.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
# This makes diff happy regardless of --enable-compat-2.0.
# Use sed -E to make Linux and BSD special characters more compatible.
sed = subprocess.Popen(["sed", "-E",
"-e", "s/ocf::/ocf:/g",
"-e", r"s/Masters:/Promoted:/",
"-e", r"s/Slaves:/Unpromoted:/",
"-e", r"s/ Master( |\[|$)/ Promoted\1/",
"-e", r"s/ Slave / Unpromoted /",
], stdin=simulation.stdout, stdout=f,
stderr=subprocess.STDOUT)
simulation.stdout.close()
sed.communicate()
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
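        # (Illustrative note: '-D' saves the transition graph as a dot file,
        # '-G' saves it as XML, and '-sSQ' shows allocation scores while
        # simulating quietly; see crm_simulate(8) for exact semantics.)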
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != ExitStatus.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return ExitStatus.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return ExitStatus.ERROR
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
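        # Illustrative effect: the "digraph" header and closing brace stay in
        # place while the node/edge lines between them are de-duplicated and
        # sorted, so the dot file compares deterministically regardless of the
        # order in which the scheduler emitted them.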
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return ExitStatus.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
dot_output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return ExitStatus.ERROR
return ExitStatus.OK
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
- for test in group:
- try:
- args = test[2]
- except IndexError:
- args = []
- self.run_one(test[0], test[1], args)
+ for test in group.tests:
+ self.run_one(test.name, test.desc, test.args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args != []:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
shutil.rmtree(self.failed_dir)
return ExitStatus.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
shutil.rmtree(self.failed_dir)
return ExitStatus.ERROR
def run(self):
""" Run test(s) as specified """
# Check for pre-existing core so we don't think it's from us
if os.path.exists("core"):
self._failed("Can't run with core already present in " + self.test_home)
return ExitStatus.OSFILE
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
if self.num_failed > 0:
print("\nFailures:\nThese have also been written to: " + self.failed_filename + "\n")
cat(self.failed_filename)
shutil.rmtree(self.failed_dir)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
