Page MenuHomeClusterLabs Projects

No OneTemporary

diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index f2957bad66..99436f1bb8 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1461 +1,1462 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
# One-line program description shown by argparse help output.
DESC = "Regression tests for Pacemaker's scheduler"
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "bug-lf-2613", "Move group on failure" ],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : master" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_master_w_native",
"cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
[ "colo_slave_w_native",
"cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
[ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-master", "Resource Sets - Master" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide not necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
# @TODO: If pacemaker implements versioned attributes, uncomment this test
#[ "migrate-versioned", "Disable migration for versioned resources" ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
[ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
[ "clone-order-instance", "Ordering with specific clone instances" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "master-0", "Stopped -> Slave" ],
[ "master-1", "Stopped -> Promote" ],
[ "master-2", "Stopped -> Promote : notify" ],
[ "master-3", "Stopped -> Promote : master location" ],
[ "master-4", "Started -> Promote : master location" ],
[ "master-5", "Promoted -> Promoted" ],
[ "master-6", "Promoted -> Promoted (2)" ],
[ "master-7", "Promoted -> Fenced" ],
[ "master-8", "Promoted -> Fenced -> Moved" ],
[ "master-9", "Stopped + Promotable + No quorum" ],
[ "master-10", "Stopped -> Promotable : notify with monitor" ],
[ "master-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable master placement" ],
[ "master-12", "Promotion based solely on rsc_location constraints" ],
[ "master-13", "Include preferences of colocated resources when placing master" ],
[ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
[ "master-ordering", "Prevent resources from starting that need a master" ],
[ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
[ "master-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
[ "master-failed-demote", "Don't retry failed demote actions" ],
[ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "master-depend",
"Ensure resources that depend on the master don't get allocated until the master does" ],
[ "master-reattach", "Re-attach to a running master" ],
[ "master-allow-start", "Don't include master score if it would prevent allocation" ],
[ "master-colocation",
"Allow master instances placement to be influenced by colocation constraints" ],
[ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "master-role", "Prevent target-role from promoting more than master-max instances" ],
[ "bug-lf-2358", "Master-Master anti-colocation" ],
[ "master-promotion-constraint", "Mandatory master colocation constraints" ],
[ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
[ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
[ "master-demote-2", "Demote does not clear past failure" ],
[ "master-move", "Move master based on failure of colocated group" ],
[ "master-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by order constraint" ],
[ "order_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
[ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
[ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "master-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "master-stop", "Stop instances due to location constraint with role=Started" ],
[ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "master-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "master-notify", "Master promotion with notifies" ],
[ "master-score-startup", "Use permanent master scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
[ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on master/slave" ],
[ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
[ "bug-1685", "Depends-on-master ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "bug-5007-masterslave_colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure with on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
+ [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
[ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
[ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
[ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
[ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
[ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
[ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
[ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
[ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
[ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
[ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
[ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
[ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
[ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
[ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
[ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
[ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
[ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
[ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
[ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
[ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
[ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
[ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
[ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
# [ "versioned-resources", "Start resources with #ra-version rules" ],
# [ "restart-versioned", "Restart resources on #ra-version change" ],
# [ "reload-versioned", "Reload resources on #ra-version change" ],
#],
#[
# [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
# [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
# [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
#],
]
# Additional test group exercising timestamps past the 32-bit time_t limit.
# NOTE(review): presumably only run on platforms with a 64-bit time_t --
# the gating code is outside this chunk; verify before relying on it.
TESTS_64BIT = [
    [
        [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
    ],
]
# Constants substituted in the build process
class BuildVars(object):
    """ Install/build paths substituted at build time (this is a .in template) """
    SBINDIR = "@sbindir@"                             # installed sbin directory
    BUILDDIR = "@abs_top_builddir@"                   # absolute top of the build tree
    CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"   # installed CIB schema directory
# These values must be kept in sync with include/crm/crm.h
class CrmExit(object):
    """ Exit codes used by this script (subset of Pacemaker's standard codes) """
    OK = 0              # success
    ERROR = 1           # generic failure
    NOT_INSTALLED = 5   # required binary (crm_simulate) not found
    NOINPUT = 66        # a test input file is missing
def is_executable(path):
    """ Check whether a file at a given path is executable.

        Returns a truthy value when the owner-execute bit is set on the
        file's mode, and False when the file cannot be stat'ed.
    """
    try:
        mode = os.stat(path).st_mode
    except OSError:
        return False
    return mode & stat.S_IXUSR
def diff(file1, file2, **kwargs):
    """ Run the system 'diff' on two files, ignoring whitespace differences.

        Extra keyword arguments (e.g. stdout=) are forwarded to
        subprocess.call(); returns the diff process's exit status
        (0 when the files match).
    """
    cmd = [ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines",
            file1, file2 ]
    return subprocess.call(cmd, **kwargs)
def sort_file(filename):
    """ Sort a file's lines alphabetically, rewriting it in place. """
    with io.open(filename, "rt") as handle:
        content = handle.readlines()
    content.sort()
    with io.open(filename, "wt") as handle:
        handle.writelines(content)
def remove_files(filenames):
    """ Remove each named file, silently ignoring ones that don't exist. """
    for name in filenames:
        try:
            os.remove(name)
        except OSError:
            # Best-effort cleanup: a missing file is not an error
            pass
def normalize(filename):
    """ Strip text that isn't important for comparison from a file, in place.

        Removes crm_feature_set="..." and batch-limit="..." attributes,
        which vary between runs/builds. A nonexistent file is a no-op.
    """
    # Compile the scrub patterns once and cache them on the function itself
    if not hasattr(normalize, "patterns"):
        normalize.patterns = [
            re.compile(r'crm_feature_set="[^"]*"'),
            re.compile(r'batch-limit="[0-9]*"')
        ]

    if not os.path.isfile(filename):
        return
    with io.open(filename, "rt") as handle:
        content = handle.readlines()
    with io.open(filename, "wt") as handle:
        for line in content:
            for pattern in normalize.patterns:
                line = pattern.sub("", line)
            handle.write(line)
def cat(filename, dest=sys.stdout):
    """ Copy a file's text contents to a destination file object
        (standard output by default).
    """
    with io.open(filename, "rt") as src:
        dest.write(src.read())
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
    def _parse_args(self, argv):
        """ Parse command-line arguments.

            :param argv: full argument vector (argv[0] is the program name)

            Sets self.args (the parsed argparse namespace) and
            self.single_test_args (everything found after '--run TEST',
            which is passed through to crm_simulate rather than parsed here).
        """
        parser = argparse.ArgumentParser(description=DESC)
        parser.add_argument('-V', '--verbose', action='count',
                            help='Display any differences from expected output')
        parser.add_argument('--run', metavar='TEST',
                            help=('Run only single specified test (any further '
                                  'arguments will be passed to crm_simulate)'))
        parser.add_argument('--update', action='store_true',
                            help='Update expected results with actual results')
        parser.add_argument('-b', '--binary', metavar='PATH',
                            help='Specify path to crm_simulate')
        parser.add_argument('-i', '--io-dir', metavar='PATH',
                            help='Specify path to regression test data directory')
        parser.add_argument('-o', '--out-dir', metavar='PATH',
                            help='Specify where intermediate and output files should go')
        parser.add_argument('-v', '--valgrind', action='store_true',
                            help='Run all commands under valgrind')
        parser.add_argument('--valgrind-dhat', action='store_true',
                            help='Run all commands under valgrind with heap analyzer')
        parser.add_argument('--valgrind-skip-output', action='store_true',
                            help='If running under valgrind, do not display output')
        parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
                            help='Additional options for command under test')

        # argparse can't handle "everything after --run TEST", so grab that
        self.single_test_args = []
        narg = 0
        for arg in argv:
            narg = narg + 1
            if arg == '--run':
                # Keep '--run' plus the test name for argparse; everything
                # after the test name is saved for the single test run
                (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
                break

        self.args = parser.parse_args(argv[1:])
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(CrmExit.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
BuildVars.CRM_SCHEMA_DIRECTORY ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
    def __init__(self, argv=sys.argv):
        """ Set up the test runner: parse arguments, resolve data/output
            directories, locate crm_simulate, and initialize test counters.

            :param argv: command-line argument vector (defaults to sys.argv)
        """
        self._parse_args(argv)

        # Where this executable lives
        self.test_home = os.path.dirname(os.path.realpath(argv[0]))

        # Where test data resides
        if self.args.io_dir is None:
            self.args.io_dir = os.path.join(self.test_home, "scheduler")

        # Where to store generated files
        if self.args.out_dir is None:
            self.args.out_dir = self.args.io_dir
            self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
        else:
            self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
        # crm_simulate creates shadow CIBs under this directory
        os.environ['CIB_shadow_dir'] = self.args.out_dir

        # File object for accumulated diffs of failed tests (opened later)
        self.failed_file = None

        # Single test mode (if requested)
        try:
            # User can give test base name or file name of a test input
            self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
        except (AttributeError, TypeError):
            pass # --run was not specified

        self.set_schema_env()

        # Arguments needed (or not) to run commands
        self.valgrind_args = self._get_valgrind_cmd()
        self.simulate_args = self._get_simulator_cmd()

        # Test counters
        self.num_failed = 0
        self.num_tests = 0
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
with io.open("/dev/null", "wt") as dev_null:
if diff(filename1, filename2, stdout=dev_null) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
self.failed_file.write("\n");
return True
return False
def run_one(self, test_name, test_desc, test_args=[]):
    """ Run one scheduler test

        test_name -- base name of the test (used to locate input/expected files)
        test_desc -- human-readable description printed with the test
        test_args -- extra command-line arguments passed to the simulator

        Returns a CrmExit code (OK, NOINPUT, or ERROR).
    """
    # NOTE(review): test_args=[] is a mutable default; safe here because the
    # list is only concatenated, never mutated -- but worth confirming if
    # this signature is ever extended.
    print(" Test %-25s %s" % ((test_name + ":"), test_desc))
    did_fail = False
    self.num_tests = self.num_tests + 1
    # Test inputs
    input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
    expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
    dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
    scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
    summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
    stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
    # (Intermediate) test outputs
    output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
    dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
    score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
    summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
    stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
    valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
    # Common arguments for running test
    test_cmd = []
    if self.valgrind_args:
        # Valgrind output goes to its own file so it can be size-checked below
        test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
    test_cmd = test_cmd + self.simulate_args
    # @TODO It would be more pythonic to raise exceptions for errors,
    # then perhaps it would be nice to make a single-test class
    # Ensure necessary test inputs exist
    if not os.path.isfile(input_filename):
        self._error("No input")
        self.num_failed = self.num_failed + 1
        return CrmExit.NOINPUT
    if not self.args.update and not os.path.isfile(expected_filename):
        # Without --update there must be a stored expected graph to compare to
        self._error("no stored output")
        return CrmExit.NOINPUT
    # Run simulation to generate summary output
    if self.args.run: # Single test mode
        test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
        # Echo the exact command so the user can re-run it by hand
        print(" ".join(test_cmd_full))
    else:
        # @TODO Why isn't test_args added here?
        test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
    with io.open(summary_output_filename, "wt") as f:
        subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
    if self.args.run:
        cat(summary_output_filename)
    # Re-run simulation to generate dot, graph, and scores
    test_cmd_full = test_cmd + [
        '-x', input_filename,
        '-D', dot_output_filename,
        '-G', output_filename,
        '-sSQ' ] + test_args
    with io.open(stderr_output_filename, "wt") as f_stderr, \
         io.open(score_output_filename, "wt") as f_score:
        rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
    # Check for test command failure
    if rc != CrmExit.OK:
        self._failed("Test returned: %d" % rc)
        did_fail = True
        print(" ".join(test_cmd_full))
    # Check for valgrind errors
    if self.valgrind_args and not self.args.valgrind_skip_output:
        # Any valgrind output at all (nonzero log size) counts as a failure
        if os.stat(valgrind_output_filename).st_size > 0:
            self._failed("Valgrind reported errors")
            did_fail = True
            cat(valgrind_output_filename)
        remove_files([ valgrind_output_filename ])
    # Check for core dump
    if os.path.isfile("core"):
        self._failed("Core-file detected: core." + test_name)
        did_fail = True
        # Preserve the core under a per-test name for later inspection
        os.rename("core", "%s/core.%s" % (self.test_home, test_name))
    # Check any stderr output
    if os.path.isfile(stderr_expected_filename):
        # A .stderr file means stderr output is expected; diff against it
        if self._compare_files(stderr_expected_filename, stderr_output_filename):
            self._failed("stderr changed")
            did_fail = True
    elif os.stat(stderr_output_filename).st_size > 0:
        # No .stderr file means any stderr output is unexpected
        self._failed("Output was written to stderr")
        did_fail = True
        cat(stderr_output_filename)
    remove_files([ stderr_output_filename ])
    # Check whether output graph exists, and normalize it
    if (not os.path.isfile(output_filename)
        or os.stat(output_filename).st_size == 0):
        self._error("No graph produced")
        did_fail = True
        self.num_failed = self.num_failed + 1
        remove_files([ output_filename ])
        return CrmExit.ERROR
    normalize(output_filename)
    # Check whether dot output exists, and sort it
    if (not os.path.isfile(dot_output_filename) or
        os.stat(dot_output_filename).st_size == 0):
        self._error("No dot-file summary produced")
        did_fail = True
        self.num_failed = self.num_failed + 1
        remove_files([ dot_output_filename, output_filename ])
        return CrmExit.ERROR
    # Rewrite the dot file with its interior lines uniquely sorted so the
    # comparison below is independent of generation order
    with io.open(dot_output_filename, "rt") as f:
        first_line = f.readline() # "digraph" line with opening brace
        lines = f.readlines()
    last_line = lines[-1] # closing brace
    del lines[-1]
    lines = sorted(set(lines)) # unique sort
    with io.open(dot_output_filename, "wt") as f:
        f.write(first_line)
        f.writelines(lines)
        f.write(last_line)
    # Check whether score output exists, and sort it
    if (not os.path.isfile(score_output_filename)
        or os.stat(score_output_filename).st_size == 0):
        self._error("No allocation scores produced")
        did_fail = True
        self.num_failed = self.num_failed + 1
        remove_files([ score_output_filename, output_filename ])
        return CrmExit.ERROR
    else:
        sort_file(score_output_filename)
    if self.args.update:
        # --update: overwrite the stored expected outputs with what we got
        shutil.copyfile(output_filename, expected_filename)
        shutil.copyfile(dot_output_filename, dot_expected_filename)
        shutil.copyfile(score_output_filename, scores_filename)
        shutil.copyfile(summary_output_filename, summary_filename)
        print(" Updated expected outputs")
    # Compare generated outputs against the (possibly just-updated) expected
    # files; each mismatch is recorded separately
    if self._compare_files(summary_filename, summary_output_filename):
        self._failed("summary changed")
        did_fail = True
    if self._compare_files(dot_expected_filename, dot_output_filename):
        self._failed("dot-file summary changed")
        did_fail = True
    else:
        # Only clean up the dot output when it matched
        remove_files([ dot_output_filename ])
    if self._compare_files(expected_filename, output_filename):
        self._failed("xml-file changed")
        did_fail = True
    if self._compare_files(scores_filename, score_output_filename):
        self._failed("scores-file changed")
        did_fail = True
    remove_files([ output_filename,
                   score_output_filename,
                   summary_output_filename])
    if did_fail:
        self.num_failed = self.num_failed + 1
        return CrmExit.ERROR
    return CrmExit.OK
def run_all(self):
    """ Run every test in the TESTS table (plus TESTS_64BIT on 64-bit hosts) """
    if platform.architecture()[0] == "64bit":
        TESTS.extend(TESTS_64BIT)
    for group in TESTS:
        for entry in group:
            # Each entry is [name, description] with optional extra arguments
            extra_args = entry[2] if len(entry) > 2 else []
            self.run_one(entry[0], entry[1], extra_args)
        # Blank line separates test groups in the output
        print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args != []:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
return CrmExit.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
os.remove(self.failed_filename)
return CrmExit.ERROR
def run(self):
    """ Run test(s) as specified """
    self._print_summary()

    # Zero out the error log
    self.failed_file = io.open(self.failed_filename, "wt")

    if self.args.run is not None:
        # Single-shot mode: run just the requested test, then show its diff
        rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
        self.failed_file.close()
        cat(self.failed_filename)
    else:
        print("Performing the following tests from " + self.args.io_dir)
        print()
        self.run_all()
        print()
        self.failed_file.close()
        rc = self._test_results()
    return rc
if __name__ == "__main__":
    # Build the test runner and use its result as the process exit status
    scheduler = CtsScheduler()
    sys.exit(scheduler.run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/priority-fencing-delay.dot b/cts/scheduler/priority-fencing-delay.dot
new file mode 100644
index 0000000000..62ba699eb6
--- /dev/null
+++ b/cts/scheduler/priority-fencing-delay.dot
@@ -0,0 +1,109 @@
+ digraph "g" {
+"R-lxc-01_kiff-01_monitor_10000 kiff-02" [ style=bold color="green" fontcolor="black"]
+"R-lxc-01_kiff-01_start_0 kiff-02" -> "R-lxc-01_kiff-01_monitor_10000 kiff-02" [ style = bold]
+"R-lxc-01_kiff-01_start_0 kiff-02" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"R-lxc-01_kiff-01_start_0 kiff-02" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"R-lxc-01_kiff-01_start_0 kiff-02" [ style=bold color="green" fontcolor="black"]
+"R-lxc-01_kiff-01_stop_0 kiff-01" -> "R-lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"R-lxc-01_kiff-01_stop_0 kiff-01" -> "shared0-clone_stop_0" [ style = bold]
+"R-lxc-01_kiff-01_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"R-lxc-02_kiff-01_monitor_10000 kiff-02" [ style=bold color="green" fontcolor="black"]
+"R-lxc-02_kiff-01_start_0 kiff-02" -> "R-lxc-02_kiff-01_monitor_10000 kiff-02" [ style = bold]
+"R-lxc-02_kiff-01_start_0 kiff-02" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"R-lxc-02_kiff-01_start_0 kiff-02" [ style=bold color="green" fontcolor="black"]
+"R-lxc-02_kiff-01_stop_0 kiff-01" -> "R-lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"R-lxc-02_kiff-01_stop_0 kiff-01" -> "shared0-clone_stop_0" [ style = bold]
+"R-lxc-02_kiff-01_stop_0 kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"R-lxc-02_kiff-01_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"clvmd-clone_stop_0" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd-clone_stop_0" -> "clvmd_stop_0 kiff-01" [ style = bold]
+"clvmd-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"clvmd-clone_stopped_0" -> "dlm-clone_stop_0" [ style = bold]
+"clvmd-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"clvmd_monitor_0 lxc-01_kiff-02" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd_monitor_0 lxc-01_kiff-02" [ style=bold color="green" fontcolor="black"]
+"clvmd_monitor_0 lxc-02_kiff-02" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd_monitor_0 lxc-02_kiff-02" [ style=bold color="green" fontcolor="black"]
+"clvmd_stop_0 kiff-01" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd_stop_0 kiff-01" -> "dlm_stop_0 kiff-01" [ style = bold]
+"clvmd_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"dlm-clone_stop_0" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm-clone_stop_0" -> "dlm_stop_0 kiff-01" [ style = bold]
+"dlm-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"dlm_monitor_0 lxc-01_kiff-02" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm_monitor_0 lxc-01_kiff-02" [ style=bold color="green" fontcolor="black"]
+"dlm_monitor_0 lxc-02_kiff-02" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm_monitor_0 lxc-02_kiff-02" [ style=bold color="green" fontcolor="black"]
+"dlm_stop_0 kiff-01" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"fence-kiff-02_monitor_60000 kiff-02" [ style=bold color="green" fontcolor="black"]
+"fence-kiff-02_start_0 kiff-02" -> "fence-kiff-02_monitor_60000 kiff-02" [ style = bold]
+"fence-kiff-02_start_0 kiff-02" [ style=bold color="green" fontcolor="black"]
+"fence-kiff-02_stop_0 kiff-01" -> "fence-kiff-02_start_0 kiff-02" [ style = bold]
+"fence-kiff-02_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"lxc-01_kiff-01_monitor_30000 kiff-02" [ style=bold color="green" fontcolor="black"]
+"lxc-01_kiff-01_start_0 kiff-02" -> "lxc-01_kiff-01_monitor_30000 kiff-02" [ style = bold]
+"lxc-01_kiff-01_start_0 kiff-02" -> "vm-fs_monitor_20000 lxc-01_kiff-01" [ style = bold]
+"lxc-01_kiff-01_start_0 kiff-02" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"lxc-01_kiff-01_start_0 kiff-02" [ style=bold color="green" fontcolor="black"]
+"lxc-01_kiff-01_stop_0 kiff-01" -> "R-lxc-01_kiff-01_stop_0 kiff-01" [ style = bold]
+"lxc-01_kiff-01_stop_0 kiff-01" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"lxc-01_kiff-01_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"lxc-02_kiff-01_monitor_30000 kiff-02" [ style=bold color="green" fontcolor="black"]
+"lxc-02_kiff-01_start_0 kiff-02" -> "lxc-02_kiff-01_monitor_30000 kiff-02" [ style = bold]
+"lxc-02_kiff-01_start_0 kiff-02" [ style=bold color="green" fontcolor="black"]
+"lxc-02_kiff-01_stop_0 kiff-01" -> "R-lxc-02_kiff-01_stop_0 kiff-01" [ style = bold]
+"lxc-02_kiff-01_stop_0 kiff-01" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"lxc-02_kiff-01_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"shared0-clone_stop_0" -> "shared0-clone_stopped_0" [ style = bold]
+"shared0-clone_stop_0" -> "shared0_stop_0 kiff-01" [ style = bold]
+"shared0-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"shared0-clone_stopped_0" -> "clvmd-clone_stop_0" [ style = bold]
+"shared0-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"shared0_monitor_0 lxc-01_kiff-02" -> "shared0-clone_stopped_0" [ style = bold]
+"shared0_monitor_0 lxc-01_kiff-02" [ style=bold color="green" fontcolor="black"]
+"shared0_monitor_0 lxc-02_kiff-02" -> "shared0-clone_stopped_0" [ style = bold]
+"shared0_monitor_0 lxc-02_kiff-02" [ style=bold color="green" fontcolor="black"]
+"shared0_stop_0 kiff-01" -> "clvmd_stop_0 kiff-01" [ style = bold]
+"shared0_stop_0 kiff-01" -> "shared0-clone_stopped_0" [ style = bold]
+"shared0_stop_0 kiff-01" [ style=bold color="green" fontcolor="orange"]
+"stonith 'reboot' kiff-01" -> "R-lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' kiff-01" -> "R-lxc-01_kiff-01_stop_0 kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "R-lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' kiff-01" -> "R-lxc-02_kiff-01_stop_0 kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "clvmd-clone_stop_0" [ style = bold]
+"stonith 'reboot' kiff-01" -> "clvmd_stop_0 kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "dlm-clone_stop_0" [ style = bold]
+"stonith 'reboot' kiff-01" -> "dlm_stop_0 kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "shared0-clone_stop_0" [ style = bold]
+"stonith 'reboot' kiff-01" -> "shared0_stop_0 kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "stonith 'reboot' lxc-01_kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "stonith 'reboot' lxc-02_kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"stonith 'reboot' kiff-01" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' lxc-01_kiff-01" -> "R-lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "R-lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "fence-kiff-02_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" -> "vm-fs_stop_0 lxc-01_kiff-01" [ style = bold]
+"stonith 'reboot' lxc-01_kiff-01" [ style=bold color="green" fontcolor="orange"]
+"stonith 'reboot' lxc-02_kiff-01" -> "R-lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" -> "R-lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" -> "fence-kiff-02_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" -> "lxc-01_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" -> "lxc-02_kiff-01_start_0 kiff-02" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"stonith 'reboot' lxc-02_kiff-01" [ style=bold color="green" fontcolor="orange"]
+"vm-fs_monitor_0 lxc-01_kiff-02" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"vm-fs_monitor_0 lxc-01_kiff-02" [ style=bold color="green" fontcolor="black"]
+"vm-fs_monitor_0 lxc-02_kiff-02" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"vm-fs_monitor_0 lxc-02_kiff-02" [ style=bold color="green" fontcolor="black"]
+"vm-fs_monitor_20000 lxc-01_kiff-01" [ style=bold color="green" fontcolor="black"]
+"vm-fs_start_0 lxc-01_kiff-01" -> "vm-fs_monitor_20000 lxc-01_kiff-01" [ style = bold]
+"vm-fs_start_0 lxc-01_kiff-01" [ style=bold color="green" fontcolor="black"]
+"vm-fs_stop_0 lxc-01_kiff-01" -> "vm-fs_start_0 lxc-01_kiff-01" [ style = bold]
+"vm-fs_stop_0 lxc-01_kiff-01" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/cts/scheduler/priority-fencing-delay.exp b/cts/scheduler/priority-fencing-delay.exp
new file mode 100644
index 0000000000..c6315a1280
--- /dev/null
+++ b/cts/scheduler/priority-fencing-delay.exp
@@ -0,0 +1,570 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0" priority="100">
+ <action_set>
+ <rsc_op id="20" operation="monitor" operation_key="vm-fs_monitor_20000" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01" router_node="kiff-02">
+ <primitive id="vm-fs" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_on_node="lxc-01_kiff-01" CRM_meta_on_node_uuid="lxc-01_kiff-01" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="40000" device="/root" directory="/mnt/vm-fs" fstype="none" options="bind"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="vm-fs_start_0" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="58" operation="start" operation_key="lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1" priority="100">
+ <action_set>
+ <rsc_op id="19" operation="start" operation_key="vm-fs_start_0" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01" router_node="kiff-02">
+ <primitive id="vm-fs" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="lxc-01_kiff-01" CRM_meta_on_node_uuid="lxc-01_kiff-01" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="60000" device="/root" directory="/mnt/vm-fs" fstype="none" options="bind"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="vm-fs_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="monitor" operation_key="vm-fs_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="18" operation="stop" operation_key="vm-fs_stop_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="50" operation="start" operation_key="R-lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="stop" operation_key="R-lxc-02_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="58" operation="start" operation_key="lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="100">
+ <action_set>
+ <pseudo_event id="18" operation="stop" operation_key="vm-fs_stop_0">
+ <attributes CRM_meta_name="stop" CRM_meta_physical_host="kiff-01" CRM_meta_timeout="60000" device="/root" directory="/mnt/vm-fs" fstype="none" options="bind"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3" priority="100">
+ <action_set>
+ <rsc_op id="14" operation="monitor" operation_key="vm-fs_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02">
+ <primitive id="vm-fs" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_on_node="lxc-02_kiff-02" CRM_meta_on_node_uuid="lxc-02_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="40000" device="/root" directory="/mnt/vm-fs" fstype="none" options="bind"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="4" priority="100">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="vm-fs_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02">
+ <primitive id="vm-fs" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_on_node="lxc-01_kiff-02" CRM_meta_on_node_uuid="lxc-01_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="40000" device="/root" directory="/mnt/vm-fs" fstype="none" options="bind"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="27" operation="monitor" operation_key="fence-kiff-02_monitor_60000" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="fence-kiff-02" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" action="reboot" ipaddr="kiff-02-ilo" login="admin" passwd="admin" pcmk_host_check="static-list" pcmk_host_list="kiff-02"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="fence-kiff-02_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="26" operation="start" operation_key="fence-kiff-02_start_0" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="fence-kiff-02" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" action="reboot" ipaddr="kiff-02-ilo" login="admin" passwd="admin" pcmk_host_check="static-list" pcmk_host_list="kiff-02"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="stop" operation_key="fence-kiff-02_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="25" operation="stop" operation_key="fence-kiff-02_stop_0">
+ <attributes CRM_meta_timeout="20000" action="reboot" ipaddr="kiff-02-ilo" login="admin" passwd="admin" pcmk_host_check="static-list" pcmk_host_list="kiff-02"/>
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <pseudo_event id="28" operation="stop" operation_key="dlm_stop_0" internal_operation_key="dlm:0_stop_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_timeout="100000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="33" operation="stop" operation_key="dlm-clone_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="35" operation="stop" operation_key="clvmd_stop_0" internal_operation_key="clvmd:0_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="15" operation="monitor" operation_key="dlm_monitor_0" internal_operation_key="dlm:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02">
+ <primitive id="dlm" long-id="dlm:0" class="ocf" provider="pacemaker" type="controld"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-02_kiff-02" CRM_meta_on_node_uuid="lxc-02_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="dlm_monitor_0" internal_operation_key="dlm:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02">
+ <primitive id="dlm" long-id="dlm:0" class="ocf" provider="pacemaker" type="controld"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-01_kiff-02" CRM_meta_on_node_uuid="lxc-01_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11" priority="1000000">
+ <action_set>
+ <pseudo_event id="34" operation="stopped" operation_key="dlm-clone_stopped_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="monitor" operation_key="dlm_monitor_0" internal_operation_key="dlm:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="15" operation="monitor" operation_key="dlm_monitor_0" internal_operation_key="dlm:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="28" operation="stop" operation_key="dlm_stop_0" internal_operation_key="dlm:0_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="33" operation="stop" operation_key="dlm-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <pseudo_event id="33" operation="stop" operation_key="dlm-clone_stop_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="41" operation="stopped" operation_key="clvmd-clone_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="35" operation="stop" operation_key="clvmd_stop_0" internal_operation_key="clvmd:0_stop_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_timeout="90000" with_cmirrord="1"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="stop" operation_key="clvmd-clone_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="stop" operation_key="shared0_stop_0" internal_operation_key="shared0:0_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="16" operation="monitor" operation_key="clvmd_monitor_0" internal_operation_key="clvmd:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02">
+ <primitive id="clvmd" long-id="clvmd:0" class="ocf" provider="heartbeat" type="clvm"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-02_kiff-02" CRM_meta_on_node_uuid="lxc-02_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" with_cmirrord="1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="12" operation="monitor" operation_key="clvmd_monitor_0" internal_operation_key="clvmd:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02">
+ <primitive id="clvmd" long-id="clvmd:0" class="ocf" provider="heartbeat" type="clvm"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-01_kiff-02" CRM_meta_on_node_uuid="lxc-01_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" with_cmirrord="1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="16" priority="1000000">
+ <action_set>
+ <pseudo_event id="41" operation="stopped" operation_key="clvmd-clone_stopped_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="monitor" operation_key="clvmd_monitor_0" internal_operation_key="clvmd:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="monitor" operation_key="clvmd_monitor_0" internal_operation_key="clvmd:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="35" operation="stop" operation_key="clvmd_stop_0" internal_operation_key="clvmd:0_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="stop" operation_key="clvmd-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <pseudo_event id="40" operation="stop" operation_key="clvmd-clone_stop_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="stopped" operation_key="shared0-clone_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="18">
+ <action_set>
+ <pseudo_event id="42" operation="stop" operation_key="shared0_stop_0" internal_operation_key="shared0:0_stop_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_timeout="60000" device="/dev/shared/shared0" directory="/mnt/shared0" fstype="gfs2" options="errors=panic"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="47" operation="stop" operation_key="shared0-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19">
+ <action_set>
+ <rsc_op id="17" operation="monitor" operation_key="shared0_monitor_0" internal_operation_key="shared0:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02">
+ <primitive id="shared0" long-id="shared0:0" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-02_kiff-02" CRM_meta_on_node_uuid="lxc-02_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" device="/dev/shared/shared0" directory="/mnt/shared0" fstype="gfs2" options="errors=panic"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="20">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="shared0_monitor_0" internal_operation_key="shared0:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02">
+ <primitive id="shared0" long-id="shared0:0" class="ocf" provider="heartbeat" type="Filesystem"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="lxc-01_kiff-02" CRM_meta_on_node_uuid="lxc-01_kiff-02" CRM_meta_op_target_rc="7" CRM_meta_physical_host="kiff-02" CRM_meta_timeout="20000" device="/dev/shared/shared0" directory="/mnt/shared0" fstype="gfs2" options="errors=panic"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="21" priority="1000000">
+ <action_set>
+ <pseudo_event id="48" operation="stopped" operation_key="shared0-clone_stopped_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="monitor" operation_key="shared0_monitor_0" internal_operation_key="shared0:0_monitor_0" on_node="lxc-01_kiff-02" on_node_uuid="lxc-01_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="monitor" operation_key="shared0_monitor_0" internal_operation_key="shared0:0_monitor_0" on_node="lxc-02_kiff-02" on_node_uuid="lxc-02_kiff-02" router_node="kiff-02"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="stop" operation_key="shared0_stop_0" internal_operation_key="shared0:0_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="47" operation="stop" operation_key="shared0-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="22">
+ <action_set>
+ <pseudo_event id="47" operation="stop" operation_key="shared0-clone_stop_0">
+ <attributes CRM_meta_clone_max="6" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="49" operation="stop" operation_key="R-lxc-01_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="stop" operation_key="R-lxc-02_kiff-01_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="23">
+ <action_set>
+ <rsc_op id="51" operation="monitor" operation_key="R-lxc-01_kiff-01_monitor_10000" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="R-lxc-01_kiff-01" class="ocf" provider="heartbeat" type="VirtualDomain"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_remote_node="lxc-01_kiff-01" CRM_meta_timeout="30000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-01_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="start" operation_key="R-lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="24">
+ <action_set>
+ <rsc_op id="50" operation="start" operation_key="R-lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="R-lxc-01_kiff-01" class="ocf" provider="heartbeat" type="VirtualDomain"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_remote_node="lxc-01_kiff-01" CRM_meta_timeout="90000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-01_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="49" operation="stop" operation_key="R-lxc-01_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="25">
+ <action_set>
+ <pseudo_event id="49" operation="stop" operation_key="R-lxc-01_kiff-01_stop_0">
+ <attributes CRM_meta_name="stop" CRM_meta_remote_node="lxc-01_kiff-01" CRM_meta_timeout="90000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-01_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="57" operation="stop" operation_key="lxc-01_kiff-01_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="26">
+ <action_set>
+ <rsc_op id="54" operation="monitor" operation_key="R-lxc-02_kiff-01_monitor_10000" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="R-lxc-02_kiff-01" class="ocf" provider="heartbeat" type="VirtualDomain"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_remote_node="lxc-02_kiff-01" CRM_meta_timeout="30000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-02_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="53" operation="start" operation_key="R-lxc-02_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="27">
+ <action_set>
+ <rsc_op id="53" operation="start" operation_key="R-lxc-02_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="R-lxc-02_kiff-01" class="ocf" provider="heartbeat" type="VirtualDomain"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_remote_node="lxc-02_kiff-01" CRM_meta_timeout="90000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-02_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="stop" operation_key="R-lxc-02_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="28">
+ <action_set>
+ <pseudo_event id="52" operation="stop" operation_key="R-lxc-02_kiff-01_stop_0">
+ <attributes CRM_meta_name="stop" CRM_meta_remote_node="lxc-02_kiff-01" CRM_meta_timeout="90000" config="/mnt/shared0/test-virtual-domain/lxc/lxc-02_kiff-01.xml" force_stop="true" hypervisor="lxc:///"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="60" operation="stop" operation_key="lxc-02_kiff-01_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="29">
+ <action_set>
+ <rsc_op id="59" operation="monitor" operation_key="lxc-01_kiff-01_monitor_30000" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="lxc-01_kiff-01" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="R-lxc-01_kiff-01" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="30000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="58" operation="start" operation_key="lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="30">
+ <action_set>
+ <rsc_op id="58" operation="start" operation_key="lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="lxc-01_kiff-01" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="R-lxc-01_kiff-01" CRM_meta_name="start" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="60000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="start" operation_key="R-lxc-01_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="57" operation="stop" operation_key="lxc-01_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="31">
+ <action_set>
+ <pseudo_event id="57" operation="stop" operation_key="lxc-01_kiff-01_stop_0">
+ <attributes CRM_meta_container="R-lxc-01_kiff-01" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="32">
+ <action_set>
+ <rsc_op id="62" operation="monitor" operation_key="lxc-02_kiff-01_monitor_30000" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="lxc-02_kiff-01" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="R-lxc-02_kiff-01" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="30000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="61" operation="start" operation_key="lxc-02_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="33">
+ <action_set>
+ <rsc_op id="61" operation="start" operation_key="lxc-02_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2">
+ <primitive id="lxc-02_kiff-01" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="R-lxc-02_kiff-01" CRM_meta_name="start" CRM_meta_on_node="kiff-02" CRM_meta_on_node_uuid="2" CRM_meta_timeout="60000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="53" operation="start" operation_key="R-lxc-02_kiff-01_start_0" on_node="kiff-02" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="60" operation="stop" operation_key="lxc-02_kiff-01_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="34">
+ <action_set>
+ <pseudo_event id="60" operation="stop" operation_key="lxc-02_kiff-01_stop_0">
+ <attributes CRM_meta_container="R-lxc-02_kiff-01" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="35">
+ <action_set>
+ <pseudo_event id="68" operation="stonith" operation_key="stonith-lxc-02_kiff-01-reboot" on_node="lxc-02_kiff-01" on_node_uuid="lxc-02_kiff-01">
+ <attributes CRM_meta_on_node="lxc-02_kiff-01" CRM_meta_on_node_uuid="lxc-02_kiff-01" CRM_meta_stonith_action="reboot" />
+ <downed>
+ <node id="lxc-02_kiff-01"/>
+ </downed>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="36">
+ <action_set>
+ <pseudo_event id="67" operation="stonith" operation_key="stonith-lxc-01_kiff-01-reboot" on_node="lxc-01_kiff-01" on_node_uuid="lxc-01_kiff-01">
+ <attributes CRM_meta_on_node="lxc-01_kiff-01" CRM_meta_on_node_uuid="lxc-01_kiff-01" CRM_meta_stonith_action="reboot" />
+ <downed>
+ <node id="lxc-01_kiff-01"/>
+ </downed>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="37">
+ <action_set>
+ <crm_event id="1" operation="stonith" operation_key="stonith-kiff-01-reboot" on_node="kiff-01" on_node_uuid="1">
+ <attributes CRM_meta_on_node="kiff-01" CRM_meta_on_node_uuid="1" CRM_meta_priority_fencing_delay="15" CRM_meta_probe_complete="true" CRM_meta_shutdown="0" CRM_meta_stonith_action="reboot" />
+ <downed>
+ <node id="1"/>
+ <node id="lxc-01_kiff-01"/>
+ <node id="lxc-02_kiff-01"/>
+ </downed>
+ </crm_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/priority-fencing-delay.scores b/cts/scheduler/priority-fencing-delay.scores
new file mode 100644
index 0000000000..b96175efbf
--- /dev/null
+++ b/cts/scheduler/priority-fencing-delay.scores
@@ -0,0 +1,301 @@
+Allocation scores:
+pcmk__clone_allocate: clvmd-clone allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd-clone allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd-clone allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd-clone allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd-clone allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd-clone allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:0 allocation score on kiff-01: 1
+pcmk__clone_allocate: clvmd:0 allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd:0 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:0 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:0 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:0 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:1 allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd:1 allocation score on kiff-02: 1
+pcmk__clone_allocate: clvmd:1 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:1 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:1 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:1 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:2 allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd:2 allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd:2 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:2 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:2 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:2 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:3 allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd:3 allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd:3 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:3 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:3 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:3 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:4 allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd:4 allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd:4 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:4 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:4 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:4 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: clvmd:5 allocation score on kiff-01: 0
+pcmk__clone_allocate: clvmd:5 allocation score on kiff-02: 0
+pcmk__clone_allocate: clvmd:5 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: clvmd:5 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: clvmd:5 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: clvmd:5 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: dlm-clone allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm-clone allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm-clone allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm-clone allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm-clone allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm-clone allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:0 allocation score on kiff-01: 1
+pcmk__clone_allocate: dlm:0 allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm:0 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:0 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:0 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:0 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:1 allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm:1 allocation score on kiff-02: 1
+pcmk__clone_allocate: dlm:1 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:1 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:1 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:1 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:2 allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm:2 allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm:2 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:2 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:2 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:2 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:3 allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm:3 allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm:3 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:3 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:3 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:3 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:4 allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm:4 allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm:4 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:4 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:4 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:4 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:5 allocation score on kiff-01: 0
+pcmk__clone_allocate: dlm:5 allocation score on kiff-02: 0
+pcmk__clone_allocate: dlm:5 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:5 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__clone_allocate: dlm:5 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__clone_allocate: dlm:5 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__clone_allocate: shared0-clone allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0-clone allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0-clone allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0-clone allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0-clone allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0-clone allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:0 allocation score on kiff-01: 1
+pcmk__clone_allocate: shared0:0 allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0:0 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:0 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:0 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:0 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:1 allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0:1 allocation score on kiff-02: 1
+pcmk__clone_allocate: shared0:1 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:1 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:1 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:1 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:2 allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0:2 allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0:2 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:2 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:2 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:2 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:3 allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0:3 allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0:3 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:3 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:3 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:3 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:4 allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0:4 allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0:4 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:4 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:4 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:4 allocation score on lxc-02_kiff-02: 0
+pcmk__clone_allocate: shared0:5 allocation score on kiff-01: 0
+pcmk__clone_allocate: shared0:5 allocation score on kiff-02: 0
+pcmk__clone_allocate: shared0:5 allocation score on lxc-01_kiff-01: 0
+pcmk__clone_allocate: shared0:5 allocation score on lxc-01_kiff-02: 0
+pcmk__clone_allocate: shared0:5 allocation score on lxc-02_kiff-01: 0
+pcmk__clone_allocate: shared0:5 allocation score on lxc-02_kiff-02: 0
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on kiff-02: 0
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-01 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on kiff-02: 100
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-01_kiff-02 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on kiff-02: 0
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-01 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on kiff-02: 100
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: R-lxc-02_kiff-02 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:0 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:1 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:1 allocation score on kiff-02: 1
+pcmk__native_allocate: clvmd:1 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:1 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:1 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:1 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:2 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:3 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:4 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: clvmd:5 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:0 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:1 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:1 allocation score on kiff-02: 1
+pcmk__native_allocate: dlm:1 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:1 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:1 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:1 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:2 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:3 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:4 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: dlm:5 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: fence-kiff-01 allocation score on kiff-01: 0
+pcmk__native_allocate: fence-kiff-01 allocation score on kiff-02: 0
+pcmk__native_allocate: fence-kiff-01 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: fence-kiff-01 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: fence-kiff-01 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: fence-kiff-01 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: fence-kiff-02 allocation score on kiff-01: 0
+pcmk__native_allocate: fence-kiff-02 allocation score on kiff-02: 0
+pcmk__native_allocate: fence-kiff-02 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: fence-kiff-02 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: fence-kiff-02 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: fence-kiff-02 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on kiff-02: 0
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-01 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on kiff-02: 0
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-01_kiff-02 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on kiff-02: 0
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-01 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on kiff-02: 0
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: lxc-02_kiff-02 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:0 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:1 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:1 allocation score on kiff-02: 1
+pcmk__native_allocate: shared0:1 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:1 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:1 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:1 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:2 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:3 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:4 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on kiff-01: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on kiff-02: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on lxc-01_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on lxc-01_kiff-02: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on lxc-02_kiff-01: -INFINITY
+pcmk__native_allocate: shared0:5 allocation score on lxc-02_kiff-02: -INFINITY
+pcmk__native_allocate: vm-fs allocation score on kiff-01: 0
+pcmk__native_allocate: vm-fs allocation score on kiff-02: 0
+pcmk__native_allocate: vm-fs allocation score on lxc-01_kiff-01: 0
+pcmk__native_allocate: vm-fs allocation score on lxc-01_kiff-02: 0
+pcmk__native_allocate: vm-fs allocation score on lxc-02_kiff-01: 0
+pcmk__native_allocate: vm-fs allocation score on lxc-02_kiff-02: 0
diff --git a/cts/scheduler/priority-fencing-delay.summary b/cts/scheduler/priority-fencing-delay.summary
new file mode 100644
index 0000000000..20a9a4a91b
--- /dev/null
+++ b/cts/scheduler/priority-fencing-delay.summary
@@ -0,0 +1,102 @@
+
+Current cluster status:
+Node kiff-01 (1): UNCLEAN (offline)
+Online: [ kiff-02 ]
+GuestOnline: [ lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-02:R-lxc-02_kiff-02 ]
+
+ vm-fs (ocf::heartbeat:Filesystem): FAILED lxc-01_kiff-01
+ R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
+ fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
+ fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
+ Clone Set: dlm-clone [dlm]
+ dlm (ocf::pacemaker:controld): Started kiff-01 (UNCLEAN)
+ Started: [ kiff-02 ]
+ Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ Clone Set: clvmd-clone [clvmd]
+ clvmd (ocf::heartbeat:clvm): Started kiff-01 (UNCLEAN)
+ Started: [ kiff-02 ]
+ Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ Clone Set: shared0-clone [shared0]
+ shared0 (ocf::heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
+ Started: [ kiff-02 ]
+ Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
+ R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
+ R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
+
+Transition Summary:
+ * Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
+ * Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean'
+ * Fence (reboot) kiff-01 'peer is no longer part of the cluster'
+ * Recover vm-fs ( lxc-01_kiff-01 )
+ * Move fence-kiff-02 ( kiff-01 -> kiff-02 )
+ * Stop dlm:0 ( kiff-01 ) due to node availability
+ * Stop clvmd:0 ( kiff-01 ) due to node availability
+ * Stop shared0:0 ( kiff-01 ) due to node availability
+ * Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
+ * Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
+ * Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
+ * Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
+
+Executing cluster transition:
+ * Resource action: vm-fs monitor on lxc-02_kiff-02
+ * Resource action: vm-fs monitor on lxc-01_kiff-02
+ * Pseudo action: fence-kiff-02_stop_0
+ * Resource action: dlm monitor on lxc-02_kiff-02
+ * Resource action: dlm monitor on lxc-01_kiff-02
+ * Resource action: clvmd monitor on lxc-02_kiff-02
+ * Resource action: clvmd monitor on lxc-01_kiff-02
+ * Resource action: shared0 monitor on lxc-02_kiff-02
+ * Resource action: shared0 monitor on lxc-01_kiff-02
+ * Pseudo action: lxc-01_kiff-01_stop_0
+ * Pseudo action: lxc-02_kiff-01_stop_0
+ * Fencing kiff-01 (reboot)
+ * Pseudo action: R-lxc-01_kiff-01_stop_0
+ * Pseudo action: R-lxc-02_kiff-01_stop_0
+ * Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
+ * Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
+ * Pseudo action: vm-fs_stop_0
+ * Resource action: fence-kiff-02 start on kiff-02
+ * Pseudo action: shared0-clone_stop_0
+ * Resource action: R-lxc-01_kiff-01 start on kiff-02
+ * Resource action: R-lxc-02_kiff-01 start on kiff-02
+ * Resource action: lxc-01_kiff-01 start on kiff-02
+ * Resource action: lxc-02_kiff-01 start on kiff-02
+ * Resource action: vm-fs start on lxc-01_kiff-01
+ * Resource action: fence-kiff-02 monitor=60000 on kiff-02
+ * Pseudo action: shared0_stop_0
+ * Pseudo action: shared0-clone_stopped_0
+ * Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
+ * Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
+ * Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
+ * Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
+ * Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
+ * Pseudo action: clvmd-clone_stop_0
+ * Pseudo action: clvmd_stop_0
+ * Pseudo action: clvmd-clone_stopped_0
+ * Pseudo action: dlm-clone_stop_0
+ * Pseudo action: dlm_stop_0
+ * Pseudo action: dlm-clone_stopped_0
+
+Revised cluster status:
+Online: [ kiff-02 ]
+OFFLINE: [ kiff-01 ]
+GuestOnline: [ lxc-01_kiff-01:R-lxc-01_kiff-01 lxc-01_kiff-02:R-lxc-01_kiff-02 lxc-02_kiff-01:R-lxc-02_kiff-01 lxc-02_kiff-02:R-lxc-02_kiff-02 ]
+
+ vm-fs (ocf::heartbeat:Filesystem): Started lxc-01_kiff-01
+ R-lxc-01_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
+ fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
+ fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
+ Clone Set: dlm-clone [dlm]
+ Started: [ kiff-02 ]
+ Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ Clone Set: clvmd-clone [clvmd]
+ Started: [ kiff-02 ]
+ Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ Clone Set: shared0-clone [shared0]
+ Started: [ kiff-02 ]
+ Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
+ R-lxc-01_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02
+ R-lxc-02_kiff-01 (ocf::heartbeat:VirtualDomain): Started kiff-02
+ R-lxc-02_kiff-02 (ocf::heartbeat:VirtualDomain): Started kiff-02
+
diff --git a/cts/scheduler/priority-fencing-delay.xml b/cts/scheduler/priority-fencing-delay.xml
new file mode 100644
index 0000000000..04d1d3e290
--- /dev/null
+++ b/cts/scheduler/priority-fencing-delay.xml
@@ -0,0 +1,351 @@
+<cib admin_epoch="0" cib-last-written="Fri Dec 19 18:49:12 2014" crm_feature_set="3.0.9" dc-uuid="2" epoch="51" have-quorum="1" num_updates="78" validate-with="pacemaker-1.3">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.12-a14efad"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="STSRHTS29816"/>
+ <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="freeze"/>
+ <nvpair id="cib-bootstrap-options-priority-fencing-delay" name="priority-fencing-delay" value="15"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="kiff-01"/>
+ <node id="2" uname="kiff-02"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="fence-kiff-01" type="fence_ipmilan">
+ <instance_attributes id="fence-kiff-01-instance_attributes">
+ <nvpair id="fence-kiff-01-instance_attributes-action" name="action" value="reboot"/>
+ <nvpair id="fence-kiff-01-instance_attributes-ipaddr" name="ipaddr" value="kiff-01-ilo"/>
+ <nvpair id="fence-kiff-01-instance_attributes-login" name="login" value="admin"/>
+ <nvpair id="fence-kiff-01-instance_attributes-passwd" name="passwd" value="admin"/>
+ <nvpair id="fence-kiff-01-instance_attributes-pcmk_host_check" name="pcmk_host_check" value="static-list"/>
+ <nvpair id="fence-kiff-01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="kiff-01"/>
+ </instance_attributes>
+ <operations>
+ <op id="fence-kiff-01-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="fence-kiff-02" type="fence_ipmilan">
+ <instance_attributes id="fence-kiff-02-instance_attributes">
+ <nvpair id="fence-kiff-02-instance_attributes-action" name="action" value="reboot"/>
+ <nvpair id="fence-kiff-02-instance_attributes-ipaddr" name="ipaddr" value="kiff-02-ilo"/>
+ <nvpair id="fence-kiff-02-instance_attributes-login" name="login" value="admin"/>
+ <nvpair id="fence-kiff-02-instance_attributes-passwd" name="passwd" value="admin"/>
+ <nvpair id="fence-kiff-02-instance_attributes-pcmk_host_check" name="pcmk_host_check" value="static-list"/>
+ <nvpair id="fence-kiff-02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="kiff-02"/>
+ </instance_attributes>
+ <operations>
+ <op id="fence-kiff-02-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <clone id="dlm-clone">
+ <primitive class="ocf" id="dlm" provider="pacemaker" type="controld">
+ <operations>
+ <op id="dlm-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="dlm-stop-timeout-100" interval="0s" name="stop" timeout="100"/>
+ <op id="dlm-monitor-interval-30s" interval="30s" name="monitor" on-fail="fence"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="dlm-clone-meta">
+ <nvpair id="dlm-interleave" name="interleave" value="true"/>
+ <nvpair id="dlm-ordered" name="ordered" value="true"/>
+ </meta_attributes>
+ </clone>
+ <clone id="clvmd-clone">
+ <primitive class="ocf" id="clvmd" provider="heartbeat" type="clvm">
+ <instance_attributes id="clvmd-instance_attributes">
+ <nvpair id="clvmd-instance_attributes-with_cmirrord" name="with_cmirrord" value="1"/>
+ </instance_attributes>
+ <operations>
+ <op id="clvmd-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="clvmd-stop-timeout-90" interval="0s" name="stop" timeout="90"/>
+ <op id="clvmd-monitor-interval-30s" interval="30s" name="monitor" on-fail="fence"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="clvmd-clone-meta">
+ <nvpair id="clvmd-interleave" name="interleave" value="true"/>
+ <nvpair id="clvmd-ordered" name="ordered" value="true"/>
+ </meta_attributes>
+ </clone>
+ <clone id="shared0-clone">
+ <primitive class="ocf" id="shared0" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="shared0-instance_attributes">
+ <nvpair id="shared0-instance_attributes-device" name="device" value="/dev/shared/shared0"/>
+ <nvpair id="shared0-instance_attributes-directory" name="directory" value="/mnt/shared0"/>
+ <nvpair id="shared0-instance_attributes-fstype" name="fstype" value="gfs2"/>
+ <nvpair id="shared0-instance_attributes-options" name="options" value="errors=panic"/>
+ </instance_attributes>
+ <operations>
+ <op id="shared0-start-timeout-60" interval="0s" name="start" timeout="60"/>
+ <op id="shared0-stop-timeout-60" interval="0s" name="stop" timeout="60"/>
+ <op id="shared0-monitor-interval-10s" interval="10s" name="monitor" on-fail="fence"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="shared0-clone-meta">
+ <nvpair id="shared0-interleave" name="interleave" value="true"/>
+ </meta_attributes>
+ </clone>
+ <primitive class="ocf" id="R-lxc-01_kiff-01" provider="heartbeat" type="VirtualDomain">
+ <instance_attributes id="R-lxc-01_kiff-01-instance_attributes">
+ <nvpair id="R-lxc-01_kiff-01-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
+ <nvpair id="R-lxc-01_kiff-01-instance_attributes-config" name="config" value="/mnt/shared0/test-virtual-domain/lxc/lxc-01_kiff-01.xml"/>
+ <nvpair id="R-lxc-01_kiff-01-instance_attributes-force_stop" name="force_stop" value="true"/>
+ </instance_attributes>
+ <operations>
+ <op id="R-lxc-01_kiff-01-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="R-lxc-01_kiff-01-stop-timeout-90" interval="0s" name="stop" timeout="90"/>
+ <op id="R-lxc-01_kiff-01-monitor-interval-10" interval="10" name="monitor" timeout="30"/>
+ </operations>
+ <meta_attributes id="R-lxc-01_kiff-01-meta_attributes">
+ <nvpair id="R-lxc-01_kiff-01-meta_attributes-remote-node" name="remote-node" value="lxc-01_kiff-01"/>
+ </meta_attributes>
+ <utilization id="R-lxc-01_kiff-01-utilization">
+ <nvpair id="R-lxc-01_kiff-01-utilization-cpu" name="cpu" value="1"/>
+ <nvpair id="R-lxc-01_kiff-01-utilization-hv_memory" name="hv_memory" value="100"/>
+ </utilization>
+ </primitive>
+ <primitive class="ocf" id="R-lxc-02_kiff-01" provider="heartbeat" type="VirtualDomain">
+ <instance_attributes id="R-lxc-02_kiff-01-instance_attributes">
+ <nvpair id="R-lxc-02_kiff-01-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
+ <nvpair id="R-lxc-02_kiff-01-instance_attributes-config" name="config" value="/mnt/shared0/test-virtual-domain/lxc/lxc-02_kiff-01.xml"/>
+ <nvpair id="R-lxc-02_kiff-01-instance_attributes-force_stop" name="force_stop" value="true"/>
+ </instance_attributes>
+ <operations>
+ <op id="R-lxc-02_kiff-01-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="R-lxc-02_kiff-01-stop-timeout-90" interval="0s" name="stop" timeout="90"/>
+ <op id="R-lxc-02_kiff-01-monitor-interval-10" interval="10" name="monitor" timeout="30"/>
+ </operations>
+ <meta_attributes id="R-lxc-02_kiff-01-meta_attributes">
+ <nvpair id="R-lxc-02_kiff-01-meta_attributes-remote-node" name="remote-node" value="lxc-02_kiff-01"/>
+ </meta_attributes>
+ <utilization id="R-lxc-02_kiff-01-utilization">
+ <nvpair id="R-lxc-02_kiff-01-utilization-cpu" name="cpu" value="1"/>
+ <nvpair id="R-lxc-02_kiff-01-utilization-hv_memory" name="hv_memory" value="100"/>
+ </utilization>
+ </primitive>
+ <primitive class="ocf" id="R-lxc-01_kiff-02" provider="heartbeat" type="VirtualDomain">
+ <instance_attributes id="R-lxc-01_kiff-02-instance_attributes">
+ <nvpair id="R-lxc-01_kiff-02-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
+ <nvpair id="R-lxc-01_kiff-02-instance_attributes-config" name="config" value="/mnt/shared0/test-virtual-domain/lxc/lxc-01_kiff-02.xml"/>
+ <nvpair id="R-lxc-01_kiff-02-instance_attributes-force_stop" name="force_stop" value="true"/>
+ </instance_attributes>
+ <operations>
+ <op id="R-lxc-01_kiff-02-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="R-lxc-01_kiff-02-stop-timeout-90" interval="0s" name="stop" timeout="90"/>
+ <op id="R-lxc-01_kiff-02-monitor-interval-10" interval="10" name="monitor" timeout="30"/>
+ </operations>
+ <meta_attributes id="R-lxc-01_kiff-02-meta_attributes">
+ <nvpair id="R-lxc-01_kiff-02-meta_attributes-remote-node" name="remote-node" value="lxc-01_kiff-02"/>
+ <nvpair id="R-lxc-01_kiff-02-meta_attributes-priority" name="priority" value="50"/>
+ </meta_attributes>
+ <utilization id="R-lxc-01_kiff-02-utilization">
+ <nvpair id="R-lxc-01_kiff-02-utilization-cpu" name="cpu" value="1"/>
+ <nvpair id="R-lxc-01_kiff-02-utilization-hv_memory" name="hv_memory" value="100"/>
+ </utilization>
+ </primitive>
+ <primitive class="ocf" id="R-lxc-02_kiff-02" provider="heartbeat" type="VirtualDomain">
+ <instance_attributes id="R-lxc-02_kiff-02-instance_attributes">
+ <nvpair id="R-lxc-02_kiff-02-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
+ <nvpair id="R-lxc-02_kiff-02-instance_attributes-config" name="config" value="/mnt/shared0/test-virtual-domain/lxc/lxc-02_kiff-02.xml"/>
+ <nvpair id="R-lxc-02_kiff-02-instance_attributes-force_stop" name="force_stop" value="true"/>
+ </instance_attributes>
+ <operations>
+ <op id="R-lxc-02_kiff-02-start-timeout-90" interval="0s" name="start" timeout="90"/>
+ <op id="R-lxc-02_kiff-02-stop-timeout-90" interval="0s" name="stop" timeout="90"/>
+ <op id="R-lxc-02_kiff-02-monitor-interval-10" interval="10" name="monitor" timeout="30"/>
+ </operations>
+ <meta_attributes id="R-lxc-02_kiff-02-meta_attributes">
+ <nvpair id="R-lxc-02_kiff-02-meta_attributes-remote-node" name="remote-node" value="lxc-02_kiff-02"/>
+ </meta_attributes>
+ <utilization id="R-lxc-02_kiff-02-utilization">
+ <nvpair id="R-lxc-02_kiff-02-utilization-cpu" name="cpu" value="1"/>
+ <nvpair id="R-lxc-02_kiff-02-utilization-hv_memory" name="hv_memory" value="100"/>
+ </utilization>
+ </primitive>
+ <primitive class="ocf" id="vm-fs" provider="heartbeat" type="Filesystem">
+ <instance_attributes id="vm-fs-instance_attributes">
+ <nvpair id="vm-fs-instance_attributes-device" name="device" value="/root"/>
+ <nvpair id="vm-fs-instance_attributes-directory" name="directory" value="/mnt/vm-fs"/>
+ <nvpair id="vm-fs-instance_attributes-fstype" name="fstype" value="none"/>
+ <nvpair id="vm-fs-instance_attributes-options" name="options" value="bind"/>
+ </instance_attributes>
+ <operations>
+ <op id="vm-fs-start-timeout-60" interval="0s" name="start" timeout="60"/>
+ <op id="vm-fs-stop-timeout-60" interval="0s" name="stop" timeout="60"/>
+ <op id="vm-fs-monitor-interval-20" interval="20" name="monitor" timeout="40"/>
+ </operations>
+ <meta_attributes id="vm-fs-meta_attributes">
+ <nvpair id="vm-fs-meta_attributes-priority" name="priority" value="100"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_order first="dlm-clone" first-action="start" id="order-dlm-clone-clvmd-clone-mandatory" then="clvmd-clone" then-action="start"/>
+ <rsc_colocation id="colocation-clvmd-clone-dlm-clone-INFINITY" rsc="clvmd-clone" score="INFINITY" with-rsc="dlm-clone"/>
+ <rsc_order first="clvmd-clone" first-action="start" id="order-clvmd-clone-shared0-clone-mandatory" then="shared0-clone" then-action="start"/>
+ <rsc_colocation id="colocation-shared0-clone-clvmd-clone-INFINITY" rsc="shared0-clone" score="INFINITY" with-rsc="clvmd-clone"/>
+ <rsc_location id="location-dlm-clone-lxc-01_kiff-01--INFINITY" node="lxc-01_kiff-01" rsc="dlm-clone" score="-INFINITY"/>
+ <rsc_order first="shared0-clone" first-action="start" id="order-shared0-clone-R-lxc-01_kiff-01-mandatory" then="R-lxc-01_kiff-01" then-action="start"/>
+ <rsc_location id="location-R-lxc-01_kiff-01-kiff-01-100" node="kiff-01" rsc="R-lxc-01_kiff-01" score="100"/>
+ <rsc_location id="location-dlm-clone-lxc-02_kiff-01--INFINITY" node="lxc-02_kiff-01" rsc="dlm-clone" score="-INFINITY"/>
+ <rsc_order first="shared0-clone" first-action="start" id="order-shared0-clone-R-lxc-02_kiff-01-mandatory" then="R-lxc-02_kiff-01" then-action="start"/>
+ <rsc_location id="location-R-lxc-02_kiff-01-kiff-01-100" node="kiff-01" rsc="R-lxc-02_kiff-01" score="100"/>
+ <rsc_location id="location-dlm-clone-lxc-01_kiff-02--INFINITY" node="lxc-01_kiff-02" rsc="dlm-clone" score="-INFINITY"/>
+ <rsc_order first="shared0-clone" first-action="start" id="order-shared0-clone-R-lxc-01_kiff-02-mandatory" then="R-lxc-01_kiff-02" then-action="start"/>
+ <rsc_location id="location-R-lxc-01_kiff-02-kiff-02-100" node="kiff-02" rsc="R-lxc-01_kiff-02" score="100"/>
+ <rsc_location id="location-dlm-clone-lxc-02_kiff-02--INFINITY" node="lxc-02_kiff-02" rsc="dlm-clone" score="-INFINITY"/>
+ <rsc_order first="shared0-clone" first-action="start" id="order-shared0-clone-R-lxc-02_kiff-02-mandatory" then="R-lxc-02_kiff-02" then-action="start"/>
+ <rsc_location id="location-R-lxc-02_kiff-02-kiff-02-100" node="kiff-02" rsc="R-lxc-02_kiff-02" score="100"/>
+ <rsc_location id="location-vm-fs-lxc-01_reno-01-100" node="lxc-01_reno-01" rsc="vm-fs" score="100"/>
+ </constraints>
+ </configuration>
+ <status>
+ <node_state crm-debug-origin="post_cache_update" crmd="offline" expected="member" id="1" in_ccm="false" join="member" uname="kiff-01">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-shutdown" name="shutdown" value="0"/>
+ <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="fence-kiff-01" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="fence-kiff-01_last_0" operation_key="fence-kiff-01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="19:2:7:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:7;19:2:7:ed834806-99be-45de-9c12-c980ee8706a6" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1419011896" last-rc-change="1419011896" exec-time="2" queue-time="0" op-digest="4daeb50c5b907ee60049bf819d52bf47" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="fence-kiff-02" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="fence-kiff-02_last_0" operation_key="fence-kiff-02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="33:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;33:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="46" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="105" queue-time="0" op-digest="721cfc00d183a798aab10fb147ff0518" on_node="kiff-01"/>
+ <lrm_rsc_op id="fence-kiff-02_monitor_60000" operation_key="fence-kiff-02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="34:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;34:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1419011897" exec-time="95" queue-time="0" op-digest="3edb9bb87d2e2ef5d171c831a712b481" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-01_kiff-01" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-01_kiff-01_last_0" operation_key="R-lxc-01_kiff-01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="44:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;44:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="53" rc-code="0" op-status="0" interval="0" last-run="1419011901" last-rc-change="1419011901" exec-time="907" queue-time="0" op-digest="1120ab98948626507460f24405da6147" on_node="kiff-01"/>
+ <lrm_rsc_op id="R-lxc-01_kiff-01_monitor_10000" operation_key="R-lxc-01_kiff-01_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="48:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;48:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="55" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011902" exec-time="211" queue-time="0" op-digest="45b6ccfbfd5dd7fe94f80ec3d8d6d78d" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-02_kiff-01" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-02_kiff-01_last_0" operation_key="R-lxc-02_kiff-01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="46:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;46:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="54" rc-code="0" op-status="0" interval="0" last-run="1419011901" last-rc-change="1419011901" exec-time="895" queue-time="0" op-digest="6d28982e9d5ab858dea79f0a4bc07ba6" on_node="kiff-01"/>
+ <lrm_rsc_op id="R-lxc-02_kiff-01_monitor_10000" operation_key="R-lxc-02_kiff-01_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="51:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;51:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="56" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011902" exec-time="208" queue-time="0" op-digest="5aa25dd0c846987518afaba029b0a0a8" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-01_kiff-02" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-01_kiff-02_last_0" operation_key="R-lxc-01_kiff-02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="26:2:7:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:7;26:2:7:ed834806-99be-45de-9c12-c980ee8706a6" call-id="36" rc-code="7" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="66" queue-time="0" op-digest="6ac2457c88e8834990f95e871a51d1fc" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-02_kiff-02" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-02_kiff-02_last_0" operation_key="R-lxc-02_kiff-02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="27:2:7:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:7;27:2:7:ed834806-99be-45de-9c12-c980ee8706a6" call-id="40" rc-code="7" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="65" queue-time="0" op-digest="749edaca3e37d0e4526f44849ed63d4b" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="dlm" type="controld" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="dlm_last_0" operation_key="dlm_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="37:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;37:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="45" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="1150" queue-time="71" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-01"/>
+ <lrm_rsc_op id="dlm_monitor_30000" operation_key="dlm_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="21:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;21:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="48" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011898" exec-time="42" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="vm-fs" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vm-fs_last_0" operation_key="vm-fs_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="28:2:7:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:7;28:2:7:ed834806-99be-45de-9c12-c980ee8706a6" call-id="44" rc-code="7" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="81" queue-time="0" op-digest="4e01fcdd304402f9cf3416cee0dc30d2" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="shared0" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="shared0_last_0" operation_key="shared0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="38:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;38:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="51" rc-code="0" op-status="0" interval="0" last-run="1419011900" last-rc-change="1419011900" exec-time="917" queue-time="0" op-digest="1684b65207e6da00ef41166b1d2e7147" on_node="kiff-01"/>
+ <lrm_rsc_op id="shared0_monitor_10000" operation_key="shared0_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="39:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;39:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="52" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011901" exec-time="63" queue-time="0" op-digest="a976d846b49206dece2f5eecf97fc980" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="clvmd" type="clvm" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="clvmd_last_0" operation_key="clvmd_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="30:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;30:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="49" rc-code="0" op-status="0" interval="0" last-run="1419011898" last-rc-change="1419011898" exec-time="1787" queue-time="0" op-digest="8c0920550ecae28ee15ea3371898e446" on_node="kiff-01"/>
+ <lrm_rsc_op id="clvmd_monitor_30000" operation_key="clvmd_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="31:3:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;31:3:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="50" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011900" exec-time="40" queue-time="0" op-digest="93512119a9313fd62f16acd7cc587734" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-01_kiff-01" type="remote" class="ocf" provider="pacemaker" container="R-lxc-01_kiff-01">
+ <lrm_rsc_op id="lxc-01_kiff-01_last_0" operation_key="lxc-01_kiff-01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="58:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;58:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="1" rc-code="0" op-status="0" interval="0" last-run="1419011902" last-rc-change="1419011902" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-01"/>
+ <lrm_rsc_op id="lxc-01_kiff-01_monitor_30000" operation_key="lxc-01_kiff-01_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="59:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;59:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011907" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-01"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-02_kiff-01" type="remote" class="ocf" provider="pacemaker" container="R-lxc-02_kiff-01">
+ <lrm_rsc_op id="lxc-02_kiff-01_last_0" operation_key="lxc-02_kiff-01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="60:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;60:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="2" rc-code="0" op-status="0" interval="0" last-run="1419011902" last-rc-change="1419011902" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-01"/>
+ <lrm_rsc_op id="lxc-02_kiff-01_monitor_30000" operation_key="lxc-02_kiff-01_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="61:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;61:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011907" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-01"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state crm-debug-origin="post_cache_update" crmd="online" expected="member" id="2" in_ccm="true" join="member" uname="kiff-02">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-shutdown" name="shutdown" value="0"/>
+ <nvpair id="status-2-probe_complete" name="probe_complete" value="true"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="fence-kiff-01" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="fence-kiff-01_last_0" operation_key="fence-kiff-01_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="15:18:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;15:18:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="67" rc-code="0" op-status="0" interval="0" last-run="1419011236" last-rc-change="1419011236" exec-time="102" queue-time="0" op-digest="4daeb50c5b907ee60049bf819d52bf47" on_node="kiff-02"/>
+ <lrm_rsc_op id="fence-kiff-01_monitor_60000" operation_key="fence-kiff-01_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="16:18:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;16:18:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="68" rc-code="0" op-status="0" interval="60000" last-rc-change="1419011236" exec-time="93" queue-time="0" op-digest="5a67a2af25e4748196ea722d1770543a" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="fence-kiff-02" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="fence-kiff-02_last_0" operation_key="fence-kiff-02_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="32:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;32:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="88" rc-code="0" op-status="0" interval="0" last-run="1419011896" last-rc-change="1419011896" exec-time="0" queue-time="0" op-digest="721cfc00d183a798aab10fb147ff0518" on_node="kiff-02"/>
+ <lrm_rsc_op id="fence-kiff-02_monitor_60000" operation_key="fence-kiff-02_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="16:0:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;16:0:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="82" rc-code="0" op-status="0" interval="60000" last-rc-change="1419011480" exec-time="93" queue-time="0" op-digest="3edb9bb87d2e2ef5d171c831a712b481" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="clvmd" type="clvm" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="clvmd_last_0" operation_key="clvmd_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="25:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;25:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="24" rc-code="0" op-status="0" interval="0" last-run="1419011029" last-rc-change="1419011029" exec-time="1557" queue-time="0" op-digest="8c0920550ecae28ee15ea3371898e446" on_node="kiff-02"/>
+ <lrm_rsc_op id="clvmd_monitor_30000" operation_key="clvmd_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="26:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;26:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="25" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011030" exec-time="39" queue-time="0" op-digest="93512119a9313fd62f16acd7cc587734" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="shared0" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="shared0_last_0" operation_key="shared0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="35:6:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;35:6:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="31" rc-code="0" op-status="0" interval="0" last-run="1419011061" last-rc-change="1419011061" exec-time="1397" queue-time="0" op-digest="1684b65207e6da00ef41166b1d2e7147" on_node="kiff-02"/>
+ <lrm_rsc_op id="shared0_monitor_10000" operation_key="shared0_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="36:6:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;36:6:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="32" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011062" exec-time="62" queue-time="0" op-digest="a976d846b49206dece2f5eecf97fc980" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-01_kiff-01" type="remote" class="ocf" provider="pacemaker" container="R-lxc-01_kiff-01">
+ <lrm_rsc_op id="lxc-01_kiff-01_last_0" operation_key="lxc-01_kiff-01_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="71:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;71:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-02"/>
+ <lrm_rsc_op id="lxc-01_kiff-01_monitor_30000" operation_key="lxc-01_kiff-01_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="49:1:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;49:1:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="13" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011486" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="dlm" type="controld" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="dlm_last_0" operation_key="dlm_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="17:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;17:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="22" rc-code="0" op-status="0" interval="0" last-run="1419011026" last-rc-change="1419011026" exec-time="1100" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-02"/>
+ <lrm_rsc_op id="dlm_monitor_30000" operation_key="dlm_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="18:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;18:5:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="23" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011027" exec-time="39" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-01_kiff-01" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-01_kiff-01_last_0" operation_key="R-lxc-01_kiff-01_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="59:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;59:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="90" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="401" queue-time="0" op-digest="1120ab98948626507460f24405da6147" on_node="kiff-02"/>
+ <lrm_rsc_op id="R-lxc-01_kiff-01_monitor_10000" operation_key="R-lxc-01_kiff-01_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="38:1:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;38:1:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="83" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011481" exec-time="208" queue-time="0" op-digest="45b6ccfbfd5dd7fe94f80ec3d8d6d78d" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-01_kiff-02" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-01_kiff-02_last_0" operation_key="R-lxc-01_kiff-02_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="50:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;50:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="69" rc-code="0" op-status="0" interval="0" last-run="1419011248" last-rc-change="1419011248" exec-time="533" queue-time="0" op-digest="6ac2457c88e8834990f95e871a51d1fc" on_node="kiff-02"/>
+ <lrm_rsc_op id="R-lxc-01_kiff-02_monitor_10000" operation_key="R-lxc-01_kiff-02_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="51:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;51:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="70" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011249" exec-time="200" queue-time="0" op-digest="7d8f2504ce062c9a895982856e699610" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-01_kiff-02" type="remote" class="ocf" provider="pacemaker" container="R-lxc-01_kiff-02">
+ <lrm_rsc_op id="lxc-01_kiff-02_last_0" operation_key="lxc-01_kiff-02_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="56:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;56:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="7" rc-code="0" op-status="0" interval="0" last-run="1419011249" last-rc-change="1419011249" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-02"/>
+ <lrm_rsc_op id="lxc-01_kiff-02_monitor_30000" operation_key="lxc-01_kiff-02_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="57:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;57:20:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="8" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011254" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="vm-fs" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vm-fs_last_0" operation_key="vm-fs_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="23:22:7:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:7;23:22:7:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="78" rc-code="7" op-status="0" interval="0" last-run="1419011347" last-rc-change="1419011347" exec-time="58" queue-time="0" op-digest="4e01fcdd304402f9cf3416cee0dc30d2" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-02_kiff-01" type="remote" class="ocf" provider="pacemaker" container="R-lxc-02_kiff-01">
+ <lrm_rsc_op id="lxc-02_kiff-01_last_0" operation_key="lxc-02_kiff-01_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="74:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;74:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="16" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-02"/>
+ <lrm_rsc_op id="lxc-02_kiff-01_monitor_30000" operation_key="lxc-02_kiff-01_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="51:1:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;51:1:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="14" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011486" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="lxc-02_kiff-02" type="remote" class="ocf" provider="pacemaker" container="R-lxc-02_kiff-02">
+ <lrm_rsc_op id="lxc-02_kiff-02_last_0" operation_key="lxc-02_kiff-02_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="63:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;63:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="9" rc-code="0" op-status="0" interval="0" last-run="1419011256" last-rc-change="1419011256" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="kiff-02"/>
+ <lrm_rsc_op id="lxc-02_kiff-02_monitor_30000" operation_key="lxc-02_kiff-02_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="64:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;64:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="10" rc-code="0" op-status="0" interval="30000" last-rc-change="1419011261" exec-time="0" queue-time="0" op-digest="02a5bcf940fc8d3239701acb11438d6a" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-02_kiff-01" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-02_kiff-01_last_0" operation_key="R-lxc-02_kiff-01_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="62:2:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;62:2:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="92" rc-code="0" op-status="0" interval="0" last-run="1419011897" last-rc-change="1419011897" exec-time="376" queue-time="0" op-digest="6d28982e9d5ab858dea79f0a4bc07ba6" on_node="kiff-02"/>
+ <lrm_rsc_op id="R-lxc-02_kiff-01_monitor_10000" operation_key="R-lxc-02_kiff-01_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="41:1:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;41:1:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="84" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011481" exec-time="205" queue-time="0" op-digest="5aa25dd0c846987518afaba029b0a0a8" on_node="kiff-02"/>
+ </lrm_resource>
+ <lrm_resource id="R-lxc-02_kiff-02" type="VirtualDomain" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="R-lxc-02_kiff-02_last_0" operation_key="R-lxc-02_kiff-02_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="55:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;55:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="73" rc-code="0" op-status="0" interval="0" last-run="1419011255" last-rc-change="1419011255" exec-time="558" queue-time="0" op-digest="749edaca3e37d0e4526f44849ed63d4b" on_node="kiff-02"/>
+ <lrm_rsc_op id="R-lxc-02_kiff-02_monitor_10000" operation_key="R-lxc-02_kiff-02_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="56:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" transition-magic="0:0;56:21:0:1c60b862-2217-42e1-857f-645cedc2afd9" call-id="74" rc-code="0" op-status="0" interval="10000" last-rc-change="1419011256" exec-time="200" queue-time="0" op-digest="160233456c9f63f111238ddb49d9cc0d" on_node="kiff-02"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="lxc-01_kiff-01" uname="lxc-01_kiff-01" crm-debug-origin="post_cache_update">
+ <lrm id="lxc-01_kiff-01">
+ <lrm_resources>
+ <lrm_resource id="vm-fs" type="Filesystem" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vm-fs_last_0" operation_key="vm-fs_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="56:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;56:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="5" rc-code="0" op-status="0" interval="0" last-run="1419011907" last-rc-change="1419011907" exec-time="82" queue-time="0" op-digest="4e01fcdd304402f9cf3416cee0dc30d2" on_node="kiff-01"/>
+ <lrm_rsc_op id="vm-fs_monitor_20000" operation_key="vm-fs_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="57:4:0:ed834806-99be-45de-9c12-c980ee8706a6" transition-magic="0:0;57:4:0:ed834806-99be-45de-9c12-c980ee8706a6" call-id="7" rc-code="0" op-status="0" interval="20000" last-rc-change="1419011907" exec-time="57" queue-time="0" op-digest="47b8f50d415ca40cdf0d0044c4b63685" on_node="kiff-01"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="lxc-02_kiff-01" uname="lxc-02_kiff-01" crm-debug-origin="post_cache_update"/>
+ <node_state remote_node="true" id="lxc-01_kiff-02" uname="lxc-01_kiff-02" crm-debug-origin="post_cache_update">
+ <transient_attributes id="lxc-01_kiff-02">
+ <instance_attributes id="status-lxc-01_kiff-02"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="lxc-02_kiff-02" uname="lxc-02_kiff-02" crm-debug-origin="post_cache_update">
+ <transient_attributes id="lxc-02_kiff-02">
+ <instance_attributes id="status-lxc-02_kiff-02"/>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>

File Metadata

Mime Type
text/x-diff
Expires
Mon, Apr 21, 8:13 PM (6 h, 26 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1665700
Default Alt Text
(189 KB)

Event Timeline