diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 6d9d078bcb..95e7126564 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1581 +1,1587 @@ #!@PYTHON@ """ Regression tests for Pacemaker's scheduler """ __copyright__ = "Copyright 2004-2021 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import stat import shlex import shutil import argparse import subprocess import platform DESC = """Regression tests for Pacemaker's scheduler""" # Each entry in TESTS is a group of tests, where each test consists of a # test base name, test description, and additional test arguments. # Test groups will be separated by newlines in output. TESTS = [ [ [ "simple1", "Offline" ], [ "simple2", "Start" ], [ "simple3", "Start 2" ], [ "simple4", "Start Failed" ], [ "simple6", "Stop Start" ], [ "simple7", "Shutdown" ], #[ "simple8", "Stonith" ], #[ "simple9", "Lower version" ], #[ "simple10", "Higher version" ], [ "simple11", "Priority (ne)" ], [ "simple12", "Priority (eq)" ], [ "simple8", "Stickiness" ], ], [ [ "group1", "Group" ], [ "group2", "Group + Native" ], [ "group3", "Group + Group" ], [ "group4", "Group + Native (nothing)" ], [ "group5", "Group + Native (move)" ], [ "group6", "Group + Group (move)" ], [ "group7", "Group colocation" ], [ "group13", "Group colocation (cant run)" ], [ "group8", "Group anti-colocation" ], [ "group9", "Group recovery" ], [ "group10", "Group partial recovery" ], [ "group11", "Group target_role" ], [ "group14", "Group stop (graph terminated)" ], [ "group15", "Negative group colocation" ], [ "bug-1573", "Partial stop of a group with two children" ], [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ], [ "bug-lf-2613", "Move group on failure" ], [ "bug-lf-2619", "Move group on clone failure" ], [ "group-fail", "Ensure stop order is preserved for partially active groups" ], [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ], [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ], [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], ], [ [ "rsc_dep1", "Must not" ], [ "rsc_dep3", "Must" ], [ "rsc_dep5", "Must not 3" ], [ "rsc_dep7", "Must 3" ], [ "rsc_dep10", "Must (but cant)" ], [ "rsc_dep2", "Must (running)" ], [ "rsc_dep8", "Must (running : alt)" ], [ "rsc_dep4", "Must (running + move)" ], [ "asymmetric", "Asymmetric - require explicit location constraints" ], ], [ [ "orphan-0", "Orphan ignore" ], [ "orphan-1", "Orphan stop" ], [ "orphan-2", "Orphan stop, remove failcount" ], ], [ [ "params-0", "Params: No change" ], [ "params-1", "Params: Changed" ], [ "params-2", "Params: Resource definition" ], [ "params-3", "Params: Restart instead of reload if start pending" ], [ "params-4", "Params: Reload" ], [ "params-5", "Params: Restart based on probe digest" ], [ "novell-251689", "Resource definition change + target_role=stopped" ], [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ], [ "params-6", "Params: Detect reload in previously migrated resource" ], [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ], [ "not-reschedule-unneeded-monitor", "Do not reschedule unneeded monitors while resource definitions have changed" ], [ "reload-becomes-restart", "Cancel reload if restart becomes required" ], ], [ [ 
"target-0", "Target Role : baseline" ], [ "target-1", "Target Role : master" ], [ "target-2", "Target Role : invalid" ], ], [ [ "base-score", "Set a node's default score for all nodes" ], ], [ [ "date-1", "Dates", [ "-t", "2005-020" ] ], [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ], [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ], [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ], [ "probe-0", "Probe (anon clone)" ], [ "probe-1", "Pending Probe" ], [ "probe-2", "Correctly re-probe cloned groups" ], [ "probe-3", "Probe (pending node)" ], [ "probe-4", "Probe (pending node + stopped resource)" ], [ "standby", "Standby" ], [ "comments", "Comments" ], ], [ [ "one-or-more-0", "Everything starts" ], [ "one-or-more-1", "Nothing starts because of A" ], [ "one-or-more-2", "D can start because of C" ], [ "one-or-more-3", "D cannot start because of B and C" ], [ "one-or-more-4", "D cannot start because of target-role" ], [ "one-or-more-5", "Start A and F even though C and D are stopped" ], [ "one-or-more-6", "Leave A running even though B is stopped" ], [ "one-or-more-7", "Leave A running even though C is stopped" ], [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ], [ "clone-require-all-1", "clone B starts node 3 and 4" ], [ "clone-require-all-2", "clone B remains stopped everywhere" ], [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ], [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ], [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ], [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ], [ "clone-require-all-7", "clone A and B both start at the same time. all instances of A start before B" ], [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ], [ "clone-require-all-no-interleave-2", "C starts on nodes 1, 2, and 4 with only one active instance of B" ], [ "clone-require-all-no-interleave-3", "C remains active when instance of B is stopped on one node and started on another" ], [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ], ], [ [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], [ "value-source", "Use location constraints with node attribute expressions using value-source" ], [ "rule-dbl-as-auto-number-match", "Floating-point rule values default to number comparison: match" ], [ "rule-dbl-as-auto-number-no-match", "Floating-point rule values default to number comparison: no " "match" ], [ "rule-dbl-as-integer-match", "Floating-point rule values set to integer comparison: match" ], [ "rule-dbl-as-integer-no-match", "Floating-point rule values set to integer comparison: no match" ], [ "rule-dbl-as-number-match", "Floating-point rule values set to number comparison: match" ], [ "rule-dbl-as-number-no-match", "Floating-point rule values set to number comparison: no match" ], [ "rule-dbl-parse-fail-default-str-match", "Floating-point rule values fail to parse, default to string " "comparison: match" ], [ "rule-dbl-parse-fail-default-str-no-match", "Floating-point rule values fail to parse, default to string " "comparison: no match" ], [ "rule-int-as-auto-integer-match", "Integer rule values default to integer comparison: match" 
], [ "rule-int-as-auto-integer-no-match", "Integer rule values default to integer comparison: no match" ], [ "rule-int-as-integer-match", "Integer rule values set to integer comparison: match" ], [ "rule-int-as-integer-no-match", "Integer rule values set to integer comparison: no match" ], [ "rule-int-as-number-match", "Integer rule values set to number comparison: match" ], [ "rule-int-as-number-no-match", "Integer rule values set to number comparison: no match" ], [ "rule-int-parse-fail-default-str-match", "Integer rule values fail to parse, default to string " "comparison: match" ], [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], ], [ [ "order1", "Order start 1" ], [ "order2", "Order start 2" ], [ "order3", "Order stop" ], [ "order4", "Order (multiple)" ], [ "order5", "Order (move)" ], [ "order6", "Order (move w/ restart)" ], [ "order7", "Order (mandatory)" ], [ "order-optional", "Order (score=0)" ], [ "order-required", "Order (score=INFINITY)" ], [ "bug-lf-2171", "Prevent group start when clone is stopped" ], [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ], [ "order-sets", "Ordering for resource sets" ], [ "order-serialize", "Serialize resources without inhibiting migration" ], [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ], [ "clone-order-primitive", "Order clone start after a primitive" ], [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ], [ "order-optional-keyword", "Order (optional keyword)" ], [ "order-mandatory", "Order (mandatory keyword)" ], [ "bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones" ], [ "ordered-set-basic-startup", "Constraint set with default order settings" ], [ "ordered-set-natural", "Allow natural set ordering" ], [ "order-wrong-kind", "Order (error)" ], ], [ [ "coloc-loop", "Colocation - loop" ], [ "coloc-many-one", "Colocation - many-to-one" ], [ "coloc-list", "Colocation - many-to-one with list" ], [ "coloc-group", "Colocation - groups" ], [ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ], [ "coloc-attr", "Colocation based on node attributes" ], [ "coloc-negative-group", "Negative colocation with a group" ], [ "coloc-intra-set", "Intra-set colocation" ], [ "bug-lf-2435", "Colocation sets with a negative score" ], [ "coloc-clone-stays-active", "Ensure clones don't get stopped/demoted because a dependent must stop" ], [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ], [ "colo_master_w_native", "cl#5070 - Verify promotion order is affected when colocating master to native rsc" ], [ "colo_slave_w_native", "cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ], [ "anti-colocation-order", "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ], [ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ], [ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ], [ "enforce-colo1", "Always enforce B with A INFINITY" ], [ "complex_enforce_colo", "Always enforce B with A INFINITY. 
(make sure heat-engine stops)" ], [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], [ "colocation-influence", "Respect colocation influence" ], ], [ [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ], [ "rsc-sets-clone", "Resource Sets - Clone" ], [ "rsc-sets-master", "Resource Sets - Master" ], [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ], ], [ [ "attrs1", "string: eq (and)" ], [ "attrs2", "string: lt / gt (and)" ], [ "attrs3", "string: ne (or)" ], [ "attrs4", "string: exists" ], [ "attrs5", "string: not_exists" ], [ "attrs6", "is_dc: true" ], [ "attrs7", "is_dc: false" ], [ "attrs8", "score_attribute" ], [ "per-node-attrs", "Per node resource parameters" ], ], [ [ "mon-rsc-1", "Schedule Monitor - start" ], [ "mon-rsc-2", "Schedule Monitor - move" ], [ "mon-rsc-3", "Schedule Monitor - pending start" ], [ "mon-rsc-4", "Schedule Monitor - move/pending start" ], ], [ [ "rec-rsc-0", "Resource Recover - no start" ], [ "rec-rsc-1", "Resource Recover - start" ], [ "rec-rsc-2", "Resource Recover - monitor" ], [ "rec-rsc-3", "Resource Recover - stop - ignore" ], [ "rec-rsc-4", "Resource Recover - stop - block" ], [ "rec-rsc-5", "Resource Recover - stop - fence" ], [ "rec-rsc-6", "Resource Recover - multiple - restart" ], [ "rec-rsc-7", "Resource Recover - multiple - stop" ], [ "rec-rsc-8", "Resource Recover - multiple - block" ], [ "rec-rsc-9", "Resource Recover - group/group" ], [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ], [ "stop-failure-no-quorum", "Stop failure without quorum" ], [ "stop-failure-no-fencing", "Stop failure without fencing available" ], [ "stop-failure-with-fencing", "Stop failure with fencing available" ], [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ], [ "multiple-monitor-one-failed", "Consider resource failed if any of the configured monitor operations failed" ], ], [ [ "quorum-1", "No quorum - ignore" ], [ "quorum-2", "No quorum - freeze" ], [ "quorum-3", "No quorum - stop" ], [ "quorum-4", "No quorum - start anyway" ], [ "quorum-5", "No quorum - start anyway (group)" ], [ "quorum-6", "No quorum - start anyway (clone)" ], [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ], [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ], [ "suicide-not-needed-initial-quorum", "no-quorum-policy=suicide: suicide not necessary at initial quorum" ], [ "suicide-not-needed-never-quorate", "no-quorum-policy=suicide: suicide not necessary if never quorate" ], [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ], ], [ [ "rec-node-1", "Node Recover - Startup - no fence" ], [ "rec-node-2", "Node Recover - Startup - fence" ], [ "rec-node-3", "Node Recover - HA down - no fence" ], [ "rec-node-4", "Node Recover - HA down - fence" ], [ "rec-node-5", "Node Recover - CRM down - no fence" ], [ "rec-node-6", "Node Recover - CRM down - fence" ], [ "rec-node-7", "Node Recover - no quorum - ignore" ], [ "rec-node-8", "Node Recover - no quorum - freeze" ], [ "rec-node-9", "Node Recover - no quorum - stop" ], [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ], [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ], [ "rec-node-12", "Node Recover - nothing active - fence" ], [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ], [ 
"rec-node-15", "Node Recover - unknown lrm section" ], [ "rec-node-14", "Serialize all stonith's" ], ], [ [ "multi1", "Multiple Active (stop/start)" ], ], [ [ "migrate-begin", "Normal migration" ], [ "migrate-success", "Completed migration" ], [ "migrate-partial-1", "Completed migration, missing stop on source" ], [ "migrate-partial-2", "Successful migrate_to only" ], [ "migrate-partial-3", "Successful migrate_to only, target down" ], [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ], [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ], [ "migrate-fail-2", "Failed migrate_from" ], [ "migrate-fail-3", "Failed migrate_from + stop on source" ], [ "migrate-fail-4", "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ], [ "migrate-fail-6", "Failed migrate_to" ], [ "migrate-fail-7", "Failed migrate_to + stop on source" ], [ "migrate-fail-8", "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ], [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ], [ "migrate-stop", "Migration in a stopping stack" ], [ "migrate-start", "Migration in a starting stack" ], [ "migrate-stop_start", "Migration in a restarting stack" ], [ "migrate-stop-complex", "Migration in a complex stopping stack" ], [ "migrate-start-complex", "Migration in a complex starting stack" ], [ "migrate-stop-start-complex", "Migration in a complex moving stack" ], [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ], [ "migrate-1", "Migrate (migrate)" ], [ "migrate-2", "Migrate (stable)" ], [ "migrate-3", "Migrate (failed migrate_to)" ], [ "migrate-4", "Migrate (failed migrate_from)" ], [ "novell-252693", "Migration in a stopping stack" ], [ "novell-252693-2", "Migration in a starting stack" ], [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ], [ "bug-1820", "Migration in a group" ], [ "bug-1820-1", "Non-migration in a group" ], [ "migrate-5", "Primitive migration with a clone" ], [ "migrate-fencing", "Migration after Fencing" ], [ "migrate-both-vms", "Migrate two VMs that have no colocation" ], [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ], [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ], [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ], [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ], [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ], [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. 
move both, a not migratable" ], [ "6-migrate-group", "Advanced migrate logic, migrate a group" ], [ "7-migrate-group-one-unmigratable", "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ], [ "8-am-then-bm-a-migrating-b-stopping", "Advanced migrate logic, A then B, A migrating, B stopping" ], [ "9-am-then-bm-b-migrating-a-stopping", "Advanced migrate logic, A then B, B migrate, A stopping" ], [ "10-a-then-bm-b-move-a-clone", "Advanced migrate logic, A clone then B, migrate B while stopping A" ], [ "11-a-then-bm-b-move-a-clone-starting", "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ], [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ], [ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ], # @TODO: If pacemaker implements versioned attributes, uncomment this test #[ "migrate-versioned", "Disable migration for versioned resources" ], [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ], ], [ [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ], [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ], [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ], [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ], [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ], [ "inc0", "Incarnation start" ], [ "inc1", "Incarnation start order" ], [ "inc2", "Incarnation silent restart, stop, move" ], [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ], [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ], [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ], [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ], [ "inc7", "Clone colocation" ], [ "inc8", "Clone anti-colocation" ], [ "inc9", "Non-unique clone" ], [ "inc10", "Non-unique clone (stop)" ], [ "inc11", "Primitive colocation with clones" ], [ "inc12", "Clone shutdown" ], [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], [ "bug-lf-2160", "Don't shuffle clones due to colocation" ], [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ], [ "bug-lf-2153", "Clone ordering constraints" ], [ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ], [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ], [ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ], [ "clone-colocate-instance-2", "Colocation with a specific clone instance" ], [ "clone-order-instance", "Ordering with specific clone instances" ], [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ], [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ], [ "bug-lf-2544", "Balanced clone placement" ], [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ], [ "bug-lf-2574", "Avoid clone shuffle" ], [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ], [ "bug-cl-5168", 
"Don't shuffle clones" ], [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ], [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ], [ "clone-interleave-1", "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-3", "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ], [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ], [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ], [ "clone-requires-quorum", "Clone with requires=quorum with presumed-inactive instance on failed node" ], ], [ [ "cloned_start_one", "order first clone then clone... first clone_min=2" ], [ "cloned_start_two", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ], [ "clone_min_interleave_start_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_start_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_two", "order first clone then primitive... 
first clone_min=2" ], ], [ [ "unfence-startup", "Clean unfencing" ], [ "unfence-definition", "Unfencing when the agent changes" ], [ "unfence-parameters", "Unfencing when the agent parameters change" ], [ "unfence-device", "Unfencing when a cluster has only fence devices" ], ], [ [ "master-0", "Stopped -> Slave" ], [ "master-1", "Stopped -> Promote" ], [ "master-2", "Stopped -> Promote : notify" ], [ "master-3", "Stopped -> Promote : master location" ], [ "master-4", "Started -> Promote : master location" ], [ "master-5", "Promoted -> Promoted" ], [ "master-6", "Promoted -> Promoted (2)" ], [ "master-7", "Promoted -> Fenced" ], [ "master-8", "Promoted -> Fenced -> Moved" ], [ "master-9", "Stopped + Promotable + No quorum" ], [ "master-10", "Stopped -> Promotable : notify with monitor" ], [ "master-11", "Stopped -> Promote : colocation" ], [ "novell-239082", "Demote/Promote ordering" ], [ "novell-239087", "Stable master placement" ], [ "master-12", "Promotion based solely on rsc_location constraints" ], [ "master-13", "Include preferences of colocated resources when placing master" ], [ "master-demote", "Ordering when actions depend on demoting a slave resource" ], [ "master-ordering", "Prevent resources from starting that need a master" ], [ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ], [ "master-group", "Promotion of cloned groups" ], [ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ], [ "master-failed-demote", "Don't retry failed demote actions" ], [ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ], [ "master-depend", "Ensure resources that depend on the master don't get allocated until the master does" ], [ "master-reattach", "Re-attach to a running master" ], [ "master-allow-start", "Don't include master score if it would prevent allocation" ], [ "master-colocation", "Allow master instance placement to be influenced by colocation constraints" ], [ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ], [ "master-role", "Prevent target-role from promoting more than master-max instances" ], [ "bug-lf-2358", "Master-Master anti-colocation" ], [ "master-promotion-constraint", "Mandatory master colocation constraints" ], [ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ], [ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ], [ "master-demote-2", "Demote does not clear past failure" ], [ "master-move", "Move master based on failure of colocated group" ], [ "master-probed-score", "Observe the promotion score of probed resources" ], [ "colocation_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by colocation constraint" ], [ "colocation_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ], [ "order_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by order constraint" ], [ "order_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by order constraint" ], [ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ], [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ], [ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ], [ "master-demote-block", "Block promotion if demote fails with on-fail=block" ], [ "master-dependent-ban", "Don't stop instances from being active because a dependent is banned from that host" ], [ 
"master-stop", "Stop instances due to location constraint with role=Started" ], [ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ], [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ], [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ], [ "master-asymmetrical-order", "Fix the behaviors of multi-state resources with asymmetrical ordering" ], [ "master-notify", "Master promotion with notifies" ], [ "master-score-startup", "Use permanent master scores without LRM history" ], [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ], [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ], [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ], ], [ [ "history-1", "Correctly parse stateful-1 resource state" ], ], [ [ "managed-0", "Managed (reference)" ], [ "managed-1", "Not managed - down" ], [ "managed-2", "Not managed - up" ], [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ], [ "bug-5028-detach", "Ensure detach still works" ], [ "bug-5028-bottom", "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ], [ "unmanaged-stop-1", "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ], [ "unmanaged-stop-2", "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ], [ "unmanaged-stop-3", "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ], [ "unmanaged-stop-4", "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ], [ "unmanaged-block-restart", "Block restart of resources if any dependent resource in a group is unmanaged" ], ], [ [ "interleave-0", "Interleave (reference)" ], [ "interleave-1", "coloc - not interleaved" ], [ "interleave-2", "coloc - interleaved" ], [ "interleave-3", "coloc - interleaved (2)" ], [ "interleave-pseudo-stop", "Interleaved clone during stonith" ], [ "interleave-stop", "Interleaved clone during stop" ], [ "interleave-restart", "Interleaved clone during dependency restart" ], ], [ [ "notify-0", "Notify reference" ], [ "notify-1", "Notify simple" ], [ "notify-2", "Notify simple, confirm" ], [ "notify-3", "Notify move, confirm" ], [ "novell-239079", "Notification priority" ], #[ "notify-2", "Notify - 764" ], [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ], [ "route-remote-notify", "Route remote notify actions through correct cluster node" ], [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ], ], [ [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ], [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ], [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ], [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 
_after_ a stop" ], [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ], [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ], [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ], [ "829", "OSDL #829" ], [ "994", "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ], [ "994-2", "OSDL #994 - with a dependent resource" ], [ "1360", "OSDL #1360 - Clone stickiness" ], [ "1484", "OSDL #1484 - on_fail=stop" ], [ "1494", "OSDL #1494 - Clone stability" ], [ "unrunnable-1", "Unrunnable" ], [ "unrunnable-2", "Unrunnable 2" ], [ "stonith-0", "Stonith loop - 1" ], [ "stonith-1", "Stonith loop - 2" ], [ "stonith-2", "Stonith loop - 3" ], [ "stonith-3", "Stonith startup" ], [ "stonith-4", "Stonith node state" ], [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ], [ "bug-1572-1", "Recovery of groups depending on master/slave" ], [ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ], [ "bug-1685", "Depends-on-master ordering" ], [ "bug-1822", "Don't promote partially active groups" ], [ "bug-pm-11", "New resource added to a m/s group" ], [ "bug-pm-12", "Recover only the failed portion of a cloned group" ], [ "bug-n-387749", "Don't shuffle clone instances" ], [ "bug-n-385265", "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ], [ "bug-n-385265-2", "Ensure groups are migrated instead of remaining partially active on the current node" ], [ "bug-lf-1920", "Correctly handle probes that find active resources" ], [ "bnc-515172", "Location constraint with multiple expressions" ], [ "colocate-primitive-with-clone", "Optional colocation with a clone" ], [ "use-after-free-merge", "Use-after-free in native_merge_weights" ], [ "bug-lf-2551", "STONITH ordering for stop" ], [ "bug-lf-2606", "Stonith implies demote" ], [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ], [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ], [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ], [ "bug-5014-A-stop-B-started", "Verify when A stops B does not stop if it has already started using symmetric=false" ], [ "bug-5014-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ], [ "bug-5014-CthenAthenB-C-stopped", "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ], [ "bug-5014-CLONE-A-start-B-start", "Verify when A starts B starts using clone resources with symmetric=false" ], [ "bug-5014-CLONE-A-stop-B-started", "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ], [ "bug-5014-GROUP-A-start-B-start", "Verify when A starts B starts when using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-started", "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ], [ "bug-5014-ordered-set-symmetrical-false", "Verify ordered sets work with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-true", "Verify ordered sets work with symmetrical=true" ], [ "bug-5007-masterslave_colocation", "Verify use of 
colocation scores other than INFINITY and -INFINITY work on multi-state resources" ], [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ], [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ], [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ], [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ], [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ], [ "failcount", "Ensure failcounts are correctly expired" ], [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ], [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ], [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ], [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ], [ "monitor-onfail-stop", "bug-5058 - Monitor failure with on-fail set to stop" ], [ "bug-5059", "No need to restart p_stateful1:*" ], [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ], [ "bug-5069-op-disabled", "Test on-fail=ignore with failure when monitor is disabled" ], [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ], [ "expire-non-blocked-failure", "Ignore failure-timeout only if the failed operation has on-fail=block" ], [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ], [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ], [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ], [ "order-expired-failure", "Order failcount cleanup after remote fencing" ], [ "ignore_stonith_rsc_order1", "cl#5056- Ignore order constraint between stonith and non-stonith rsc" ], [ "ignore_stonith_rsc_order2", "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ], [ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ], [ "ignore_stonith_rsc_order4", "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ], [ "honor_stonith_rsc_order1", "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ], [ "honor_stonith_rsc_order2", "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ], [ "honor_stonith_rsc_order3", "cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ], [ "honor_stonith_rsc_order4", "cl#5056- Honor order constraint, between two native stonith rscs" ], [ "multiply-active-stonith", "Multiply active stonith" ], [ "probe-timeout", "cl#5099 - Default probe timeout" ], [ "order-first-probes", "cl#5301 - respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], ], [ [ "systemhealth1", "System Health () #1" ], [ "systemhealth2", "System Health () #2" ], [ "systemhealth3", "System Health () #3" ], [ "systemhealthn1", "System Health (None) #1" ], [ "systemhealthn2", "System Health (None) #2" ], [ "systemhealthn3", "System Health (None) #3" ], [ "systemhealthm1", "System Health (Migrate On Red) #1" ], [ "systemhealthm2", "System Health (Migrate On Red) #2" ], [ "systemhealthm3", "System 
Health (Migrate On Red) #3" ], [ "systemhealtho1", "System Health (Only Green) #1" ], [ "systemhealtho2", "System Health (Only Green) #2" ], [ "systemhealtho3", "System Health (Only Green) #3" ], [ "systemhealthp1", "System Health (Progressive) #1" ], [ "systemhealthp2", "System Health (Progressive) #2" ], [ "systemhealthp3", "System Health (Progressive) #3" ], ], [ [ "utilization", "Placement Strategy - utilization" ], [ "minimal", "Placement Strategy - minimal" ], [ "balanced", "Placement Strategy - balanced" ], ], [ [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ], [ "placement-priority", "Optimized Placement Strategy - priority" ], [ "placement-location", "Optimized Placement Strategy - location" ], [ "placement-capacity", "Optimized Placement Strategy - capacity" ], ], [ [ "utilization-order1", "Utilization Order - Simple" ], [ "utilization-order2", "Utilization Order - Complex" ], [ "utilization-order3", "Utilization Order - Migrate" ], [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ], [ "utilization-shuffle", "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ], [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ], [ "load-stopped-loop-2", "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ], ], [ [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ], [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ], [ "colocated-utilization-group", "Colocated Utilization - Group" ], [ "colocated-utilization-clone", "Colocated Utilization - Clone" ], [ "utilization-check-allowed-nodes", "Only check the capacities of the nodes that can run the resource" ], ], [ [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ], [ "node-maintenance-1", "cl#5128 - Node maintenance" ], [ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ], [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ], [ "rsc-maintenance", "Per-resource maintenance" ], ], [ [ "not-installed-agent", "The resource agent is missing" ], [ "not-installed-tools", "Something the resource agent needs is missing" ], ], [ [ "stopped-monitor-00", "Stopped Monitor - initial start" ], [ "stopped-monitor-01", "Stopped Monitor - failed started" ], [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ], [ "stopped-monitor-03", "Stopped Monitor - stop started" ], [ "stopped-monitor-04", "Stopped Monitor - failed stop" ], [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ], [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ], [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ], [ "stopped-monitor-08", "Stopped Monitor - migrate" ], [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ], [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ], [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ], [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ], [ "stopped-monitor-20", "Stopped Monitor - initial stop" ], [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ], [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ], [ "stopped-monitor-23", "Stopped Monitor - start stopped" ], [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ], [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped 
multi-up" ], [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ], [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ], [ "stopped-monitor-30", "Stopped Monitor - new node started" ], [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ], ], [ # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes [ "intervals", "Recurring monitor interval handling" ], ], [ [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ], [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ], [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ], [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ], [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ], [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ], [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ], [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ], [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ], [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ], [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ], [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ], [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ], [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ], [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ], [ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ], [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ], [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ], [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ], [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ], [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ], [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ], [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ], [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ], [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ], [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ], [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ], [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ], [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ], [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ], [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ], [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ], [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ], [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ], [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ], [ 
"ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ], [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ], [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ], [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ], [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ], [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ], [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ], [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ], [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ], [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ], [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ], [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ], [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ], [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ], [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ], [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ], [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ], [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ], [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ], [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ], [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ], [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ], [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ], [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ], [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ], [ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ], [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ], [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ], [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ], [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ], [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ], [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ], [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ], [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ], [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ], [ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ], [ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ], [ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ], [ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ], [ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ], [ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ], [ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ], [ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ], [ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ], [ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ], [ "ticket-master-12", 
"Ticket - Master (loss-policy=freeze, revoked)" ], [ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ], [ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ], [ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ], [ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ], [ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ], [ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ], [ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ], [ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ], [ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ], [ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ], [ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ], [ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ], [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ], [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ], [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ], [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ], [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ], [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ], [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ], [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ], [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ], [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ], [ "site-specific-params", "Site-specific instance attributes based on rules" ], ], [ [ "template-1", "Template - 1" ], [ "template-2", "Template - 2" ], [ "template-3", "Template - 3 (merge operations)" ], [ "template-coloc-1", "Template - Colocation 1" ], [ "template-coloc-2", "Template - Colocation 2" ], [ "template-coloc-3", "Template - Colocation 3" ], [ "template-order-1", "Template - Order 1" ], [ "template-order-2", "Template - Order 2" ], [ "template-order-3", "Template - Order 3" ], [ "template-ticket", "Template - Ticket" ], [ "template-rsc-sets-1", "Template - Resource Sets 1" ], [ "template-rsc-sets-2", "Template - Resource Sets 2" ], [ "template-rsc-sets-3", "Template - Resource Sets 3" ], [ "template-rsc-sets-4", "Template - Resource Sets 4" ], [ "template-clone-primitive", "Cloned primitive from template" ], [ "template-clone-group", "Cloned group from template" ], [ "location-sets-templates", "Resource sets and templates - Location" ], [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ], [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ], [ "tags-location", "Tags - Location" ], [ "tags-ticket", "Tags - Ticket" ], ], [ [ "container-1", "Container - initial" ], [ "container-2", "Container - monitor failed" ], [ "container-3", "Container - stop failed" ], [ "container-4", "Container - 
reached migration-threshold" ], [ "container-group-1", "Container in group - initial" ], [ "container-group-2", "Container in group - monitor failed" ], [ "container-group-3", "Container in group - stop failed" ], [ "container-group-4", "Container in group - reached migration-threshold" ], [ "container-is-remote-node", "Place resource within container when container is remote-node" ], [ "bug-rh-1097457", "Kill user defined container/contents ordering" ], [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ], [ "bundle-order-startup", "Bundle startup ordering" ], [ "bundle-order-partial-start", "Bundle startup ordering when some dependencies are already running" ], [ "bundle-order-partial-start-2", "Bundle startup ordering when some dependencies and the container are already running" ], [ "bundle-order-stop", "Bundle stop ordering" ], [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ], [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ], [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ], [ "bundle-order-startup-clone-2", "Bundle startup with clones" ], [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ], [ "bundle-nested-colocation", "Colocation of nested connection resources" ], [ "bundle-order-fencing", "Order pseudo bundle fencing after parent node fencing if both are happening" ], [ "bundle-probe-order-1", "order 1" ], [ "bundle-probe-order-2", "order 2" ], [ "bundle-probe-order-3", "order 3" ], [ "bundle-probe-remotes", "Ensure remotes get probed too" ], [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], [ "whitebox-fail2", "Fail cluster connection to guest node" ], [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ], [ "whitebox-start", "Start whitebox container with resources assigned to it" ], [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ], [ "whitebox-move", "Move whitebox container with resources assigned to it" ], [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ], [ "whitebox-ms-ordering", "Verify promote/demote cannot occur before connection is established" ], [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ], [ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ], [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ], [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ], [ "whitebox-migrate1", "Migrate both container and connection resource" ], [ "whitebox-imply-stop-on-fence", "Imply stop action on container node rsc when host node is fenced" ], [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ], [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ], [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ], [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ], ], [ [ "remote-startup-probes", "Baremetal remote-node startup probes" ], [ "remote-startup", "Start up a newly discovered remote-node with no status" ], [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ], [ "remote-fence-unclean2", "Fence 
baremetal remote-node after cluster node fails and connection cannot be recovered" ], [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ], [ "remote-move", "Move remote-node connection resource" ], [ "remote-disable", "Disable a baremetal remote-node" ], [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ], [ "remote-orphaned", "Properly shutdown orphaned connection resource" ], [ "remote-orphaned2", "Verify we can handle orphaned remote connections with active resources on the remote" ], [ "remote-recover", "Recover connection resource after cluster-node fails" ], [ "remote-stale-node-entry", "Make sure we properly handle leftover remote-node entries in the node section" ], [ "remote-partial-migrate", "Make sure partial migrations are handled before ops on the remote node" ], [ "remote-partial-migrate2", "Make sure partial migration target is preferred for remote connection" ], [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ], [ "remote-start-fail", "Make sure a start failure does not result in fencing if no active resources are on remote" ], [ "remote-unclean2", "Make sure monitor failure always results in fencing, even if no rsc are active on remote" ], [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ], [ "remote-recovery", "Recover remote connections before attempting demotion" ], [ "remote-recover-connection", "Optimistically recover only the connection" ], [ "remote-recover-all", "Fencing when the connection has no home" ], [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ], [ "remote-recover-unknown", "Fencing when the connection has no home and the remote has no operation history" ], [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], [ "remote-connection-unrecoverable", "Remote connection host must be fenced, with connection unrecoverable" ], [ "cancel-behind-moving-remote", "Route recurring monitor cancellations through original node of a moving remote connection" ], ], [ [ "resource-discovery", "Exercises resource-discovery location constraint option" ], [ "rsc-discovery-per-node", "Disable resource discovery per node" ], [ "shutdown-lock", "Ensure shutdown lock works properly" ], [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], ], [ [ "op-defaults", "Test op_defaults conditional expressions" ], [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], [ "op-defaults-3", "Test op_defaults precedence" ], [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], ], [ [ "stop-all-resources", "Test stop-all-resources=true" ], ], [ [ "ocf_degraded-remap-ocf_ok", "Test DEGRADED remapped to OK" ], [ "ocf_degraded_master-remap-ocf_ok", "Test DEGRADED_MASTER remapped to OK" ], ], # @TODO: If pacemaker implements versioned attributes, uncomment these tests #[ # [ "versioned-resources", "Start resources with #ra-version rules" ], # [ "restart-versioned", "Restart resources on #ra-version change" ], # [ "reload-versioned", "Reload resources on #ra-version change" ], #], #[ # [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ], # [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ], # [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ], 
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ], #], ] TESTS_64BIT = [ [ [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ], ], ] # Constants substituted in the build process class BuildVars(object): SBINDIR = "@sbindir@" BUILDDIR = "@abs_top_builddir@" CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@" # These values must be kept in sync with crm_exit_t class CrmExit(object): OK = 0 ERROR = 1 NOT_INSTALLED = 5 NOINPUT = 66 CANTCREAT = 73 def is_executable(path): """ Check whether a file at a given path is executable. """ try: return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR except OSError: return False def diff(file1, file2, **kwargs): """ Call diff on two files """ return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines", file1, file2 ], **kwargs) def sort_file(filename): """ Sort a file alphabetically """ with io.open(filename, "rt") as f: lines = sorted(f) with io.open(filename, "wt") as f: f.writelines(lines) def remove_files(filenames): """ Remove a list of files """ for filename in filenames: try: os.remove(filename) except OSError: pass def normalize(filename): """ Remove text from a file that isn't important for comparison """ if not hasattr(normalize, "patterns"): normalize.patterns = [ re.compile(r'crm_feature_set="[^"]*"'), re.compile(r'batch-limit="[0-9]*"') ] if os.path.isfile(filename): with io.open(filename, "rt") as f: lines = f.readlines() with io.open(filename, "wt") as f: for line in lines: for pattern in normalize.patterns: line = pattern.sub("", line) f.write(line) def cat(filename, dest=sys.stdout): """ Copy a file to a destination file descriptor """ with io.open(filename, "rt") as f: shutil.copyfileobj(f, dest) class CtsScheduler(object): """ Regression tests for Pacemaker's scheduler """ def _parse_args(self, argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Display any differences from expected output') parser.add_argument('--run', metavar='TEST', help=('Run only single specified test (any further ' 'arguments will be passed to crm_simulate)')) parser.add_argument('--update', action='store_true', help='Update expected results with actual results') parser.add_argument('-b', '--binary', metavar='PATH', help='Specify path to crm_simulate') parser.add_argument('-i', '--io-dir', metavar='PATH', help='Specify path to regression test data directory') parser.add_argument('-o', '--out-dir', metavar='PATH', help='Specify where intermediate and output files should go') parser.add_argument('-v', '--valgrind', action='store_true', help='Run all commands under valgrind') parser.add_argument('--valgrind-dhat', action='store_true', help='Run all commands under valgrind with heap analyzer') parser.add_argument('--valgrind-skip-output', action='store_true', help='If running under valgrind, do not display output') parser.add_argument('--testcmd-options', metavar='OPTIONS', default='', help='Additional options for command under test') # argparse can't handle "everything after --run TEST", so grab that self.single_test_args = [] narg = 0 for arg in argv: narg = narg + 1 if arg == '--run': (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:]) break self.args = parser.parse_args(argv[1:]) def _error(self, s): print(" * ERROR: %s" % s) def _failed(self, s): print(" * FAILED: %s" % s) def _get_valgrind_cmd(self): """ Return command arguments needed (or not) to run 
valgrind """ if self.args.valgrind: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "-q", "--gen-suppressions=all", "--time-stamp=yes", "--trace-children=no", "--show-reachable=no", "--leak-check=full", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home) ] if self.args.valgrind_dhat: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "--tool=exp-dhat", "--time-stamp=yes", "--trace-children=no", "--show-top-n=100", "--num-callers=4" ] return [] def _get_simulator_cmd(self): """ Locate the simulation binary """ if self.args.binary is None: self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate" if not is_executable(self.args.binary): self.args.binary = BuildVars.SBINDIR + "/crm_simulate" if not is_executable(self.args.binary): # @TODO it would be more pythonic to raise an exception self._error("Test binary " + self.args.binary + " not found") sys.exit(CrmExit.NOT_INSTALLED) return [ self.args.binary ] + shlex.split(self.args.testcmd_options) def set_schema_env(self): """ Ensure schema directory environment variable is set, if possible """ try: return os.environ['PCMK_schema_directory'] except KeyError: for d in [ os.path.join(BuildVars.BUILDDIR, "xml"), BuildVars.CRM_SCHEMA_DIRECTORY ]: if os.path.isdir(d): os.environ['PCMK_schema_directory'] = d return d return None def __init__(self, argv=sys.argv): self._parse_args(argv) # Where this executable lives self.test_home = os.path.dirname(os.path.realpath(argv[0])) # Where test data resides if self.args.io_dir is None: self.args.io_dir = os.path.join(self.test_home, "scheduler") self.xml_input_dir = os.path.join(self.args.io_dir, "xml") self.expected_dir = os.path.join(self.args.io_dir, "exp") self.dot_expected_dir = os.path.join(self.args.io_dir, "dot") self.scores_dir = os.path.join(self.args.io_dir, "scores") self.summary_dir = os.path.join(self.args.io_dir, "summary") self.stderr_expected_dir = os.path.join(self.args.io_dir, "stderr") # Where to store generated files if self.args.out_dir is None: self.args.out_dir = self.args.io_dir self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff") else: self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff") os.environ['CIB_shadow_dir'] = self.args.out_dir self.failed_file = None self.outfile_out_dir = os.path.join(self.args.out_dir, "out") self.dot_out_dir = os.path.join(self.args.out_dir, "dot") self.scores_out_dir = os.path.join(self.args.out_dir, "scores") self.summary_out_dir = os.path.join(self.args.out_dir, "summary") self.stderr_out_dir = os.path.join(self.args.out_dir, "stderr") self.valgrind_out_dir = os.path.join(self.args.out_dir, "valgrind") # Single test mode (if requested) try: # User can give test base name or file name of a test input self.args.run = os.path.splitext(os.path.basename(self.args.run))[0] except (AttributeError, TypeError): pass # --run was not specified self.set_schema_env() # Arguments needed (or not) to run commands self.valgrind_args = self._get_valgrind_cmd() self.simulate_args = self._get_simulator_cmd() # Test counters self.num_failed = 0 self.num_tests = 0 # Ensure that the main output directory exists # We don't want to create it with os.makedirs below if not os.path.isdir(self.args.out_dir): self._error("Output directory missing; can't create output files") sys.exit(CrmExit.CANTCREAT) # Create output subdirectories if they don't exist try: os.makedirs(self.outfile_out_dir, 0o755, True) os.makedirs(self.dot_out_dir, 0o755, True) 
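# In these os.makedirs() calls the positional arguments map to mode=0o755 and
# exist_ok=True, so output subdirectories left over from a previous run are
# reused rather than raising an error.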
os.makedirs(self.scores_out_dir, 0o755, True) os.makedirs(self.summary_out_dir, 0o755, True) os.makedirs(self.stderr_out_dir, 0o755, True) if self.valgrind_args: os.makedirs(self.valgrind_out_dir, 0o755, True) except OSError as ex: self._error("Unable to create output subdirectory: %s" % ex) remove_files([ self.outfile_out_dir, self.dot_out_dir, self.scores_out_dir, self.summary_out_dir, self.stderr_out_dir, ]) sys.exit(CrmExit.CANTCREAT) def _compare_files(self, filename1, filename2): """ Add any file differences to failed results """ if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0: diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL) self.failed_file.write("\n"); return True return False def run_one(self, test_name, test_desc, test_args=[]): """ Run one scheduler test """ print(" Test %-25s %s" % ((test_name + ":"), test_desc)) did_fail = False self.num_tests = self.num_tests + 1 # Test inputs input_filename = os.path.join( self.xml_input_dir, "%s.xml" % test_name) expected_filename = os.path.join( self.expected_dir, "%s.exp" % test_name) dot_expected_filename = os.path.join( self.dot_expected_dir, "%s.dot" % test_name) scores_filename = os.path.join( self.scores_dir, "%s.scores" % test_name) summary_filename = os.path.join( self.summary_dir, "%s.summary" % test_name) stderr_expected_filename = os.path.join( self.stderr_expected_dir, "%s.stderr" % test_name) # (Intermediate) test outputs output_filename = os.path.join( self.outfile_out_dir, "%s.out" % test_name) dot_output_filename = os.path.join( self.dot_out_dir, "%s.dot.pe" % test_name) score_output_filename = os.path.join( self.scores_out_dir, "%s.scores.pe" % test_name) summary_output_filename = os.path.join( self.summary_out_dir, "%s.summary.pe" % test_name) stderr_output_filename = os.path.join( self.stderr_out_dir, "%s.stderr.pe" % test_name) valgrind_output_filename = os.path.join( self.valgrind_out_dir, "%s.valgrind" % test_name) # Common arguments for running test test_cmd = [] if self.valgrind_args: test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ] test_cmd = test_cmd + self.simulate_args # @TODO It would be more pythonic to raise exceptions for errors, # then perhaps it would be nice to make a single-test class # Ensure necessary test inputs exist if not os.path.isfile(input_filename): self._error("No input") self.num_failed = self.num_failed + 1 return CrmExit.NOINPUT if not self.args.update and not os.path.isfile(expected_filename): self._error("no stored output") return CrmExit.NOINPUT # Run simulation to generate summary output if self.args.run: # Single test mode test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args print(" ".join(test_cmd_full)) else: # @TODO Why isn't test_args added here? test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] with io.open(summary_output_filename, "wt") as f: simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) - # This makes diff happy regardless of PCMK__COMPAT_2_0 - sed = subprocess.Popen(["sed", "-e", "s/ocf::/ocf:/g"], - stdin=simulation.stdout, stdout=f, + # This makes diff happy regardless of --enable-compat-2.0. + # Use sed -E to make Linux and BSD special characters more compatible. 
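+ # The expressions below also remap the legacy promotable-clone role names
+ # that crm_simulate may print to the newer terminology, mirroring the
+ # summary-file updates elsewhere in this patch. Illustrative examples:
+ #   "* Masters: [ node1 ]"  ->  "* Promoted: [ node1 ]"
+ #   "Demote rsc1:0 ( Master -> Slave node1 )"
+ #       ->  "Demote rsc1:0 ( Promoted -> Unpromoted node1 )"
+ # The " Master" rule matches only the whole word (a leading space and a
+ # trailing space, "[", or end of line), and \1 puts the delimiter back.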
+ sed = subprocess.Popen(["sed", "-E", + "-e", "s/ocf::/ocf:/g", + "-e", r"s/Masters:/Promoted:/", + "-e", r"s/Slaves:/Unpromoted:/", + "-e", r"s/ Master( |\[|$)/ Promoted\1/", + "-e", r"s/ Slave / Unpromoted /", + ], stdin=simulation.stdout, stdout=f, stderr=subprocess.STDOUT) simulation.stdout.close() sed.communicate() if self.args.run: cat(summary_output_filename) # Re-run simulation to generate dot, graph, and scores test_cmd_full = test_cmd + [ '-x', input_filename, '-D', dot_output_filename, '-G', output_filename, '-sSQ' ] + test_args with io.open(stderr_output_filename, "wt") as f_stderr, \ io.open(score_output_filename, "wt") as f_score: rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ) # Check for test command failure if rc != CrmExit.OK: self._failed("Test returned: %d" % rc) did_fail = True print(" ".join(test_cmd_full)) # Check for valgrind errors if self.valgrind_args and not self.args.valgrind_skip_output: if os.stat(valgrind_output_filename).st_size > 0: self._failed("Valgrind reported errors") did_fail = True cat(valgrind_output_filename) remove_files([ valgrind_output_filename ]) # Check for core dump if os.path.isfile("core"): self._failed("Core-file detected: core." + test_name) did_fail = True os.rename("core", "%s/core.%s" % (self.test_home, test_name)) # Check any stderr output if os.path.isfile(stderr_expected_filename): if self._compare_files(stderr_expected_filename, stderr_output_filename): self._failed("stderr changed") did_fail = True elif os.stat(stderr_output_filename).st_size > 0: self._failed("Output was written to stderr") did_fail = True cat(stderr_output_filename) remove_files([ stderr_output_filename ]) # Check whether output graph exists, and normalize it if (not os.path.isfile(output_filename) or os.stat(output_filename).st_size == 0): self._error("No graph produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ output_filename ]) return CrmExit.ERROR normalize(output_filename) # Check whether dot output exists, and sort it if (not os.path.isfile(dot_output_filename) or os.stat(dot_output_filename).st_size == 0): self._error("No dot-file summary produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ dot_output_filename, output_filename ]) return CrmExit.ERROR with io.open(dot_output_filename, "rt") as f: first_line = f.readline() # "digraph" line with opening brace lines = f.readlines() last_line = lines[-1] # closing brace del lines[-1] lines = sorted(set(lines)) # unique sort with io.open(dot_output_filename, "wt") as f: f.write(first_line) f.writelines(lines) f.write(last_line) # Check whether score output exists, and sort it if (not os.path.isfile(score_output_filename) or os.stat(score_output_filename).st_size == 0): self._error("No allocation scores produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ score_output_filename, output_filename ]) return CrmExit.ERROR else: sort_file(score_output_filename) if self.args.update: shutil.copyfile(output_filename, expected_filename) shutil.copyfile(dot_output_filename, dot_expected_filename) shutil.copyfile(score_output_filename, scores_filename) shutil.copyfile(summary_output_filename, summary_filename) print(" Updated expected outputs") if self._compare_files(summary_filename, summary_output_filename): self._failed("summary changed") did_fail = True if self._compare_files(dot_expected_filename, dot_output_filename): self._failed("dot-file summary changed") did_fail = True else: remove_files([ 
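# The generated dot file matched the stored expected version, so the
# intermediate copy is no longer needed (a mismatching copy is left in place).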
dot_output_filename ]) if self._compare_files(expected_filename, output_filename): self._failed("xml-file changed") did_fail = True if self._compare_files(scores_filename, score_output_filename): self._failed("scores-file changed") did_fail = True remove_files([ output_filename, score_output_filename, summary_output_filename]) if did_fail: self.num_failed = self.num_failed + 1 return CrmExit.ERROR return CrmExit.OK def run_all(self): """ Run all defined tests """ if platform.architecture()[0] == "64bit": TESTS.extend(TESTS_64BIT) for group in TESTS: for test in group: try: args = test[2] except IndexError: args = [] self.run_one(test[0], test[1], args) print() def _print_summary(self): """ Print a summary of parameters for this test run """ print("Test home is:\t" + self.test_home) print("Test binary is:\t" + self.args.binary) if 'PCMK_schema_directory' in os.environ: print("Schema home is:\t" + os.environ['PCMK_schema_directory']) if self.valgrind_args != []: print("Activating memory testing with valgrind") print() def _test_results(self): if self.num_failed == 0: return CrmExit.OK if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0: if self.args.verbose: self._error("Results of %d failed tests (out of %d):" % (self.num_failed, self.num_tests)) cat(self.failed_filename) else: self._error("Results of %d failed tests (out of %d) are in %s" % (self.num_failed, self.num_tests, self.failed_filename)) self._error("Use -V to display them after running the tests") else: self._error("%d (of %d) tests failed (no diff results)" % (self.num_failed, self.num_tests)) if os.path.isfile(self.failed_filename): os.remove(self.failed_filename) return CrmExit.ERROR def run(self): """ Run test(s) as specified """ self._print_summary() # Zero out the error log self.failed_file = io.open(self.failed_filename, "wt") if self.args.run is None: print("Performing the following tests from " + self.args.io_dir) print() self.run_all() print() self.failed_file.close() rc = self._test_results() else: rc = self.run_one(self.args.run, "Single shot", self.single_test_args) self.failed_file.close() cat(self.failed_filename) return rc if __name__ == "__main__": sys.exit(CtsScheduler().run()) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/cts/scheduler/summary/a-demote-then-b-migrate.summary b/cts/scheduler/summary/a-demote-then-b-migrate.summary index 17e00f2475..1f5c90b8ac 100644 --- a/cts/scheduler/summary/a-demote-then-b-migrate.summary +++ b/cts/scheduler/summary/a-demote-then-b-migrate.summary @@ -1,57 +1,57 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] * rsc2 (ocf:pacemaker:Dummy): Started node1 Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) - * Promote rsc1:1 ( Slave -> Master node2 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) + * Promote rsc1:1 ( Unpromoted -> Promoted node2 ) * Migrate rsc2 ( node1 -> node2 ) Executing Cluster Transition: * Resource action: rsc1:1 cancel=5000 on node1 * Resource action: rsc1:0 cancel=10000 on node2 * Pseudo action: ms1_pre_notify_demote_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_demote_0 * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: 
ms1_post_notify_demoted_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-post_notify_demoted_0 * Pseudo action: ms1_pre_notify_promote_0 * Resource action: rsc2 migrate_to on node1 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_promote_0 * Resource action: rsc2 migrate_from on node2 * Resource action: rsc2 stop on node1 * Pseudo action: rsc2_start_0 * Pseudo action: ms1_promote_0 * Resource action: rsc2 monitor=5000 on node2 * Resource action: rsc1:0 promote on node2 * Pseudo action: ms1_promoted_0 * Pseudo action: ms1_post_notify_promoted_0 * Resource action: rsc1:1 notify on node1 * Resource action: rsc1:0 notify on node2 * Pseudo action: ms1_confirmed-post_notify_promoted_0 * Resource action: rsc1:1 monitor=10000 on node1 * Resource action: rsc1:0 monitor=5000 on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/a-promote-then-b-migrate.summary b/cts/scheduler/summary/a-promote-then-b-migrate.summary index 1f1e987336..17486c56f2 100644 --- a/cts/scheduler/summary/a-promote-then-b-migrate.summary +++ b/cts/scheduler/summary/a-promote-then-b-migrate.summary @@ -1,42 +1,42 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] * rsc2 (ocf:pacemaker:Dummy): Started node1 Transition Summary: - * Promote rsc1:1 ( Slave -> Master node2 ) + * Promote rsc1:1 ( Unpromoted -> Promoted node2 ) * Migrate rsc2 ( node1 -> node2 ) Executing Cluster Transition: * Resource action: rsc1:1 cancel=10000 on node2 * Pseudo action: ms1_pre_notify_promote_0 * Resource action: rsc1:0 notify on node1 * Resource action: rsc1:1 notify on node2 * Pseudo action: ms1_confirmed-pre_notify_promote_0 * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node2 * Pseudo action: ms1_promoted_0 * Pseudo action: ms1_post_notify_promoted_0 * Resource action: rsc1:0 notify on node1 * Resource action: rsc1:1 notify on node2 * Pseudo action: ms1_confirmed-post_notify_promoted_0 * Resource action: rsc2 migrate_to on node1 * Resource action: rsc1:1 monitor=5000 on node2 * Resource action: rsc2 migrate_from on node2 * Resource action: rsc2 stop on node1 * Pseudo action: rsc2_start_0 * Resource action: rsc2 monitor=5000 on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 node2 ] + * Promoted: [ node1 node2 ] * rsc2 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/anon-instance-pending.summary b/cts/scheduler/summary/anon-instance-pending.summary index 6f181842db..379fbce612 100644 --- a/cts/scheduler/summary/anon-instance-pending.summary +++ b/cts/scheduler/summary/anon-instance-pending.summary @@ -1,224 +1,224 @@ Current cluster status: * Node List: * Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ] * Full List of Resources: * Fencing (stonith:fence_imaginary): Started node1 * Clone Set: clone1 [clone1rsc] (promotable): * clone1rsc (ocf:pacemaker:Stateful): Starting node4 - * Masters: [ node3 ] - * 
Slaves: [ node1 node2 ] + * Promoted: [ node3 ] + * Unpromoted: [ node1 node2 ] * Stopped: [ node5 node6 node7 node8 node9 node10 node11 ] * Clone Set: clone2 [clone2rsc]: * clone2rsc (ocf:pacemaker:Dummy): Starting node4 * Started: [ node2 ] * Stopped: [ node1 node3 node5 node6 node7 node8 node9 node10 node11 ] * Clone Set: clone3 [clone3rsc]: * Started: [ node3 ] * Stopped: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ] * Clone Set: clone4 [clone4rsc]: * clone4rsc (ocf:pacemaker:Dummy): Stopping node8 * clone4rsc (ocf:pacemaker:Dummy): ORPHANED Started node9 * Started: [ node1 node5 node6 node7 ] * Stopped: [ node2 node3 node4 node10 node11 ] * Clone Set: clone5 [clone5group]: * Resource Group: clone5group:2: * clone5rsc1 (ocf:pacemaker:Dummy): Started node3 * clone5rsc2 (ocf:pacemaker:Dummy): Starting node3 * clone5rsc3 (ocf:pacemaker:Dummy): Stopped * Started: [ node1 node2 ] * Stopped: [ node4 node5 node6 node7 node8 node9 node10 node11 ] Transition Summary: * Start clone1rsc:4 ( node9 ) * Start clone1rsc:5 ( node10 ) * Start clone1rsc:6 ( node11 ) * Start clone1rsc:7 ( node5 ) * Start clone1rsc:8 ( node6 ) * Start clone1rsc:9 ( node7 ) * Start clone1rsc:10 ( node8 ) * Start clone2rsc:2 ( node10 ) * Start clone2rsc:3 ( node11 ) * Start clone2rsc:4 ( node3 ) * Start clone3rsc:1 ( node5 ) * Start clone3rsc:2 ( node6 ) * Start clone3rsc:3 ( node7 ) * Start clone3rsc:4 ( node8 ) * Start clone3rsc:5 ( node9 ) * Start clone3rsc:6 ( node1 ) * Start clone3rsc:7 ( node10 ) * Start clone3rsc:8 ( node11 ) * Start clone3rsc:9 ( node2 ) * Start clone3rsc:10 ( node4 ) * Stop clone4rsc:5 ( node9 ) due to node availability * Start clone5rsc3:2 ( node3 ) * Start clone5rsc1:3 ( node9 ) * Start clone5rsc2:3 ( node9 ) * Start clone5rsc3:3 ( node9 ) * Start clone5rsc1:4 ( node10 ) * Start clone5rsc2:4 ( node10 ) * Start clone5rsc3:4 ( node10 ) * Start clone5rsc1:5 ( node11 ) * Start clone5rsc2:5 ( node11 ) * Start clone5rsc3:5 ( node11 ) * Start clone5rsc1:6 ( node4 ) * Start clone5rsc2:6 ( node4 ) * Start clone5rsc3:6 ( node4 ) * Start clone5rsc1:7 ( node5 ) * Start clone5rsc2:7 ( node5 ) * Start clone5rsc3:7 ( node5 ) * Start clone5rsc1:8 ( node6 ) * Start clone5rsc2:8 ( node6 ) * Start clone5rsc3:8 ( node6 ) * Start clone5rsc1:9 ( node7 ) * Start clone5rsc2:9 ( node7 ) * Start clone5rsc3:9 ( node7 ) * Start clone5rsc1:10 ( node8 ) * Start clone5rsc2:10 ( node8 ) * Start clone5rsc3:10 ( node8 ) Executing Cluster Transition: * Pseudo action: clone1_start_0 * Pseudo action: clone2_start_0 * Resource action: clone3rsc monitor on node2 * Pseudo action: clone3_start_0 * Pseudo action: clone4_stop_0 * Pseudo action: clone5_start_0 * Resource action: clone1rsc start on node4 * Resource action: clone1rsc start on node9 * Resource action: clone1rsc start on node10 * Resource action: clone1rsc start on node11 * Resource action: clone1rsc start on node5 * Resource action: clone1rsc start on node6 * Resource action: clone1rsc start on node7 * Resource action: clone1rsc start on node8 * Pseudo action: clone1_running_0 * Resource action: clone2rsc start on node4 * Resource action: clone2rsc start on node10 * Resource action: clone2rsc start on node11 * Resource action: clone2rsc start on node3 * Pseudo action: clone2_running_0 * Resource action: clone3rsc start on node5 * Resource action: clone3rsc start on node6 * Resource action: clone3rsc start on node7 * Resource action: clone3rsc start on node8 * Resource action: clone3rsc start on node9 * Resource action: clone3rsc start on node1 * Resource 
action: clone3rsc start on node10 * Resource action: clone3rsc start on node11 * Resource action: clone3rsc start on node2 * Resource action: clone3rsc start on node4 * Pseudo action: clone3_running_0 * Resource action: clone4rsc stop on node9 * Pseudo action: clone4_stopped_0 * Pseudo action: clone5group:2_start_0 * Resource action: clone5rsc2 start on node3 * Resource action: clone5rsc3 start on node3 * Pseudo action: clone5group:3_start_0 * Resource action: clone5rsc1 start on node9 * Resource action: clone5rsc2 start on node9 * Resource action: clone5rsc3 start on node9 * Pseudo action: clone5group:4_start_0 * Resource action: clone5rsc1 start on node10 * Resource action: clone5rsc2 start on node10 * Resource action: clone5rsc3 start on node10 * Pseudo action: clone5group:5_start_0 * Resource action: clone5rsc1 start on node11 * Resource action: clone5rsc2 start on node11 * Resource action: clone5rsc3 start on node11 * Pseudo action: clone5group:6_start_0 * Resource action: clone5rsc1 start on node4 * Resource action: clone5rsc2 start on node4 * Resource action: clone5rsc3 start on node4 * Pseudo action: clone5group:7_start_0 * Resource action: clone5rsc1 start on node5 * Resource action: clone5rsc2 start on node5 * Resource action: clone5rsc3 start on node5 * Pseudo action: clone5group:8_start_0 * Resource action: clone5rsc1 start on node6 * Resource action: clone5rsc2 start on node6 * Resource action: clone5rsc3 start on node6 * Pseudo action: clone5group:9_start_0 * Resource action: clone5rsc1 start on node7 * Resource action: clone5rsc2 start on node7 * Resource action: clone5rsc3 start on node7 * Pseudo action: clone5group:10_start_0 * Resource action: clone5rsc1 start on node8 * Resource action: clone5rsc2 start on node8 * Resource action: clone5rsc3 start on node8 * Resource action: clone1rsc monitor=10000 on node4 * Resource action: clone1rsc monitor=10000 on node9 * Resource action: clone1rsc monitor=10000 on node10 * Resource action: clone1rsc monitor=10000 on node11 * Resource action: clone1rsc monitor=10000 on node5 * Resource action: clone1rsc monitor=10000 on node6 * Resource action: clone1rsc monitor=10000 on node7 * Resource action: clone1rsc monitor=10000 on node8 * Resource action: clone2rsc monitor=10000 on node4 * Resource action: clone2rsc monitor=10000 on node10 * Resource action: clone2rsc monitor=10000 on node11 * Resource action: clone2rsc monitor=10000 on node3 * Resource action: clone3rsc monitor=10000 on node5 * Resource action: clone3rsc monitor=10000 on node6 * Resource action: clone3rsc monitor=10000 on node7 * Resource action: clone3rsc monitor=10000 on node8 * Resource action: clone3rsc monitor=10000 on node9 * Resource action: clone3rsc monitor=10000 on node1 * Resource action: clone3rsc monitor=10000 on node10 * Resource action: clone3rsc monitor=10000 on node11 * Resource action: clone3rsc monitor=10000 on node2 * Resource action: clone3rsc monitor=10000 on node4 * Pseudo action: clone5group:2_running_0 * Resource action: clone5rsc2 monitor=10000 on node3 * Resource action: clone5rsc3 monitor=10000 on node3 * Pseudo action: clone5group:3_running_0 * Resource action: clone5rsc1 monitor=10000 on node9 * Resource action: clone5rsc2 monitor=10000 on node9 * Resource action: clone5rsc3 monitor=10000 on node9 * Pseudo action: clone5group:4_running_0 * Resource action: clone5rsc1 monitor=10000 on node10 * Resource action: clone5rsc2 monitor=10000 on node10 * Resource action: clone5rsc3 monitor=10000 on node10 * Pseudo action: clone5group:5_running_0 * 
Resource action: clone5rsc1 monitor=10000 on node11 * Resource action: clone5rsc2 monitor=10000 on node11 * Resource action: clone5rsc3 monitor=10000 on node11 * Pseudo action: clone5group:6_running_0 * Resource action: clone5rsc1 monitor=10000 on node4 * Resource action: clone5rsc2 monitor=10000 on node4 * Resource action: clone5rsc3 monitor=10000 on node4 * Pseudo action: clone5group:7_running_0 * Resource action: clone5rsc1 monitor=10000 on node5 * Resource action: clone5rsc2 monitor=10000 on node5 * Resource action: clone5rsc3 monitor=10000 on node5 * Pseudo action: clone5group:8_running_0 * Resource action: clone5rsc1 monitor=10000 on node6 * Resource action: clone5rsc2 monitor=10000 on node6 * Resource action: clone5rsc3 monitor=10000 on node6 * Pseudo action: clone5group:9_running_0 * Resource action: clone5rsc1 monitor=10000 on node7 * Resource action: clone5rsc2 monitor=10000 on node7 * Resource action: clone5rsc3 monitor=10000 on node7 * Pseudo action: clone5group:10_running_0 * Resource action: clone5rsc1 monitor=10000 on node8 * Resource action: clone5rsc2 monitor=10000 on node8 * Resource action: clone5rsc3 monitor=10000 on node8 * Pseudo action: clone5_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ] * Full List of Resources: * Fencing (stonith:fence_imaginary): Started node1 * Clone Set: clone1 [clone1rsc] (promotable): - * Masters: [ node3 ] - * Slaves: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ] + * Promoted: [ node3 ] + * Unpromoted: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ] * Clone Set: clone2 [clone2rsc]: * Started: [ node2 node3 node4 node10 node11 ] * Clone Set: clone3 [clone3rsc]: * Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ] * Clone Set: clone4 [clone4rsc]: * Started: [ node1 node5 node6 node7 node8 ] * Clone Set: clone5 [clone5group]: * Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ] diff --git a/cts/scheduler/summary/anti-colocation-master.summary b/cts/scheduler/summary/anti-colocation-master.summary index c579c3f954..c1b88cab47 100644 --- a/cts/scheduler/summary/anti-colocation-master.summary +++ b/cts/scheduler/summary/anti-colocation-master.summary @@ -1,38 +1,38 @@ Using the original execution date of: 2016-04-29 09:06:59Z Current cluster status: * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-2 * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 * Clone Set: ms1 [state1] (promotable): - * Masters: [ sle12sp2-1 ] - * Slaves: [ sle12sp2-2 ] + * Promoted: [ sle12sp2-1 ] + * Unpromoted: [ sle12sp2-2 ] Transition Summary: * Move dummy1 ( sle12sp2-2 -> sle12sp2-1 ) - * Promote state1:0 ( Slave -> Master sle12sp2-2 ) - * Demote state1:1 ( Master -> Slave sle12sp2-1 ) + * Promote state1:0 ( Unpromoted -> Promoted sle12sp2-2 ) + * Demote state1:1 ( Promoted -> Unpromoted sle12sp2-1 ) Executing Cluster Transition: * Resource action: dummy1 stop on sle12sp2-2 * Pseudo action: ms1_demote_0 * Resource action: state1 demote on sle12sp2-1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_promote_0 * Resource action: dummy1 start on sle12sp2-1 * Resource action: state1 promote on sle12sp2-2 * Pseudo action: ms1_promoted_0 Using the original execution date of: 2016-04-29 09:06:59Z Revised Cluster Status: * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd 
(stonith:external/sbd): Started sle12sp2-2 * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-1 * Clone Set: ms1 [state1] (promotable): - * Masters: [ sle12sp2-2 ] - * Slaves: [ sle12sp2-1 ] + * Promoted: [ sle12sp2-2 ] + * Unpromoted: [ sle12sp2-1 ] diff --git a/cts/scheduler/summary/anti-colocation-slave.summary b/cts/scheduler/summary/anti-colocation-slave.summary index 8c2d310bc2..42aa106b10 100644 --- a/cts/scheduler/summary/anti-colocation-slave.summary +++ b/cts/scheduler/summary/anti-colocation-slave.summary @@ -1,36 +1,36 @@ Current cluster status: * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-1 * Clone Set: ms1 [state1] (promotable): - * Masters: [ sle12sp2-1 ] - * Slaves: [ sle12sp2-2 ] + * Promoted: [ sle12sp2-1 ] + * Unpromoted: [ sle12sp2-2 ] * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-1 Transition Summary: - * Demote state1:0 ( Master -> Slave sle12sp2-1 ) - * Promote state1:1 ( Slave -> Master sle12sp2-2 ) + * Demote state1:0 ( Promoted -> Unpromoted sle12sp2-1 ) + * Promote state1:1 ( Unpromoted -> Promoted sle12sp2-2 ) * Move dummy1 ( sle12sp2-1 -> sle12sp2-2 ) Executing Cluster Transition: * Resource action: dummy1 stop on sle12sp2-1 * Pseudo action: ms1_demote_0 * Resource action: state1 demote on sle12sp2-1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_promote_0 * Resource action: state1 promote on sle12sp2-2 * Pseudo action: ms1_promoted_0 * Resource action: dummy1 start on sle12sp2-2 Revised Cluster Status: * Node List: * Online: [ sle12sp2-1 sle12sp2-2 ] * Full List of Resources: * st_sbd (stonith:external/sbd): Started sle12sp2-1 * Clone Set: ms1 [state1] (promotable): - * Masters: [ sle12sp2-2 ] - * Slaves: [ sle12sp2-1 ] + * Promoted: [ sle12sp2-2 ] + * Unpromoted: [ sle12sp2-1 ] * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 diff --git a/cts/scheduler/summary/asymmetric.summary b/cts/scheduler/summary/asymmetric.summary index e9a8167a4e..f9c8f7e202 100644 --- a/cts/scheduler/summary/asymmetric.summary +++ b/cts/scheduler/summary/asymmetric.summary @@ -1,29 +1,29 @@ Current cluster status: * Node List: * Online: [ puma1 puma3 ] * Full List of Resources: * Clone Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] (promotable): - * Masters: [ puma3 ] - * Slaves: [ puma1 ] + * Promoted: [ puma3 ] + * Unpromoted: [ puma1 ] * vpool_ip_poolA (ocf:heartbeat:IPaddr2): Stopped * drbd_target_poolA (ocf:vpools:iscsi_target): Stopped Transition Summary: Executing Cluster Transition: * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 monitor=19000 on puma1 * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 monitor=20000 on puma3 * Resource action: drbd_target_poolA monitor on puma3 * Resource action: drbd_target_poolA monitor on puma1 Revised Cluster Status: * Node List: * Online: [ puma1 puma3 ] * Full List of Resources: * Clone Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] (promotable): - * Masters: [ puma3 ] - * Slaves: [ puma1 ] + * Promoted: [ puma3 ] + * Unpromoted: [ puma1 ] * vpool_ip_poolA (ocf:heartbeat:IPaddr2): Stopped * drbd_target_poolA (ocf:vpools:iscsi_target): Stopped diff --git a/cts/scheduler/summary/bug-1572-1.summary b/cts/scheduler/summary/bug-1572-1.summary index 7b0dee350d..6abedea530 100644 --- a/cts/scheduler/summary/bug-1572-1.summary +++ b/cts/scheduler/summary/bug-1572-1.summary @@ -1,85 +1,85 @@ Current cluster status: * Node List: * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] * Full List of Resources: * Clone Set: 
ms_drbd_7788 [rsc_drbd_7788] (promotable): - * Masters: [ arc-tkincaidlx.wsicorp.com ] - * Slaves: [ arc-dknightlx ] + * Promoted: [ arc-tkincaidlx.wsicorp.com ] + * Unpromoted: [ arc-dknightlx ] * Resource Group: grp_pgsql_mirror: * fs_mirror (ocf:heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com * pgsql_5555 (ocf:heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: - * Stop rsc_drbd_7788:0 ( Slave arc-dknightlx ) due to node availability - * Restart rsc_drbd_7788:1 ( Master arc-tkincaidlx.wsicorp.com ) due to resource definition change + * Stop rsc_drbd_7788:0 ( Unpromoted arc-dknightlx ) due to node availability + * Restart rsc_drbd_7788:1 ( Promoted arc-tkincaidlx.wsicorp.com ) due to resource definition change * Restart fs_mirror ( arc-tkincaidlx.wsicorp.com ) due to required ms_drbd_7788 notified * Restart pgsql_5555 ( arc-tkincaidlx.wsicorp.com ) due to required fs_mirror start * Restart IPaddr_147_81_84_133 ( arc-tkincaidlx.wsicorp.com ) due to required pgsql_5555 start Executing Cluster Transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_7788_pre_notify_start_0 * Pseudo action: ms_drbd_7788_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_7788_start_0 * Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_running_0 * Pseudo action: ms_drbd_7788_post_notify_running_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_7788_pre_notify_promote_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_7788_promote_0 * Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_promoted_0 * Pseudo action: ms_drbd_7788_post_notify_promoted_0 * Resource action: rsc_drbd_7788:1 notify on 
arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_promoted_0 * Pseudo action: grp_pgsql_mirror_start_0 * Resource action: fs_mirror start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 start on arc-tkincaidlx.wsicorp.com * Resource action: pgsql_5555 monitor=30000 on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com * Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_running_0 Revised Cluster Status: * Node List: * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] * Full List of Resources: * Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable): - * Masters: [ arc-tkincaidlx.wsicorp.com ] + * Promoted: [ arc-tkincaidlx.wsicorp.com ] * Stopped: [ arc-dknightlx ] * Resource Group: grp_pgsql_mirror: * fs_mirror (ocf:heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com * pgsql_5555 (ocf:heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com diff --git a/cts/scheduler/summary/bug-1572-2.summary b/cts/scheduler/summary/bug-1572-2.summary index c92af407e4..7d4921dc36 100644 --- a/cts/scheduler/summary/bug-1572-2.summary +++ b/cts/scheduler/summary/bug-1572-2.summary @@ -1,61 +1,61 @@ Current cluster status: * Node List: * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] * Full List of Resources: * Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable): - * Masters: [ arc-tkincaidlx.wsicorp.com ] - * Slaves: [ arc-dknightlx ] + * Promoted: [ arc-tkincaidlx.wsicorp.com ] + * Unpromoted: [ arc-dknightlx ] * Resource Group: grp_pgsql_mirror: * fs_mirror (ocf:heartbeat:Filesystem): Started arc-tkincaidlx.wsicorp.com * pgsql_5555 (ocf:heartbeat:pgsql): Started arc-tkincaidlx.wsicorp.com * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Started arc-tkincaidlx.wsicorp.com Transition Summary: - * Stop rsc_drbd_7788:0 ( Slave arc-dknightlx ) due to node availability - * Demote rsc_drbd_7788:1 ( Master -> Slave arc-tkincaidlx.wsicorp.com ) + * Stop rsc_drbd_7788:0 ( Unpromoted arc-dknightlx ) due to node availability + * Demote rsc_drbd_7788:1 ( Promoted -> Unpromoted arc-tkincaidlx.wsicorp.com ) * Stop fs_mirror ( arc-tkincaidlx.wsicorp.com ) due to node availability * Stop pgsql_5555 ( arc-tkincaidlx.wsicorp.com ) due to node availability * Stop IPaddr_147_81_84_133 ( arc-tkincaidlx.wsicorp.com ) due to node availability Executing Cluster Transition: * Pseudo action: ms_drbd_7788_pre_notify_demote_0 * Pseudo action: grp_pgsql_mirror_stop_0 * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_demote_0 * Resource action: pgsql_5555 stop on arc-tkincaidlx.wsicorp.com * Resource action: fs_mirror stop on arc-tkincaidlx.wsicorp.com * Pseudo action: grp_pgsql_mirror_stopped_0 * Pseudo action: ms_drbd_7788_demote_0 * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_demoted_0 * Pseudo action: ms_drbd_7788_post_notify_demoted_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_7788_pre_notify_stop_0 * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx 
* Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_7788_stop_0 * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx * Pseudo action: ms_drbd_7788_stopped_0 * Cluster action: do_shutdown on arc-dknightlx * Pseudo action: ms_drbd_7788_post_notify_stopped_0 * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com * Pseudo action: ms_drbd_7788_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ] * Full List of Resources: * Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable): - * Slaves: [ arc-tkincaidlx.wsicorp.com ] + * Unpromoted: [ arc-tkincaidlx.wsicorp.com ] * Stopped: [ arc-dknightlx ] * Resource Group: grp_pgsql_mirror: * fs_mirror (ocf:heartbeat:Filesystem): Stopped * pgsql_5555 (ocf:heartbeat:pgsql): Stopped * IPaddr_147_81_84_133 (ocf:heartbeat:IPaddr): Stopped diff --git a/cts/scheduler/summary/bug-1685.summary b/cts/scheduler/summary/bug-1685.summary index 53f777c5e4..044b018f19 100644 --- a/cts/scheduler/summary/bug-1685.summary +++ b/cts/scheduler/summary/bug-1685.summary @@ -1,38 +1,38 @@ Current cluster status: * Node List: * Online: [ redun1 redun2 ] * Full List of Resources: * Clone Set: shared_storage [prim_shared_storage] (promotable): - * Slaves: [ redun1 redun2 ] + * Unpromoted: [ redun1 redun2 ] * shared_filesystem (ocf:heartbeat:Filesystem): Stopped Transition Summary: - * Promote prim_shared_storage:0 ( Slave -> Master redun2 ) + * Promote prim_shared_storage:0 ( Unpromoted -> Promoted redun2 ) * Start shared_filesystem ( redun2 ) Executing Cluster Transition: * Pseudo action: shared_storage_pre_notify_promote_0 * Resource action: prim_shared_storage:0 notify on redun2 * Resource action: prim_shared_storage:1 notify on redun1 * Pseudo action: shared_storage_confirmed-pre_notify_promote_0 * Pseudo action: shared_storage_promote_0 * Resource action: prim_shared_storage:0 promote on redun2 * Pseudo action: shared_storage_promoted_0 * Pseudo action: shared_storage_post_notify_promoted_0 * Resource action: prim_shared_storage:0 notify on redun2 * Resource action: prim_shared_storage:1 notify on redun1 * Pseudo action: shared_storage_confirmed-post_notify_promoted_0 * Resource action: shared_filesystem start on redun2 * Resource action: prim_shared_storage:1 monitor=120000 on redun1 * Resource action: shared_filesystem monitor=120000 on redun2 Revised Cluster Status: * Node List: * Online: [ redun1 redun2 ] * Full List of Resources: * Clone Set: shared_storage [prim_shared_storage] (promotable): - * Masters: [ redun2 ] - * Slaves: [ redun1 ] + * Promoted: [ redun2 ] + * Unpromoted: [ redun1 ] * shared_filesystem (ocf:heartbeat:Filesystem): Started redun2 diff --git a/cts/scheduler/summary/bug-1765.summary b/cts/scheduler/summary/bug-1765.summary index 789fc73761..ae851fe922 100644 --- a/cts/scheduler/summary/bug-1765.summary +++ b/cts/scheduler/summary/bug-1765.summary @@ -1,38 +1,38 @@ Current cluster status: * Node List: * Online: [ sles236 sles238 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ sles236 ] + * Promoted: [ sles236 ] * Stopped: [ sles238 ] * Clone Set: ms-drbd1 [drbd1] (promotable): - * Masters: [ sles236 ] - * Slaves: [ sles238 ] + * Promoted: [ sles236 ] + * Unpromoted: [ sles238 ] Transition Summary: * Start drbd0:1 ( sles238 ) Executing Cluster Transition: * Pseudo action: ms-drbd0_pre_notify_start_0 * Resource action: 
drbd0:0 notify on sles236 * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd0_start_0 * Resource action: drbd0:1 start on sles238 * Pseudo action: ms-drbd0_running_0 * Pseudo action: ms-drbd0_post_notify_running_0 * Resource action: drbd0:0 notify on sles236 * Resource action: drbd0:1 notify on sles238 * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ sles236 sles238 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ sles236 ] - * Slaves: [ sles238 ] + * Promoted: [ sles236 ] + * Unpromoted: [ sles238 ] * Clone Set: ms-drbd1 [drbd1] (promotable): - * Masters: [ sles236 ] - * Slaves: [ sles238 ] + * Promoted: [ sles236 ] + * Unpromoted: [ sles238 ] diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary index 19ae6a3b36..f0e4b9b0d1 100644 --- a/cts/scheduler/summary/bug-1822.summary +++ b/cts/scheduler/summary/bug-1822.summary @@ -1,44 +1,44 @@ Current cluster status: * Node List: * Online: [ process1a process2b ] * Full List of Resources: * Clone Set: ms-sf [ms-sf_group] (promotable) (unique): * Resource Group: ms-sf_group:0: - * master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Slave process2b + * master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b * master_slave_procdctl:0 (ocf:heartbeat:procdctl): Stopped * Resource Group: ms-sf_group:1: - * master_slave_Stateful:1 (ocf:heartbeat:Dummy-statful): Master process1a - * master_slave_procdctl:1 (ocf:heartbeat:procdctl): Master process1a + * master_slave_Stateful:1 (ocf:heartbeat:Dummy-statful): Promoted process1a + * master_slave_procdctl:1 (ocf:heartbeat:procdctl): Promoted process1a Transition Summary: - * Stop master_slave_Stateful:1 ( Master process1a ) due to node availability - * Stop master_slave_procdctl:1 ( Master process1a ) due to node availability + * Stop master_slave_Stateful:1 ( Promoted process1a ) due to node availability + * Stop master_slave_procdctl:1 ( Promoted process1a ) due to node availability Executing Cluster Transition: * Pseudo action: ms-sf_demote_0 * Pseudo action: ms-sf_group:1_demote_0 * Resource action: master_slave_Stateful:1 demote on process1a * Resource action: master_slave_procdctl:1 demote on process1a * Pseudo action: ms-sf_group:1_demoted_0 * Pseudo action: ms-sf_demoted_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: ms-sf_group:1_stop_0 * Resource action: master_slave_Stateful:1 stop on process1a * Resource action: master_slave_procdctl:1 stop on process1a * Cluster action: do_shutdown on process1a * Pseudo action: ms-sf_group:1_stopped_0 * Pseudo action: ms-sf_stopped_0 Revised Cluster Status: * Node List: * Online: [ process1a process2b ] * Full List of Resources: * Clone Set: ms-sf [ms-sf_group] (promotable) (unique): * Resource Group: ms-sf_group:0: - * master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Slave process2b + * master_slave_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b * master_slave_procdctl:0 (ocf:heartbeat:procdctl): Stopped * Resource Group: ms-sf_group:1: * master_slave_Stateful:1 (ocf:heartbeat:Dummy-statful): Stopped * master_slave_procdctl:1 (ocf:heartbeat:procdctl): Stopped diff --git a/cts/scheduler/summary/bug-5007-masterslave_colocation.summary b/cts/scheduler/summary/bug-5007-masterslave_colocation.summary index ca1f08c279..463fc10dde 100644 --- a/cts/scheduler/summary/bug-5007-masterslave_colocation.summary +++ 
b/cts/scheduler/summary/bug-5007-masterslave_colocation.summary @@ -1,31 +1,31 @@ Current cluster status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MS_DUMMY [DUMMY] (promotable): - * Masters: [ fc16-builder ] - * Slaves: [ fc16-builder2 ] + * Promoted: [ fc16-builder ] + * Unpromoted: [ fc16-builder2 ] * SLAVE_IP (ocf:pacemaker:Dummy): Started fc16-builder * MASTER_IP (ocf:pacemaker:Dummy): Started fc16-builder2 Transition Summary: * Move SLAVE_IP ( fc16-builder -> fc16-builder2 ) * Move MASTER_IP ( fc16-builder2 -> fc16-builder ) Executing Cluster Transition: * Resource action: SLAVE_IP stop on fc16-builder * Resource action: MASTER_IP stop on fc16-builder2 * Resource action: SLAVE_IP start on fc16-builder2 * Resource action: MASTER_IP start on fc16-builder Revised Cluster Status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MS_DUMMY [DUMMY] (promotable): - * Masters: [ fc16-builder ] - * Slaves: [ fc16-builder2 ] + * Promoted: [ fc16-builder ] + * Unpromoted: [ fc16-builder2 ] * SLAVE_IP (ocf:pacemaker:Dummy): Started fc16-builder2 * MASTER_IP (ocf:pacemaker:Dummy): Started fc16-builder diff --git a/cts/scheduler/summary/bug-5059.summary b/cts/scheduler/summary/bug-5059.summary index e67ba8f6d2..a33a2f60a2 100644 --- a/cts/scheduler/summary/bug-5059.summary +++ b/cts/scheduler/summary/bug-5059.summary @@ -1,77 +1,77 @@ Current cluster status: * Node List: * Node gluster03.h: standby * Online: [ gluster01.h gluster02.h ] * OFFLINE: [ gluster04.h ] * Full List of Resources: * Clone Set: ms_stateful [g_stateful] (promotable): * Resource Group: g_stateful:0: - * p_stateful1 (ocf:pacemaker:Stateful): Slave gluster01.h + * p_stateful1 (ocf:pacemaker:Stateful): Unpromoted gluster01.h * p_stateful2 (ocf:pacemaker:Stateful): Stopped * Resource Group: g_stateful:1: - * p_stateful1 (ocf:pacemaker:Stateful): Slave gluster02.h + * p_stateful1 (ocf:pacemaker:Stateful): Unpromoted gluster02.h * p_stateful2 (ocf:pacemaker:Stateful): Stopped * Stopped: [ gluster03.h gluster04.h ] * Clone Set: c_dummy [p_dummy1]: * Started: [ gluster01.h gluster02.h ] Transition Summary: - * Promote p_stateful1:0 ( Slave -> Master gluster01.h ) - * Promote p_stateful2:0 ( Stopped -> Master gluster01.h ) + * Promote p_stateful1:0 ( Unpromoted -> Promoted gluster01.h ) + * Promote p_stateful2:0 ( Stopped -> Promoted gluster01.h ) * Start p_stateful2:1 ( gluster02.h ) Executing Cluster Transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: iptest delete on gluster02.h * Resource action: ipsrc2 delete on gluster02.h * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Pseudo action: g_stateful:0_start_0 * Resource action: p_stateful2:0 start on gluster01.h * Pseudo action: g_stateful:1_start_0 * Resource action: p_stateful2:1 start on gluster02.h * Pseudo action: g_stateful:0_running_0 * Pseudo action: g_stateful:1_running_0 * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-post_notify_running_0 * Pseudo action: ms_stateful_pre_notify_promote_0 * Resource 
action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-pre_notify_promote_0 * Pseudo action: ms_stateful_promote_0 * Pseudo action: g_stateful:0_promote_0 * Resource action: p_stateful1:0 promote on gluster01.h * Resource action: p_stateful2:0 promote on gluster01.h * Pseudo action: g_stateful:0_promoted_0 * Pseudo action: ms_stateful_promoted_0 * Pseudo action: ms_stateful_post_notify_promoted_0 * Resource action: p_stateful1:0 notify on gluster01.h * Resource action: p_stateful2:0 notify on gluster01.h * Resource action: p_stateful1:1 notify on gluster02.h * Resource action: p_stateful2:1 notify on gluster02.h * Pseudo action: ms_stateful_confirmed-post_notify_promoted_0 * Resource action: p_stateful1:1 monitor=10000 on gluster02.h * Resource action: p_stateful2:1 monitor=10000 on gluster02.h Revised Cluster Status: * Node List: * Node gluster03.h: standby * Online: [ gluster01.h gluster02.h ] * OFFLINE: [ gluster04.h ] * Full List of Resources: * Clone Set: ms_stateful [g_stateful] (promotable): - * Masters: [ gluster01.h ] - * Slaves: [ gluster02.h ] + * Promoted: [ gluster01.h ] + * Unpromoted: [ gluster02.h ] * Clone Set: c_dummy [p_dummy1]: * Started: [ gluster01.h gluster02.h ] diff --git a/cts/scheduler/summary/bug-5143-ms-shuffle.summary b/cts/scheduler/summary/bug-5143-ms-shuffle.summary index 465ead3645..18f2566aa5 100644 --- a/cts/scheduler/summary/bug-5143-ms-shuffle.summary +++ b/cts/scheduler/summary/bug-5143-ms-shuffle.summary @@ -1,78 +1,78 @@ 1 of 34 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ hex-1 hex-2 hex-3 ] * Full List of Resources: * fencing (stonith:external/sbd): Started hex-1 * Clone Set: baseclone [basegrp]: * Started: [ hex-1 hex-2 hex-3 ] * fs-xfs-1 (ocf:heartbeat:Filesystem): Started hex-2 * Clone Set: fs2 [fs-ocfs-2]: * Started: [ hex-1 hex-2 hex-3 ] * Clone Set: ms-r0 [drbd-r0] (promotable): - * Masters: [ hex-1 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-1 ] + * Unpromoted: [ hex-2 ] * Clone Set: ms-r1 [drbd-r1] (promotable): - * Slaves: [ hex-2 hex-3 ] + * Unpromoted: [ hex-2 hex-3 ] * Resource Group: md0-group: * md0 (ocf:heartbeat:Raid1): Started hex-3 * vg-md0 (ocf:heartbeat:LVM): Started hex-3 * fs-md0 (ocf:heartbeat:Filesystem): Started hex-3 * dummy1 (ocf:heartbeat:Delay): Started hex-3 * dummy3 (ocf:heartbeat:Delay): Started hex-1 * dummy4 (ocf:heartbeat:Delay): Started hex-2 * dummy5 (ocf:heartbeat:Delay): Started hex-1 * dummy6 (ocf:heartbeat:Delay): Started hex-2 * Resource Group: r0-group: * fs-r0 (ocf:heartbeat:Filesystem): Stopped (disabled) * dummy2 (ocf:heartbeat:Delay): Stopped Transition Summary: - * Promote drbd-r1:1 ( Slave -> Master hex-3 ) + * Promote drbd-r1:1 ( Unpromoted -> Promoted hex-3 ) Executing Cluster Transition: * Pseudo action: ms-r1_pre_notify_promote_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-pre_notify_promote_0 * Pseudo action: ms-r1_promote_0 * Resource action: drbd-r1 promote on hex-3 * Pseudo action: ms-r1_promoted_0 * Pseudo action: ms-r1_post_notify_promoted_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-post_notify_promoted_0 * Resource action: drbd-r1 monitor=29000 on hex-2 * Resource 
action: drbd-r1 monitor=31000 on hex-3 Revised Cluster Status: * Node List: * Online: [ hex-1 hex-2 hex-3 ] * Full List of Resources: * fencing (stonith:external/sbd): Started hex-1 * Clone Set: baseclone [basegrp]: * Started: [ hex-1 hex-2 hex-3 ] * fs-xfs-1 (ocf:heartbeat:Filesystem): Started hex-2 * Clone Set: fs2 [fs-ocfs-2]: * Started: [ hex-1 hex-2 hex-3 ] * Clone Set: ms-r0 [drbd-r0] (promotable): - * Masters: [ hex-1 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-1 ] + * Unpromoted: [ hex-2 ] * Clone Set: ms-r1 [drbd-r1] (promotable): - * Masters: [ hex-3 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-3 ] + * Unpromoted: [ hex-2 ] * Resource Group: md0-group: * md0 (ocf:heartbeat:Raid1): Started hex-3 * vg-md0 (ocf:heartbeat:LVM): Started hex-3 * fs-md0 (ocf:heartbeat:Filesystem): Started hex-3 * dummy1 (ocf:heartbeat:Delay): Started hex-3 * dummy3 (ocf:heartbeat:Delay): Started hex-1 * dummy4 (ocf:heartbeat:Delay): Started hex-2 * dummy5 (ocf:heartbeat:Delay): Started hex-1 * dummy6 (ocf:heartbeat:Delay): Started hex-2 * Resource Group: r0-group: * fs-r0 (ocf:heartbeat:Filesystem): Stopped (disabled) * dummy2 (ocf:heartbeat:Delay): Stopped diff --git a/cts/scheduler/summary/bug-cl-5168.summary b/cts/scheduler/summary/bug-cl-5168.summary index b053f0f2f5..11064b0f4e 100644 --- a/cts/scheduler/summary/bug-cl-5168.summary +++ b/cts/scheduler/summary/bug-cl-5168.summary @@ -1,76 +1,76 @@ Current cluster status: * Node List: * Online: [ hex-1 hex-2 hex-3 ] * Full List of Resources: * fencing (stonith:external/sbd): Started hex-1 * Clone Set: baseclone [basegrp]: * Started: [ hex-1 hex-2 hex-3 ] * fs-xfs-1 (ocf:heartbeat:Filesystem): Started hex-2 * Clone Set: fs2 [fs-ocfs-2]: * Started: [ hex-1 hex-2 hex-3 ] * Clone Set: ms-r0 [drbd-r0] (promotable): - * Masters: [ hex-1 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-1 ] + * Unpromoted: [ hex-2 ] * Resource Group: md0-group: * md0 (ocf:heartbeat:Raid1): Started hex-3 * vg-md0 (ocf:heartbeat:LVM): Started hex-3 * fs-md0 (ocf:heartbeat:Filesystem): Started hex-3 * dummy1 (ocf:heartbeat:Delay): Started hex-3 * dummy3 (ocf:heartbeat:Delay): Started hex-1 * dummy4 (ocf:heartbeat:Delay): Started hex-2 * dummy5 (ocf:heartbeat:Delay): Started hex-1 * dummy6 (ocf:heartbeat:Delay): Started hex-2 * Resource Group: r0-group: * fs-r0 (ocf:heartbeat:Filesystem): Started hex-1 * dummy2 (ocf:heartbeat:Delay): Started hex-1 * Clone Set: ms-r1 [drbd-r1] (promotable): - * Slaves: [ hex-2 hex-3 ] + * Unpromoted: [ hex-2 hex-3 ] Transition Summary: - * Promote drbd-r1:1 ( Slave -> Master hex-3 ) + * Promote drbd-r1:1 ( Unpromoted -> Promoted hex-3 ) Executing Cluster Transition: * Pseudo action: ms-r1_pre_notify_promote_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-pre_notify_promote_0 * Pseudo action: ms-r1_promote_0 * Resource action: drbd-r1 promote on hex-3 * Pseudo action: ms-r1_promoted_0 * Pseudo action: ms-r1_post_notify_promoted_0 * Resource action: drbd-r1 notify on hex-2 * Resource action: drbd-r1 notify on hex-3 * Pseudo action: ms-r1_confirmed-post_notify_promoted_0 * Resource action: drbd-r1 monitor=29000 on hex-2 * Resource action: drbd-r1 monitor=31000 on hex-3 Revised Cluster Status: * Node List: * Online: [ hex-1 hex-2 hex-3 ] * Full List of Resources: * fencing (stonith:external/sbd): Started hex-1 * Clone Set: baseclone [basegrp]: * Started: [ hex-1 hex-2 hex-3 ] * fs-xfs-1 (ocf:heartbeat:Filesystem): Started hex-2 * Clone Set: fs2 [fs-ocfs-2]: * Started: [ hex-1 hex-2 
hex-3 ] * Clone Set: ms-r0 [drbd-r0] (promotable): - * Masters: [ hex-1 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-1 ] + * Unpromoted: [ hex-2 ] * Resource Group: md0-group: * md0 (ocf:heartbeat:Raid1): Started hex-3 * vg-md0 (ocf:heartbeat:LVM): Started hex-3 * fs-md0 (ocf:heartbeat:Filesystem): Started hex-3 * dummy1 (ocf:heartbeat:Delay): Started hex-3 * dummy3 (ocf:heartbeat:Delay): Started hex-1 * dummy4 (ocf:heartbeat:Delay): Started hex-2 * dummy5 (ocf:heartbeat:Delay): Started hex-1 * dummy6 (ocf:heartbeat:Delay): Started hex-2 * Resource Group: r0-group: * fs-r0 (ocf:heartbeat:Filesystem): Started hex-1 * dummy2 (ocf:heartbeat:Delay): Started hex-1 * Clone Set: ms-r1 [drbd-r1] (promotable): - * Masters: [ hex-3 ] - * Slaves: [ hex-2 ] + * Promoted: [ hex-3 ] + * Unpromoted: [ hex-2 ] diff --git a/cts/scheduler/summary/bug-cl-5212.summary b/cts/scheduler/summary/bug-cl-5212.summary index aff62dbfeb..48cb54bedc 100644 --- a/cts/scheduler/summary/bug-cl-5212.summary +++ b/cts/scheduler/summary/bug-cl-5212.summary @@ -1,69 +1,69 @@ Current cluster status: * Node List: * Node srv01: UNCLEAN (offline) * Node srv02: UNCLEAN (offline) * Online: [ srv03 ] * Full List of Resources: * Resource Group: grpStonith1: * prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) * Resource Group: grpStonith2: * prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN) * Resource Group: grpStonith3: * prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) * Clone Set: msPostgresql [pgsql] (promotable): - * pgsql (ocf:pacemaker:Stateful): Slave srv02 (UNCLEAN) - * pgsql (ocf:pacemaker:Stateful): Master srv01 (UNCLEAN) - * Slaves: [ srv03 ] + * pgsql (ocf:pacemaker:Stateful): Unpromoted srv02 (UNCLEAN) + * pgsql (ocf:pacemaker:Stateful): Promoted srv01 (UNCLEAN) + * Unpromoted: [ srv03 ] * Clone Set: clnPingd [prmPingd]: * prmPingd (ocf:pacemaker:ping): Started srv02 (UNCLEAN) * prmPingd (ocf:pacemaker:ping): Started srv01 (UNCLEAN) * Started: [ srv03 ] Transition Summary: * Stop prmStonith1-1 ( srv02 ) blocked * Stop prmStonith2-1 ( srv01 ) blocked * Stop prmStonith3-1 ( srv01 ) due to node availability (blocked) - * Stop pgsql:0 ( Slave srv02 ) due to node availability (blocked) - * Stop pgsql:1 ( Master srv01 ) due to node availability (blocked) + * Stop pgsql:0 ( Unpromoted srv02 ) due to node availability (blocked) + * Stop pgsql:1 ( Promoted srv01 ) due to node availability (blocked) * Stop prmPingd:0 ( srv02 ) due to node availability (blocked) * Stop prmPingd:1 ( srv01 ) due to node availability (blocked) Executing Cluster Transition: * Pseudo action: grpStonith1_stop_0 * Pseudo action: grpStonith1_start_0 * Pseudo action: grpStonith2_stop_0 * Pseudo action: grpStonith2_start_0 * Pseudo action: grpStonith3_stop_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: clnPingd_stop_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: clnPingd_stopped_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: msPostgresql_post_notify_stopped_0 * Resource action: pgsql notify on srv03 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Node srv01: UNCLEAN (offline) * Node srv02: UNCLEAN (offline) * Online: [ srv03 ] * Full List of Resources: * Resource Group: grpStonith1: * prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN) * Resource Group: grpStonith2: * prmStonith2-1 (stonith:external/ssh): Started srv01 
(UNCLEAN) * Resource Group: grpStonith3: * prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN) * Clone Set: msPostgresql [pgsql] (promotable): - * pgsql (ocf:pacemaker:Stateful): Slave srv02 (UNCLEAN) - * pgsql (ocf:pacemaker:Stateful): Master srv01 (UNCLEAN) - * Slaves: [ srv03 ] + * pgsql (ocf:pacemaker:Stateful): Unpromoted srv02 (UNCLEAN) + * pgsql (ocf:pacemaker:Stateful): Promoted srv01 (UNCLEAN) + * Unpromoted: [ srv03 ] * Clone Set: clnPingd [prmPingd]: * prmPingd (ocf:pacemaker:ping): Started srv02 (UNCLEAN) * prmPingd (ocf:pacemaker:ping): Started srv01 (UNCLEAN) * Started: [ srv03 ] diff --git a/cts/scheduler/summary/bug-cl-5213.summary b/cts/scheduler/summary/bug-cl-5213.summary index f29257e22b..047f75d48d 100644 --- a/cts/scheduler/summary/bug-cl-5213.summary +++ b/cts/scheduler/summary/bug-cl-5213.summary @@ -1,22 +1,22 @@ Current cluster status: * Node List: * Online: [ srv01 srv02 ] * Full List of Resources: * A-master (ocf:heartbeat:Dummy): Started srv02 * Clone Set: msPostgresql [pgsql] (promotable): - * Slaves: [ srv01 srv02 ] + * Unpromoted: [ srv01 srv02 ] Transition Summary: Executing Cluster Transition: * Resource action: pgsql monitor=10000 on srv01 Revised Cluster Status: * Node List: * Online: [ srv01 srv02 ] * Full List of Resources: * A-master (ocf:heartbeat:Dummy): Started srv02 * Clone Set: msPostgresql [pgsql] (promotable): - * Slaves: [ srv01 srv02 ] + * Unpromoted: [ srv01 srv02 ] diff --git a/cts/scheduler/summary/bug-cl-5219.summary b/cts/scheduler/summary/bug-cl-5219.summary index 050a3e27cc..c5935e1465 100644 --- a/cts/scheduler/summary/bug-cl-5219.summary +++ b/cts/scheduler/summary/bug-cl-5219.summary @@ -1,43 +1,43 @@ 1 of 9 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ] * Full List of Resources: * child1-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au (disabled) * child2-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * parent-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * Clone Set: child1 [stateful-child1] (promotable): - * Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] * Clone Set: child2 [stateful-child2] (promotable): - * Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] * Clone Set: parent [stateful-parent] (promotable): - * Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] Transition Summary: * Stop child1-service ( ha2.test.anchor.net.au ) due to node availability Executing Cluster Transition: * Resource action: child1-service stop on ha2.test.anchor.net.au Revised Cluster Status: * Node List: * Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ] * Full List of Resources: * child1-service (ocf:pacemaker:Dummy): Stopped (disabled) * child2-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * parent-service (ocf:pacemaker:Dummy): Started ha2.test.anchor.net.au * Clone Set: child1 [stateful-child1] (promotable): - * Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] * Clone Set: child2 [stateful-child2] (promotable): - 
* Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] * Clone Set: parent [stateful-parent] (promotable): - * Masters: [ ha2.test.anchor.net.au ] - * Slaves: [ ha1.test.anchor.net.au ] + * Promoted: [ ha2.test.anchor.net.au ] + * Unpromoted: [ ha1.test.anchor.net.au ] diff --git a/cts/scheduler/summary/bug-cl-5247.summary b/cts/scheduler/summary/bug-cl-5247.summary index 8d68708c6c..056e526490 100644 --- a/cts/scheduler/summary/bug-cl-5247.summary +++ b/cts/scheduler/summary/bug-cl-5247.summary @@ -1,87 +1,87 @@ Using the original execution date of: 2015-08-12 02:53:40Z Current cluster status: * Node List: * Online: [ bl460g8n3 bl460g8n4 ] * GuestOnline: [ pgsr01@bl460g8n3 ] * Full List of Resources: * prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3 * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED bl460g8n4 * Resource Group: grpStonith1: * prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 * Resource Group: grpStonith2: * prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 * Resource Group: master-group: * vip-master (ocf:heartbeat:Dummy): FAILED pgsr02 * vip-rep (ocf:heartbeat:Dummy): FAILED pgsr02 * Clone Set: msPostgresql [pgsql] (promotable): - * Masters: [ pgsr01 ] + * Promoted: [ pgsr01 ] * Stopped: [ bl460g8n3 bl460g8n4 ] Transition Summary: * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean' * Stop prmDB2 ( bl460g8n4 ) due to node availability * Recover vip-master ( pgsr02 -> pgsr01 ) * Recover vip-rep ( pgsr02 -> pgsr01 ) - * Stop pgsql:0 ( Master pgsr02 ) due to node availability + * Stop pgsql:0 ( Promoted pgsr02 ) due to node availability * Stop pgsr02 ( bl460g8n4 ) due to node availability Executing Cluster Transition: * Resource action: vip-master monitor on pgsr01 * Resource action: vip-rep monitor on pgsr01 * Pseudo action: msPostgresql_pre_notify_demote_0 * Resource action: pgsr01 monitor on bl460g8n4 * Resource action: pgsr02 stop on bl460g8n4 * Resource action: pgsr02 monitor on bl460g8n3 * Resource action: prmDB2 stop on bl460g8n4 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0 * Pseudo action: msPostgresql_demote_0 * Pseudo action: stonith-pgsr02-off on pgsr02 * Pseudo action: pgsql_post_notify_stop_0 * Pseudo action: pgsql_demote_0 * Pseudo action: msPostgresql_demoted_0 * Pseudo action: msPostgresql_post_notify_demoted_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0 * Pseudo action: msPostgresql_pre_notify_stop_0 * Pseudo action: master-group_stop_0 * Pseudo action: vip-rep_stop_0 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0 * Pseudo action: msPostgresql_stop_0 * Pseudo action: vip-master_stop_0 * Pseudo action: pgsql_stop_0 * Pseudo action: msPostgresql_stopped_0 * Pseudo action: master-group_stopped_0 * Pseudo action: master-group_start_0 * Resource action: vip-master start on pgsr01 * Resource action: vip-rep start on pgsr01 * Pseudo action: msPostgresql_post_notify_stopped_0 * Pseudo action: master-group_running_0 * Resource action: vip-master monitor=10000 on pgsr01 * Resource action: vip-rep monitor=10000 on pgsr01 * Resource action: pgsql notify on pgsr01 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0 * Pseudo action: pgsql_notified_0 * Resource action: pgsql monitor=9000 on pgsr01 Using the original execution date of: 2015-08-12 02:53:40Z Revised 
Cluster Status: * Node List: * Online: [ bl460g8n3 bl460g8n4 ] * GuestOnline: [ pgsr01@bl460g8n3 ] * Full List of Resources: * prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3 * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED * Resource Group: grpStonith1: * prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4 * Resource Group: grpStonith2: * prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3 * Resource Group: master-group: * vip-master (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ] * vip-rep (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ] * Clone Set: msPostgresql [pgsql] (promotable): - * Masters: [ pgsr01 ] + * Promoted: [ pgsr01 ] * Stopped: [ bl460g8n3 bl460g8n4 ] diff --git a/cts/scheduler/summary/bug-lf-1852.summary b/cts/scheduler/summary/bug-lf-1852.summary index 743679c1f5..26c73e166a 100644 --- a/cts/scheduler/summary/bug-lf-1852.summary +++ b/cts/scheduler/summary/bug-lf-1852.summary @@ -1,40 +1,40 @@ Current cluster status: * Node List: * Online: [ mysql-01 mysql-02 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ mysql-02 ] + * Promoted: [ mysql-02 ] * Stopped: [ mysql-01 ] * Resource Group: fs_mysql_ip: * fs0 (ocf:heartbeat:Filesystem): Started mysql-02 * mysqlid (lsb:mysql): Started mysql-02 * ip_resource (ocf:heartbeat:IPaddr2): Started mysql-02 Transition Summary: * Start drbd0:1 ( mysql-01 ) Executing Cluster Transition: * Pseudo action: ms-drbd0_pre_notify_start_0 * Resource action: drbd0:0 notify on mysql-02 * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd0_start_0 * Resource action: drbd0:1 start on mysql-01 * Pseudo action: ms-drbd0_running_0 * Pseudo action: ms-drbd0_post_notify_running_0 * Resource action: drbd0:0 notify on mysql-02 * Resource action: drbd0:1 notify on mysql-01 * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ mysql-01 mysql-02 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ mysql-02 ] - * Slaves: [ mysql-01 ] + * Promoted: [ mysql-02 ] + * Unpromoted: [ mysql-01 ] * Resource Group: fs_mysql_ip: * fs0 (ocf:heartbeat:Filesystem): Started mysql-02 * mysqlid (lsb:mysql): Started mysql-02 * ip_resource (ocf:heartbeat:IPaddr2): Started mysql-02 diff --git a/cts/scheduler/summary/bug-lf-2106.summary b/cts/scheduler/summary/bug-lf-2106.summary index 0cee80e49d..391b5fb10b 100644 --- a/cts/scheduler/summary/bug-lf-2106.summary +++ b/cts/scheduler/summary/bug-lf-2106.summary @@ -1,91 +1,91 @@ Current cluster status: * Node List: * Online: [ cl-virt-1 cl-virt-2 ] * Full List of Resources: * apcstonith (stonith:apcmastersnmp): Started cl-virt-1 * Clone Set: pingdclone [pingd]: * Started: [ cl-virt-1 cl-virt-2 ] * Resource Group: ssh: * ssh-ip1 (ocf:heartbeat:IPaddr2): Started cl-virt-2 * ssh-ip2 (ocf:heartbeat:IPaddr2): Started cl-virt-2 * ssh-bin (ocf:dk:opensshd): Started cl-virt-2 * itwiki (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-itwiki [drbd-itwiki] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * bugtrack (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-bugtrack [drbd-bugtrack] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * servsyslog (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-servsyslog [drbd-servsyslog] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ 
cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * smsprod2 (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-smsprod2 [drbd-smsprod2] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * medomus-cvs (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-medomus-cvs [drbd-medomus-cvs] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * infotos (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-infotos [drbd-infotos] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] Transition Summary: * Restart pingd:0 ( cl-virt-1 ) due to resource definition change * Restart pingd:1 ( cl-virt-2 ) due to resource definition change Executing Cluster Transition: * Cluster action: clear_failcount for pingd on cl-virt-1 * Cluster action: clear_failcount for pingd on cl-virt-2 * Pseudo action: pingdclone_stop_0 * Resource action: pingd:0 stop on cl-virt-1 * Resource action: pingd:0 stop on cl-virt-2 * Pseudo action: pingdclone_stopped_0 * Pseudo action: pingdclone_start_0 * Resource action: pingd:0 start on cl-virt-1 * Resource action: pingd:0 monitor=30000 on cl-virt-1 * Resource action: pingd:0 start on cl-virt-2 * Resource action: pingd:0 monitor=30000 on cl-virt-2 * Pseudo action: pingdclone_running_0 Revised Cluster Status: * Node List: * Online: [ cl-virt-1 cl-virt-2 ] * Full List of Resources: * apcstonith (stonith:apcmastersnmp): Started cl-virt-1 * Clone Set: pingdclone [pingd]: * Started: [ cl-virt-1 cl-virt-2 ] * Resource Group: ssh: * ssh-ip1 (ocf:heartbeat:IPaddr2): Started cl-virt-2 * ssh-ip2 (ocf:heartbeat:IPaddr2): Started cl-virt-2 * ssh-bin (ocf:dk:opensshd): Started cl-virt-2 * itwiki (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-itwiki [drbd-itwiki] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * bugtrack (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-bugtrack [drbd-bugtrack] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * servsyslog (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-servsyslog [drbd-servsyslog] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * smsprod2 (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-smsprod2 [drbd-smsprod2] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * medomus-cvs (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-medomus-cvs [drbd-medomus-cvs] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] * infotos (ocf:heartbeat:VirtualDomain): Started cl-virt-2 * Clone Set: ms-infotos [drbd-infotos] (promotable): - * Masters: [ cl-virt-2 ] - * Slaves: [ cl-virt-1 ] + * Promoted: [ cl-virt-2 ] + * Unpromoted: [ cl-virt-1 ] diff --git a/cts/scheduler/summary/bug-lf-2153.summary b/cts/scheduler/summary/bug-lf-2153.summary index ad4ff326ce..8b4d223eed 100644 --- a/cts/scheduler/summary/bug-lf-2153.summary +++ b/cts/scheduler/summary/bug-lf-2153.summary @@ -1,59 +1,59 @@ Current cluster status: * Node List: * Node bob: standby 
(with active resources) * Online: [ alice ] * Full List of Resources: * Clone Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] (promotable): - * Masters: [ alice ] - * Slaves: [ bob ] + * Promoted: [ alice ] + * Unpromoted: [ bob ] * Clone Set: cl_tgtd [res_tgtd]: * Started: [ alice bob ] * Resource Group: rg_iscsivg01: * res_portblock_iscsivg01_block (ocf:heartbeat:portblock): Started alice * res_lvm_iscsivg01 (ocf:heartbeat:LVM): Started alice * res_target_iscsivg01 (ocf:heartbeat:iSCSITarget): Started alice * res_lu_iscsivg01_lun1 (ocf:heartbeat:iSCSILogicalUnit): Started alice * res_lu_iscsivg01_lun2 (ocf:heartbeat:iSCSILogicalUnit): Started alice * res_ip_alicebob01 (ocf:heartbeat:IPaddr2): Started alice * res_portblock_iscsivg01_unblock (ocf:heartbeat:portblock): Started alice Transition Summary: - * Stop res_drbd_iscsivg01:0 ( Slave bob ) due to node availability + * Stop res_drbd_iscsivg01:0 ( Unpromoted bob ) due to node availability * Stop res_tgtd:0 ( bob ) due to node availability Executing Cluster Transition: * Pseudo action: ms_drbd_iscsivg01_pre_notify_stop_0 * Pseudo action: cl_tgtd_stop_0 * Resource action: res_drbd_iscsivg01:0 notify on bob * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_iscsivg01_stop_0 * Resource action: res_tgtd:0 stop on bob * Pseudo action: cl_tgtd_stopped_0 * Resource action: res_drbd_iscsivg01:0 stop on bob * Pseudo action: ms_drbd_iscsivg01_stopped_0 * Pseudo action: ms_drbd_iscsivg01_post_notify_stopped_0 * Resource action: res_drbd_iscsivg01:1 notify on alice * Pseudo action: ms_drbd_iscsivg01_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Node bob: standby * Online: [ alice ] * Full List of Resources: * Clone Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] (promotable): - * Masters: [ alice ] + * Promoted: [ alice ] * Stopped: [ bob ] * Clone Set: cl_tgtd [res_tgtd]: * Started: [ alice ] * Stopped: [ bob ] * Resource Group: rg_iscsivg01: * res_portblock_iscsivg01_block (ocf:heartbeat:portblock): Started alice * res_lvm_iscsivg01 (ocf:heartbeat:LVM): Started alice * res_target_iscsivg01 (ocf:heartbeat:iSCSITarget): Started alice * res_lu_iscsivg01_lun1 (ocf:heartbeat:iSCSILogicalUnit): Started alice * res_lu_iscsivg01_lun2 (ocf:heartbeat:iSCSILogicalUnit): Started alice * res_ip_alicebob01 (ocf:heartbeat:IPaddr2): Started alice * res_portblock_iscsivg01_unblock (ocf:heartbeat:portblock): Started alice diff --git a/cts/scheduler/summary/bug-lf-2317.summary b/cts/scheduler/summary/bug-lf-2317.summary index c7c4276a97..96603fdee7 100644 --- a/cts/scheduler/summary/bug-lf-2317.summary +++ b/cts/scheduler/summary/bug-lf-2317.summary @@ -1,36 +1,36 @@ Current cluster status: * Node List: * Online: [ ibm1.isg.si ibm2.isg.si ] * Full List of Resources: * HostingIsg (ocf:heartbeat:Xen): Started ibm2.isg.si * Clone Set: ms_drbd_r0 [drbd_r0] (promotable): - * Masters: [ ibm2.isg.si ] - * Slaves: [ ibm1.isg.si ] + * Promoted: [ ibm2.isg.si ] + * Unpromoted: [ ibm1.isg.si ] Transition Summary: - * Promote drbd_r0:1 ( Slave -> Master ibm1.isg.si ) + * Promote drbd_r0:1 ( Unpromoted -> Promoted ibm1.isg.si ) Executing Cluster Transition: * Resource action: drbd_r0:0 cancel=30000 on ibm1.isg.si * Pseudo action: ms_drbd_r0_pre_notify_promote_0 * Resource action: drbd_r0:1 notify on ibm2.isg.si * Resource action: drbd_r0:0 notify on ibm1.isg.si * Pseudo action: ms_drbd_r0_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_r0_promote_0 * Resource 
action: drbd_r0:0 promote on ibm1.isg.si * Pseudo action: ms_drbd_r0_promoted_0 * Pseudo action: ms_drbd_r0_post_notify_promoted_0 * Resource action: drbd_r0:1 notify on ibm2.isg.si * Resource action: drbd_r0:0 notify on ibm1.isg.si * Pseudo action: ms_drbd_r0_confirmed-post_notify_promoted_0 * Resource action: drbd_r0:0 monitor=15000 on ibm1.isg.si Revised Cluster Status: * Node List: * Online: [ ibm1.isg.si ibm2.isg.si ] * Full List of Resources: * HostingIsg (ocf:heartbeat:Xen): Started ibm2.isg.si * Clone Set: ms_drbd_r0 [drbd_r0] (promotable): - * Masters: [ ibm1.isg.si ibm2.isg.si ] + * Promoted: [ ibm1.isg.si ibm2.isg.si ] diff --git a/cts/scheduler/summary/bug-lf-2358.summary b/cts/scheduler/summary/bug-lf-2358.summary index f1b3e7dc68..7c2c3d220b 100644 --- a/cts/scheduler/summary/bug-lf-2358.summary +++ b/cts/scheduler/summary/bug-lf-2358.summary @@ -1,68 +1,68 @@ 2 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ alice.demo bob.demo ] * Full List of Resources: * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled): * Stopped (disabled): [ alice.demo bob.demo ] * Resource Group: rg_nfs: * res_fs_nfsexport (ocf:heartbeat:Filesystem): Stopped * res_ip_nfs (ocf:heartbeat:IPaddr2): Stopped * res_nfs (lsb:nfs): Stopped * Resource Group: rg_mysql1: * res_fs_mysql1 (ocf:heartbeat:Filesystem): Started bob.demo * res_ip_mysql1 (ocf:heartbeat:IPaddr2): Started bob.demo * res_mysql1 (ocf:heartbeat:mysql): Started bob.demo * Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable): - * Masters: [ bob.demo ] + * Promoted: [ bob.demo ] * Stopped: [ alice.demo ] * Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable): - * Masters: [ alice.demo ] - * Slaves: [ bob.demo ] + * Promoted: [ alice.demo ] + * Unpromoted: [ bob.demo ] * Resource Group: rg_mysql2: * res_fs_mysql2 (ocf:heartbeat:Filesystem): Started alice.demo * res_ip_mysql2 (ocf:heartbeat:IPaddr2): Started alice.demo * res_mysql2 (ocf:heartbeat:mysql): Started alice.demo Transition Summary: * Start res_drbd_mysql1:1 ( alice.demo ) Executing Cluster Transition: * Pseudo action: ms_drbd_mysql1_pre_notify_start_0 * Resource action: res_drbd_mysql1:0 notify on bob.demo * Pseudo action: ms_drbd_mysql1_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_mysql1_start_0 * Resource action: res_drbd_mysql1:1 start on alice.demo * Pseudo action: ms_drbd_mysql1_running_0 * Pseudo action: ms_drbd_mysql1_post_notify_running_0 * Resource action: res_drbd_mysql1:0 notify on bob.demo * Resource action: res_drbd_mysql1:1 notify on alice.demo * Pseudo action: ms_drbd_mysql1_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ alice.demo bob.demo ] * Full List of Resources: * Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable) (disabled): * Stopped (disabled): [ alice.demo bob.demo ] * Resource Group: rg_nfs: * res_fs_nfsexport (ocf:heartbeat:Filesystem): Stopped * res_ip_nfs (ocf:heartbeat:IPaddr2): Stopped * res_nfs (lsb:nfs): Stopped * Resource Group: rg_mysql1: * res_fs_mysql1 (ocf:heartbeat:Filesystem): Started bob.demo * res_ip_mysql1 (ocf:heartbeat:IPaddr2): Started bob.demo * res_mysql1 (ocf:heartbeat:mysql): Started bob.demo * Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable): - * Masters: [ bob.demo ] - * Slaves: [ alice.demo ] + * Promoted: [ bob.demo ] + * Unpromoted: [ alice.demo ] * Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable): - * Masters: [ alice.demo ] - * Slaves: [ bob.demo 
] + * Promoted: [ alice.demo ] + * Unpromoted: [ bob.demo ] * Resource Group: rg_mysql2: * res_fs_mysql2 (ocf:heartbeat:Filesystem): Started alice.demo * res_ip_mysql2 (ocf:heartbeat:IPaddr2): Started alice.demo * res_mysql2 (ocf:heartbeat:mysql): Started alice.demo diff --git a/cts/scheduler/summary/bug-lf-2361.summary b/cts/scheduler/summary/bug-lf-2361.summary index ef52fdc4a9..2d0b692be3 100644 --- a/cts/scheduler/summary/bug-lf-2361.summary +++ b/cts/scheduler/summary/bug-lf-2361.summary @@ -1,44 +1,44 @@ Current cluster status: * Node List: * Online: [ alice.demo bob.demo ] * Full List of Resources: * dummy1 (ocf:heartbeat:Dummy): Stopped * Clone Set: ms_stateful [stateful] (promotable): * Stopped: [ alice.demo bob.demo ] * Clone Set: cl_dummy2 [dummy2]: * Stopped: [ alice.demo bob.demo ] Transition Summary: * Start stateful:0 ( alice.demo ) * Start stateful:1 ( bob.demo ) * Start dummy2:0 ( alice.demo ) due to unrunnable dummy1 start (blocked) * Start dummy2:1 ( bob.demo ) due to unrunnable dummy1 start (blocked) Executing Cluster Transition: * Pseudo action: ms_stateful_pre_notify_start_0 * Resource action: service2:0 delete on alice.demo * Resource action: service2:0 delete on bob.demo * Resource action: service2:1 delete on bob.demo * Resource action: service1 delete on alice.demo * Resource action: service1 delete on bob.demo * Pseudo action: ms_stateful_confirmed-pre_notify_start_0 * Pseudo action: ms_stateful_start_0 * Resource action: stateful:0 start on alice.demo * Resource action: stateful:1 start on bob.demo * Pseudo action: ms_stateful_running_0 * Pseudo action: ms_stateful_post_notify_running_0 * Resource action: stateful:0 notify on alice.demo * Resource action: stateful:1 notify on bob.demo * Pseudo action: ms_stateful_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ alice.demo bob.demo ] * Full List of Resources: * dummy1 (ocf:heartbeat:Dummy): Stopped * Clone Set: ms_stateful [stateful] (promotable): - * Slaves: [ alice.demo bob.demo ] + * Unpromoted: [ alice.demo bob.demo ] * Clone Set: cl_dummy2 [dummy2]: * Stopped: [ alice.demo bob.demo ] diff --git a/cts/scheduler/summary/bug-lf-2493.summary b/cts/scheduler/summary/bug-lf-2493.summary index 54e7a89186..35749b2e8b 100644 --- a/cts/scheduler/summary/bug-lf-2493.summary +++ b/cts/scheduler/summary/bug-lf-2493.summary @@ -1,66 +1,66 @@ Current cluster status: * Node List: * Online: [ hpn07 hpn08 ] * Full List of Resources: * p_dummy1 (ocf:pacemaker:Dummy): Started hpn07 * p_dummy2 (ocf:pacemaker:Dummy): Stopped * p_dummy4 (ocf:pacemaker:Dummy): Stopped * p_dummy3 (ocf:pacemaker:Dummy): Stopped * Clone Set: ms_stateful1 [p_stateful1] (promotable): - * Masters: [ hpn07 ] - * Slaves: [ hpn08 ] + * Promoted: [ hpn07 ] + * Unpromoted: [ hpn08 ] Transition Summary: * Start p_dummy2 ( hpn08 ) * Start p_dummy4 ( hpn07 ) * Start p_dummy3 ( hpn08 ) Executing Cluster Transition: * Resource action: p_dummy2 start on hpn08 * Resource action: p_dummy3 start on hpn08 * Resource action: res_Filesystem_nfs_fs1 delete on hpn08 * Resource action: res_Filesystem_nfs_fs1 delete on hpn07 * Resource action: res_drbd_nfs:0 delete on hpn08 * Resource action: res_drbd_nfs:0 delete on hpn07 * Resource action: res_Filesystem_nfs_fs2 delete on hpn08 * Resource action: res_Filesystem_nfs_fs2 delete on hpn07 * Resource action: res_Filesystem_nfs_fs3 delete on hpn08 * Resource action: res_Filesystem_nfs_fs3 delete on hpn07 * Resource action: res_exportfs_fs1 delete on hpn08 * Resource action: res_exportfs_fs1 delete 
on hpn07 * Resource action: res_exportfs_fs2 delete on hpn08 * Resource action: res_exportfs_fs2 delete on hpn07 * Resource action: res_exportfs_fs3 delete on hpn08 * Resource action: res_exportfs_fs3 delete on hpn07 * Resource action: res_drbd_nfs:1 delete on hpn08 * Resource action: res_drbd_nfs:1 delete on hpn07 * Resource action: res_LVM_nfs delete on hpn08 * Resource action: res_LVM_nfs delete on hpn07 * Resource action: res_LVM_p_vg-sap delete on hpn08 * Resource action: res_LVM_p_vg-sap delete on hpn07 * Resource action: res_exportfs_rootfs:0 delete on hpn07 * Resource action: res_IPaddr2_nfs delete on hpn08 * Resource action: res_IPaddr2_nfs delete on hpn07 * Resource action: res_drbd_hpn78:0 delete on hpn08 * Resource action: res_drbd_hpn78:0 delete on hpn07 * Resource action: res_Filesystem_sap_db delete on hpn08 * Resource action: res_Filesystem_sap_db delete on hpn07 * Resource action: res_Filesystem_sap_ci delete on hpn08 * Resource action: res_Filesystem_sap_ci delete on hpn07 * Resource action: res_exportfs_rootfs:1 delete on hpn08 * Resource action: res_drbd_hpn78:1 delete on hpn08 * Resource action: p_dummy4 start on hpn07 Revised Cluster Status: * Node List: * Online: [ hpn07 hpn08 ] * Full List of Resources: * p_dummy1 (ocf:pacemaker:Dummy): Started hpn07 * p_dummy2 (ocf:pacemaker:Dummy): Started hpn08 * p_dummy4 (ocf:pacemaker:Dummy): Started hpn07 * p_dummy3 (ocf:pacemaker:Dummy): Started hpn08 * Clone Set: ms_stateful1 [p_stateful1] (promotable): - * Masters: [ hpn07 ] - * Slaves: [ hpn08 ] + * Promoted: [ hpn07 ] + * Unpromoted: [ hpn08 ] diff --git a/cts/scheduler/summary/bug-lf-2544.summary b/cts/scheduler/summary/bug-lf-2544.summary index ce1ec38245..b21de80b40 100644 --- a/cts/scheduler/summary/bug-lf-2544.summary +++ b/cts/scheduler/summary/bug-lf-2544.summary @@ -1,24 +1,24 @@ Current cluster status: * Node List: * Online: [ node-0 node-1 ] * Full List of Resources: * Clone Set: ms0 [s0] (promotable): - * Slaves: [ node-0 node-1 ] + * Unpromoted: [ node-0 node-1 ] Transition Summary: - * Promote s0:1 ( Slave -> Master node-1 ) + * Promote s0:1 ( Unpromoted -> Promoted node-1 ) Executing Cluster Transition: * Pseudo action: ms0_promote_0 * Resource action: s0:1 promote on node-1 * Pseudo action: ms0_promoted_0 Revised Cluster Status: * Node List: * Online: [ node-0 node-1 ] * Full List of Resources: * Clone Set: ms0 [s0] (promotable): - * Masters: [ node-1 ] - * Slaves: [ node-0 ] + * Promoted: [ node-1 ] + * Unpromoted: [ node-0 ] diff --git a/cts/scheduler/summary/bug-lf-2606.summary b/cts/scheduler/summary/bug-lf-2606.summary index c4b279b793..e0b7ebf0e6 100644 --- a/cts/scheduler/summary/bug-lf-2606.summary +++ b/cts/scheduler/summary/bug-lf-2606.summary @@ -1,46 +1,46 @@ 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Node node2: UNCLEAN (online) * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): FAILED node2 (disabled) * rsc2 (ocf:pacemaker:Dummy): Started node2 * Clone Set: ms3 [rsc3] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] Transition Summary: * Fence (reboot) node2 'rsc1 failed there' * Stop rsc1 ( node2 ) due to node availability * Move rsc2 ( node2 -> node1 ) - * Stop rsc3:1 ( Master node2 ) due to node availability + * Stop rsc3:1 ( Promoted node2 ) due to node availability Executing Cluster Transition: * Pseudo action: ms3_demote_0 * Fencing 
node2 (reboot) * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc3:1_demote_0 * Pseudo action: ms3_demoted_0 * Pseudo action: ms3_stop_0 * Resource action: rsc2 start on node1 * Pseudo action: rsc3:1_stop_0 * Pseudo action: ms3_stopped_0 * Resource action: rsc2 monitor=10000 on node1 Revised Cluster Status: * Node List: * Online: [ node1 ] * OFFLINE: [ node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped (disabled) * rsc2 (ocf:pacemaker:Dummy): Started node1 * Clone Set: ms3 [rsc3] (promotable): - * Slaves: [ node1 ] + * Unpromoted: [ node1 ] * Stopped: [ node2 ] diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary index eabe980673..f638b3fc4f 100644 --- a/cts/scheduler/summary/bug-pm-11.summary +++ b/cts/scheduler/summary/bug-pm-11.summary @@ -1,48 +1,48 @@ Current cluster status: * Node List: * Online: [ node-a node-b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Slave node-b + * stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master node-a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a * stateful-2:1 (ocf:heartbeat:Stateful): Stopped Transition Summary: * Start stateful-2:0 ( node-b ) - * Promote stateful-2:1 ( Stopped -> Master node-a ) + * Promote stateful-2:1 ( Stopped -> Promoted node-a ) Executing Cluster Transition: * Resource action: stateful-2:0 monitor on node-b * Resource action: stateful-2:0 monitor on node-a * Resource action: stateful-2:1 monitor on node-b * Resource action: stateful-2:1 monitor on node-a * Pseudo action: ms-sf_start_0 * Pseudo action: group:0_start_0 * Resource action: stateful-2:0 start on node-b * Pseudo action: group:1_start_0 * Resource action: stateful-2:1 start on node-a * Pseudo action: group:0_running_0 * Pseudo action: group:1_running_0 * Pseudo action: ms-sf_running_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-2:1 promote on node-a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: * Node List: * Online: [ node-a node-b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Slave node-b - * stateful-2:0 (ocf:heartbeat:Stateful): Slave node-b + * stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b + * stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master node-a - * stateful-2:1 (ocf:heartbeat:Stateful): Master node-a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a + * stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary index 4beeaed9a7..7b811d1a02 100644 --- a/cts/scheduler/summary/bug-pm-12.summary +++ b/cts/scheduler/summary/bug-pm-12.summary @@ -1,57 +1,57 @@ Current cluster status: * Node List: * Online: [ node-a node-b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Slave node-b - * stateful-2:0 (ocf:heartbeat:Stateful): Slave node-b + * stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b + * 
stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master node-a - * stateful-2:1 (ocf:heartbeat:Stateful): Master node-a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a + * stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a Transition Summary: - * Restart stateful-2:0 ( Slave node-b ) due to resource definition change - * Restart stateful-2:1 ( Master node-a ) due to resource definition change + * Restart stateful-2:0 ( Unpromoted node-b ) due to resource definition change + * Restart stateful-2:1 ( Promoted node-a ) due to resource definition change Executing Cluster Transition: * Pseudo action: ms-sf_demote_0 * Pseudo action: group:1_demote_0 * Resource action: stateful-2:1 demote on node-a * Pseudo action: group:1_demoted_0 * Pseudo action: ms-sf_demoted_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-2:0 stop on node-b * Pseudo action: group:1_stop_0 * Resource action: stateful-2:1 stop on node-a * Pseudo action: group:0_stopped_0 * Pseudo action: group:1_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_start_0 * Pseudo action: group:0_start_0 * Resource action: stateful-2:0 start on node-b * Pseudo action: group:1_start_0 * Resource action: stateful-2:1 start on node-a * Pseudo action: group:0_running_0 * Pseudo action: group:1_running_0 * Pseudo action: ms-sf_running_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-2:1 promote on node-a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: * Node List: * Online: [ node-a node-b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: - * stateful-1:0 (ocf:heartbeat:Stateful): Slave node-b - * stateful-2:0 (ocf:heartbeat:Stateful): Slave node-b + * stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b + * stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master node-a - * stateful-2:1 (ocf:heartbeat:Stateful): Master node-a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a + * stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a diff --git a/cts/scheduler/summary/bug-rh-880249.summary b/cts/scheduler/summary/bug-rh-880249.summary index 326eabfdc5..4cf3fe8bff 100644 --- a/cts/scheduler/summary/bug-rh-880249.summary +++ b/cts/scheduler/summary/bug-rh-880249.summary @@ -1,29 +1,29 @@ Current cluster status: * Node List: * Online: [ 18node1 18node2 18node3 ] * Full List of Resources: * shoot1 (stonith:fence_xvm): Started 18node1 * shoot2 (stonith:fence_xvm): Started 18node2 - * dummystateful (ocf:pacemaker:Stateful): Master [ 18node2 18node1 18node3 ] + * dummystateful (ocf:pacemaker:Stateful): Promoted [ 18node2 18node1 18node3 ] Transition Summary: - * Move dummystateful ( Master 18node2 -> Started 18node3 ) + * Move dummystateful ( Promoted 18node2 -> Started 18node3 ) Executing Cluster Transition: * Resource action: dummystateful demote on 18node3 * Resource action: dummystateful demote on 18node1 * Resource action: dummystateful demote on 18node2 * Resource action: dummystateful stop on 18node3 * Resource action: dummystateful stop on 18node1 * Resource action: dummystateful stop on 18node2 * Resource action: dummystateful start on 18node3 Revised Cluster Status: * Node List: * Online: [ 18node1 18node2 18node3 ] * Full List of Resources: * shoot1 
(stonith:fence_xvm): Started 18node1 * shoot2 (stonith:fence_xvm): Started 18node2 * dummystateful (ocf:pacemaker:Stateful): Started 18node3 diff --git a/cts/scheduler/summary/bundle-order-fencing.summary b/cts/scheduler/summary/bundle-order-fencing.summary index efa3de4c41..387c05532a 100644 --- a/cts/scheduler/summary/bundle-order-fencing.summary +++ b/cts/scheduler/summary/bundle-order-fencing.summary @@ -1,220 +1,220 @@ Using the original execution date of: 2017-09-12 10:51:59Z Current cluster status: * Node List: * Node controller-0: UNCLEAN (offline) * Online: [ controller-1 controller-2 ] * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED controller-0 (UNCLEAN) * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Master controller-0 (UNCLEAN) - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted controller-0 (UNCLEAN) + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): FAILED Master controller-0 (UNCLEAN) - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted controller-0 (UNCLEAN) + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-0 (UNCLEAN) * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 (UNCLEAN) * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2 * stonith-fence_ipmilan-525400efba5c (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-0 (UNCLEAN) * stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-0 (UNCLEAN) Transition Summary: * Fence (off) redis-bundle-0 (resource: redis-bundle-docker-0) 'guest is unclean' * Fence (off) rabbitmq-bundle-0 (resource: rabbitmq-bundle-docker-0) 'guest is unclean' * Fence (off) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest 
is unclean' * Fence (reboot) controller-0 'peer is no longer part of the cluster' * Stop rabbitmq-bundle-docker-0 ( controller-0 ) due to node availability * Stop rabbitmq-bundle-0 ( controller-0 ) due to unrunnable rabbitmq-bundle-docker-0 start * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start * Stop galera-bundle-docker-0 ( controller-0 ) due to node availability * Stop galera-bundle-0 ( controller-0 ) due to unrunnable galera-bundle-docker-0 start - * Stop galera:0 ( Master galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start + * Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start * Stop redis-bundle-docker-0 ( controller-0 ) due to node availability * Stop redis-bundle-0 ( controller-0 ) due to unrunnable redis-bundle-docker-0 start - * Stop redis:0 ( Master redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start - * Promote redis:1 ( Slave -> Master redis-bundle-1 ) + * Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start + * Promote redis:1 ( Unpromoted -> Promoted redis-bundle-1 ) * Move ip-192.168.24.7 ( controller-0 -> controller-2 ) * Move ip-10.0.0.109 ( controller-0 -> controller-1 ) * Move ip-172.17.4.11 ( controller-0 -> controller-1 ) * Stop haproxy-bundle-docker-0 ( controller-0 ) due to node availability * Move stonith-fence_ipmilan-5254003e8e97 ( controller-0 -> controller-1 ) * Move stonith-fence_ipmilan-5254000dcb3f ( controller-0 -> controller-2 ) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 * Pseudo action: rabbitmq-bundle-0_stop_0 * Resource action: rabbitmq-bundle-0 monitor on controller-2 * Resource action: rabbitmq-bundle-0 monitor on controller-1 * Resource action: rabbitmq-bundle-1 monitor on controller-2 * Resource action: rabbitmq-bundle-2 monitor on controller-1 * Pseudo action: galera-bundle-0_stop_0 * Resource action: galera-bundle-0 monitor on controller-2 * Resource action: galera-bundle-0 monitor on controller-1 * Resource action: galera-bundle-1 monitor on controller-2 * Resource action: galera-bundle-2 monitor on controller-1 * Resource action: redis cancel=45000 on redis-bundle-1 * Resource action: redis cancel=60000 on redis-bundle-1 * Pseudo action: redis-bundle-master_pre_notify_demote_0 * Pseudo action: redis-bundle-0_stop_0 * Resource action: redis-bundle-0 monitor on controller-2 * Resource action: redis-bundle-0 monitor on controller-1 * Resource action: redis-bundle-1 monitor on controller-2 * Resource action: redis-bundle-2 monitor on controller-1 * Pseudo action: stonith-fence_ipmilan-5254003e8e97_stop_0 * Pseudo action: stonith-fence_ipmilan-5254000dcb3f_stop_0 * Pseudo action: haproxy-bundle_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Pseudo action: rabbitmq-bundle_start_0 * Fencing controller-0 (reboot) * Resource action: rabbitmq notify on rabbitmq-bundle-1 * Resource action: rabbitmq notify on rabbitmq-bundle-2 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 * Pseudo action: rabbitmq-bundle-docker-0_stop_0 * Pseudo action: galera-bundle-master_demote_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Pseudo action: haproxy-bundle-docker-0_stop_0 * Resource action: stonith-fence_ipmilan-5254003e8e97 
start on controller-1 * Resource action: stonith-fence_ipmilan-5254000dcb3f start on controller-2 * Pseudo action: stonith-redis-bundle-0-off on redis-bundle-0 * Pseudo action: stonith-rabbitmq-bundle-0-off on rabbitmq-bundle-0 * Pseudo action: stonith-galera-bundle-0-off on galera-bundle-0 * Pseudo action: haproxy-bundle_stopped_0 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Pseudo action: galera_demote_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: redis_post_notify_stop_0 * Pseudo action: redis_demote_0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: ip-192.168.24.7_stop_0 * Pseudo action: ip-10.0.0.109_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: stonith-fence_ipmilan-5254003e8e97 monitor=60000 on controller-1 * Resource action: stonith-fence_ipmilan-5254000dcb3f monitor=60000 on controller-2 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: galera-bundle-docker-0_stop_0 * Pseudo action: redis-bundle-master_post_notify_demoted_0 * Resource action: ip-192.168.24.7 start on controller-2 * Resource action: ip-10.0.0.109 start on controller-1 * Resource action: ip-172.17.4.11 start on controller-1 * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 * Pseudo action: galera_stop_0 * Pseudo action: galera-bundle-master_stopped_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0 * Pseudo action: redis-bundle-master_pre_notify_stop_0 * Resource action: ip-192.168.24.7 monitor=10000 on controller-2 * Resource action: ip-10.0.0.109 monitor=10000 on controller-1 * Resource action: ip-172.17.4.11 monitor=10000 on controller-1 * Pseudo action: redis-bundle_demoted_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: galera-bundle_stopped_0 * Resource action: rabbitmq notify on rabbitmq-bundle-1 * Resource action: rabbitmq notify on rabbitmq-bundle-2 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Pseudo action: galera-bundle-master_running_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Pseudo action: redis-bundle-docker-0_stop_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Pseudo action: redis_stop_0 * Pseudo action: redis-bundle-master_stopped_0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Pseudo action: redis-bundle-master_post_notify_stopped_0 * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Pseudo action: redis-bundle_stopped_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Pseudo action: redis_notified_0 * Pseudo action: 
redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: redis-bundle-master_start_0 * Pseudo action: rabbitmq-bundle_running_0 * Pseudo action: redis-bundle-master_running_0 * Pseudo action: redis-bundle-master_post_notify_running_0 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: redis-bundle-master_pre_notify_promote_0 * Pseudo action: redis-bundle_promote_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: redis promote on redis-bundle-1 * Pseudo action: redis-bundle-master_promoted_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Resource action: redis monitor=20000 on redis-bundle-1 Using the original execution date of: 2017-09-12 10:51:59Z Revised Cluster Status: * Node List: * Online: [ controller-1 controller-2 ] * OFFLINE: [ controller-0 ] * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): FAILED * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Master - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): FAILED Master - * redis-bundle-1 (ocf:heartbeat:redis): Master controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): FAILED Promoted + * redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.7 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2 * stonith-fence_ipmilan-525400efba5c 
(stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254003e8e97 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254000dcb3f (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/bundle-order-partial-start-2.summary b/cts/scheduler/summary/bundle-order-partial-start-2.summary index 50da2d197a..7575a2511e 100644 --- a/cts/scheduler/summary/bundle-order-partial-start-2.summary +++ b/cts/scheduler/summary/bundle-order-partial-start-2.summary @@ -1,100 +1,100 @@ Current cluster status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud Transition Summary: * Start rabbitmq:0 ( rabbitmq-bundle-0 ) * Restart galera-bundle-docker-0 ( undercloud ) due to required haproxy-bundle running * Restart galera-bundle-0 ( undercloud ) due to required galera-bundle-docker-0 start * Start galera:0 ( galera-bundle-0 ) - * Promote redis:0 ( Slave -> Master redis-bundle-0 ) + * Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 ) * Start haproxy-bundle-docker-0 ( undercloud ) Executing Cluster Transition: * Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Resource action: galera-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_pre_notify_promote_0 * Resource action: haproxy-bundle-docker-0 monitor on undercloud * Pseudo action: haproxy-bundle_start_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: haproxy-bundle-docker-0 start on undercloud * Pseudo action: haproxy-bundle_running_0 * Pseudo action: galera-bundle_stopped_0 * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Resource action: 
redis promote on redis-bundle-0 * Pseudo action: redis-bundle-master_promoted_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Resource action: redis monitor=20000 on redis-bundle-0 * Pseudo action: galera-bundle_start_0 * Resource action: galera-bundle-docker-0 start on undercloud * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud * Resource action: galera-bundle-0 start on undercloud * Resource action: galera-bundle-0 monitor=30000 on undercloud * Resource action: galera:0 monitor on galera-bundle-0 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera:0 start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 Revised Cluster Status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Slave undercloud + * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud diff --git a/cts/scheduler/summary/bundle-order-partial-start.summary b/cts/scheduler/summary/bundle-order-partial-start.summary index 4eab75b166..3c45f4f974 100644 --- a/cts/scheduler/summary/bundle-order-partial-start.summary +++ b/cts/scheduler/summary/bundle-order-partial-start.summary @@ -1,97 +1,97 @@ Current cluster status: * Node List: * Online: [ undercloud ] * GuestOnline: [ rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 
(ocf:heartbeat:rabbitmq-cluster): Stopped undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud Transition Summary: * Start rabbitmq:0 ( rabbitmq-bundle-0 ) * Start galera-bundle-docker-0 ( undercloud ) * Start galera-bundle-0 ( undercloud ) * Start galera:0 ( galera-bundle-0 ) - * Promote redis:0 ( Slave -> Master redis-bundle-0 ) + * Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 ) * Start haproxy-bundle-docker-0 ( undercloud ) Executing Cluster Transition: * Resource action: rabbitmq:0 monitor on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Resource action: galera-bundle-docker-0 monitor on undercloud * Pseudo action: redis-bundle-master_pre_notify_promote_0 * Resource action: haproxy-bundle-docker-0 monitor on undercloud * Pseudo action: haproxy-bundle_start_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: haproxy-bundle-docker-0 start on undercloud * Pseudo action: haproxy-bundle_running_0 * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Resource action: redis promote on redis-bundle-0 * Pseudo action: redis-bundle-master_promoted_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Resource action: redis monitor=20000 on redis-bundle-0 * Pseudo action: galera-bundle_start_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera-bundle-docker-0 start on undercloud * Resource action: galera-bundle-0 monitor on undercloud * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud * Resource action: galera-bundle-0 start on undercloud * Resource action: 
galera:0 start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Resource action: galera-bundle-0 monitor=30000 on undercloud * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 Revised Cluster Status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Slave undercloud + * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud diff --git a/cts/scheduler/summary/bundle-order-partial-stop.summary b/cts/scheduler/summary/bundle-order-partial-stop.summary index bbb9df15b9..0954c59992 100644 --- a/cts/scheduler/summary/bundle-order-partial-stop.summary +++ b/cts/scheduler/summary/bundle-order-partial-stop.summary @@ -1,127 +1,127 @@ Current cluster status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master undercloud + * galera-bundle-0 (ocf:heartbeat:galera): Promoted undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud * 
Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud Transition Summary: * Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability * Stop rabbitmq-bundle-0 ( undercloud ) due to node availability * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 start * Stop galera-bundle-docker-0 ( undercloud ) due to node availability * Stop galera-bundle-0 ( undercloud ) due to node availability - * Stop galera:0 ( Master galera-bundle-0 ) due to unrunnable galera-bundle-0 start + * Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start * Stop redis-bundle-docker-0 ( undercloud ) due to node availability * Stop redis-bundle-0 ( undercloud ) due to node availability - * Stop redis:0 ( Master redis-bundle-0 ) due to unrunnable redis-bundle-0 start + * Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start * Stop ip-192.168.122.254 ( undercloud ) due to node availability * Stop ip-192.168.122.250 ( undercloud ) due to node availability * Stop ip-192.168.122.249 ( undercloud ) due to node availability * Stop ip-192.168.122.253 ( undercloud ) due to node availability * Stop ip-192.168.122.247 ( undercloud ) due to node availability * Stop ip-192.168.122.248 ( undercloud ) due to node availability * Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability * Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: redis-bundle-master_pre_notify_demote_0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: rabbitmq notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Pseudo action: galera-bundle-master_demote_0 * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Resource action: galera demote on galera-bundle-0 * Pseudo action: galera-bundle-master_demoted_0 * Resource action: redis demote on redis-bundle-0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: redis-bundle-master_post_notify_demoted_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 * Resource action: galera stop on galera-bundle-0 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-0 stop on undercloud * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0 * Pseudo action: 
redis-bundle-master_pre_notify_stop_0 * Pseudo action: redis-bundle_demoted_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on redis-bundle-0 * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_post_notify_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud Revised Cluster Status: * Node List: * Online: [ undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped diff --git a/cts/scheduler/summary/bundle-order-startup-clone-2.summary b/cts/scheduler/summary/bundle-order-startup-clone-2.summary index a733392080..cb63d78fd1 100644 --- a/cts/scheduler/summary/bundle-order-startup-clone-2.summary +++ b/cts/scheduler/summary/bundle-order-startup-clone-2.summary @@ -1,213 +1,213 @@ Current cluster status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * galera-bundle-1 (ocf:heartbeat:galera): Stopped * galera-bundle-2 (ocf:heartbeat:galera): Stopped * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-1 
(ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Stopped * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped * redis-bundle-1 (ocf:heartbeat:redis): Stopped * redis-bundle-2 (ocf:heartbeat:redis): Stopped Transition Summary: * Start storage:0 ( metal-1 ) * Start storage:1 ( metal-2 ) * Start storage:2 ( metal-3 ) * Start galera-bundle-docker-0 ( metal-1 ) * Start galera-bundle-0 ( metal-1 ) * Start galera:0 ( galera-bundle-0 ) * Start galera-bundle-docker-1 ( metal-2 ) * Start galera-bundle-1 ( metal-2 ) * Start galera:1 ( galera-bundle-1 ) * Start galera-bundle-docker-2 ( metal-3 ) * Start galera-bundle-2 ( metal-3 ) * Start galera:2 ( galera-bundle-2 ) * Start haproxy-bundle-docker-0 ( metal-1 ) * Start haproxy-bundle-docker-1 ( metal-2 ) * Start haproxy-bundle-docker-2 ( metal-3 ) * Start redis-bundle-docker-0 ( metal-1 ) * Start redis-bundle-0 ( metal-1 ) - * Promote redis:0 ( Stopped -> Master redis-bundle-0 ) + * Promote redis:0 ( Stopped -> Promoted redis-bundle-0 ) * Start redis-bundle-docker-1 ( metal-2 ) * Start redis-bundle-1 ( metal-2 ) - * Promote redis:1 ( Stopped -> Master redis-bundle-1 ) + * Promote redis:1 ( Stopped -> Promoted redis-bundle-1 ) * Start redis-bundle-docker-2 ( metal-3 ) * Start redis-bundle-2 ( metal-3 ) - * Promote redis:2 ( Stopped -> Master redis-bundle-2 ) + * Promote redis:2 ( Stopped -> Promoted redis-bundle-2 ) Executing Cluster Transition: * Resource action: storage:0 monitor on metal-1 * Resource action: storage:1 monitor on metal-2 * Resource action: storage:2 monitor on metal-3 * Pseudo action: storage-clone_pre_notify_start_0 * Resource action: galera-bundle-docker-0 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-2 * Resource action: galera-bundle-docker-0 monitor on metal-1 * Resource action: galera-bundle-docker-1 monitor on metal-3 * Resource action: galera-bundle-docker-1 monitor on metal-2 * Resource action: galera-bundle-docker-1 monitor on metal-1 * Resource action: galera-bundle-docker-2 monitor on metal-3 * Resource action: galera-bundle-docker-2 monitor on metal-2 * Resource action: galera-bundle-docker-2 monitor on metal-1 * Resource action: haproxy-bundle-docker-0 monitor on metal-3 * Resource action: haproxy-bundle-docker-0 monitor on metal-2 * Resource action: haproxy-bundle-docker-0 monitor on metal-1 * Resource action: haproxy-bundle-docker-1 monitor on metal-3 * Resource action: haproxy-bundle-docker-1 monitor on metal-2 * Resource action: haproxy-bundle-docker-1 monitor on metal-1 * Resource action: haproxy-bundle-docker-2 monitor on metal-3 * Resource action: haproxy-bundle-docker-2 monitor on metal-2 * Resource action: haproxy-bundle-docker-2 monitor on metal-1 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Resource action: redis-bundle-docker-0 monitor on metal-3 * Resource action: redis-bundle-docker-0 monitor on metal-2 * Resource action: redis-bundle-docker-0 monitor on metal-1 * Resource action: redis-bundle-docker-1 monitor on metal-3 * Resource action: redis-bundle-docker-1 monitor on metal-2 * Resource action: redis-bundle-docker-1 monitor on metal-1 * Resource action: redis-bundle-docker-2 monitor on metal-3 * Resource action: redis-bundle-docker-2 monitor on metal-2 * Resource action: redis-bundle-docker-2 monitor on metal-1 * Pseudo action: redis-bundle_start_0 * Pseudo action: haproxy-bundle_start_0 * Pseudo action: 
storage-clone_confirmed-pre_notify_start_0 * Resource action: haproxy-bundle-docker-0 start on metal-1 * Resource action: haproxy-bundle-docker-1 start on metal-2 * Resource action: haproxy-bundle-docker-2 start on metal-3 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: redis-bundle-master_start_0 * Resource action: redis-bundle-docker-0 start on metal-1 * Resource action: redis-bundle-0 monitor on metal-3 * Resource action: redis-bundle-0 monitor on metal-2 * Resource action: redis-bundle-0 monitor on metal-1 * Resource action: redis-bundle-docker-1 start on metal-2 * Resource action: redis-bundle-1 monitor on metal-3 * Resource action: redis-bundle-1 monitor on metal-2 * Resource action: redis-bundle-1 monitor on metal-1 * Resource action: redis-bundle-docker-2 start on metal-3 * Resource action: redis-bundle-2 monitor on metal-3 * Resource action: redis-bundle-2 monitor on metal-2 * Resource action: redis-bundle-2 monitor on metal-1 * Pseudo action: haproxy-bundle_running_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-1 * Resource action: haproxy-bundle-docker-1 monitor=60000 on metal-2 * Resource action: haproxy-bundle-docker-2 monitor=60000 on metal-3 * Resource action: redis-bundle-docker-0 monitor=60000 on metal-1 * Resource action: redis-bundle-0 start on metal-1 * Resource action: redis-bundle-docker-1 monitor=60000 on metal-2 * Resource action: redis-bundle-1 start on metal-2 * Resource action: redis-bundle-docker-2 monitor=60000 on metal-3 * Resource action: redis-bundle-2 start on metal-3 * Resource action: redis:0 start on redis-bundle-0 * Resource action: redis:1 start on redis-bundle-1 * Resource action: redis:2 start on redis-bundle-2 * Pseudo action: redis-bundle-master_running_0 * Resource action: redis-bundle-0 monitor=30000 on metal-1 * Resource action: redis-bundle-1 monitor=30000 on metal-2 * Resource action: redis-bundle-2 monitor=30000 on metal-3 * Pseudo action: redis-bundle-master_post_notify_running_0 * Resource action: redis:0 notify on redis-bundle-0 * Resource action: redis:1 notify on redis-bundle-1 * Resource action: redis:2 notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: redis-bundle-master_pre_notify_promote_0 * Pseudo action: redis-bundle_promote_0 * Resource action: redis:0 notify on redis-bundle-0 * Resource action: redis:1 notify on redis-bundle-1 * Resource action: redis:2 notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: redis:0 promote on redis-bundle-0 * Resource action: redis:1 promote on redis-bundle-1 * Resource action: redis:2 promote on redis-bundle-2 * Pseudo action: redis-bundle-master_promoted_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Resource action: redis:0 notify on redis-bundle-0 * Resource action: redis:1 notify on redis-bundle-1 * Resource action: redis:2 notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Pseudo action: storage-clone_start_0 * Resource action: redis:0 monitor=20000 on redis-bundle-0 * Resource action: redis:1 monitor=20000 on redis-bundle-1 * Resource action: redis:2 monitor=20000 on redis-bundle-2 * Resource action: storage:0 start on metal-1 * Resource action: storage:1 start on metal-2 * Resource action: storage:2 start on metal-3 * Pseudo 
action: storage-clone_running_0 * Pseudo action: storage-clone_post_notify_running_0 * Resource action: storage:0 notify on metal-1 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-post_notify_running_0 * Pseudo action: galera-bundle_start_0 * Resource action: storage:0 monitor=30000 on metal-1 * Resource action: storage:1 monitor=30000 on metal-2 * Resource action: storage:2 monitor=30000 on metal-3 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera-bundle-docker-0 start on metal-1 * Resource action: galera-bundle-0 monitor on metal-3 * Resource action: galera-bundle-0 monitor on metal-2 * Resource action: galera-bundle-0 monitor on metal-1 * Resource action: galera-bundle-docker-1 start on metal-2 * Resource action: galera-bundle-1 monitor on metal-3 * Resource action: galera-bundle-1 monitor on metal-2 * Resource action: galera-bundle-1 monitor on metal-1 * Resource action: galera-bundle-docker-2 start on metal-3 * Resource action: galera-bundle-2 monitor on metal-3 * Resource action: galera-bundle-2 monitor on metal-2 * Resource action: galera-bundle-2 monitor on metal-1 * Resource action: galera-bundle-docker-0 monitor=60000 on metal-1 * Resource action: galera-bundle-0 start on metal-1 * Resource action: galera-bundle-docker-1 monitor=60000 on metal-2 * Resource action: galera-bundle-1 start on metal-2 * Resource action: galera-bundle-docker-2 monitor=60000 on metal-3 * Resource action: galera-bundle-2 start on metal-3 * Resource action: galera:0 start on galera-bundle-0 * Resource action: galera:1 start on galera-bundle-1 * Resource action: galera:2 start on galera-bundle-2 * Pseudo action: galera-bundle-master_running_0 * Resource action: galera-bundle-0 monitor=30000 on metal-1 * Resource action: galera-bundle-1 monitor=30000 on metal-2 * Resource action: galera-bundle-2 monitor=30000 on metal-3 * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 * Resource action: galera:1 monitor=30000 on galera-bundle-1 * Resource action: galera:1 monitor=20000 on galera-bundle-1 * Resource action: galera:2 monitor=30000 on galera-bundle-2 * Resource action: galera:2 monitor=20000 on galera-bundle-2 Revised Cluster Status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * GuestOnline: [ galera-bundle-0@metal-1 galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Started: [ metal-1 metal-2 metal-3 ] * Stopped: [ rabbitmq-bundle-0 ] * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Slave metal-1 - * galera-bundle-1 (ocf:heartbeat:galera): Slave metal-2 - * galera-bundle-2 (ocf:heartbeat:galera): Slave metal-3 + * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted metal-1 + * galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2 + * galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3 * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3 * Container bundle set: 
redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master metal-1 - * redis-bundle-1 (ocf:heartbeat:redis): Master metal-2 - * redis-bundle-2 (ocf:heartbeat:redis): Master metal-3 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1 + * redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2 + * redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3 diff --git a/cts/scheduler/summary/bundle-order-startup-clone.summary b/cts/scheduler/summary/bundle-order-startup-clone.summary index 7db49d7f99..4f6d9165d6 100644 --- a/cts/scheduler/summary/bundle-order-startup-clone.summary +++ b/cts/scheduler/summary/bundle-order-startup-clone.summary @@ -1,79 +1,79 @@ Current cluster status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped Transition Summary: * Start storage:0 ( metal-1 ) due to unrunnable redis-bundle promoted (blocked) * Start storage:1 ( metal-2 ) due to unrunnable redis-bundle promoted (blocked) * Start storage:2 ( metal-3 ) due to unrunnable redis-bundle promoted (blocked) * Start galera-bundle-docker-0 ( metal-1 ) due to unrunnable storage-clone notified (blocked) * Start galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start (blocked) * Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked) * Start haproxy-bundle-docker-0 ( metal-2 ) * Start redis-bundle-docker-0 ( metal-2 ) * Start redis-bundle-0 ( metal-2 ) * Start redis:0 ( redis-bundle-0 ) Executing Cluster Transition: * Resource action: storage:0 monitor on metal-1 * Resource action: storage:1 monitor on metal-2 * Resource action: storage:2 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-3 * Resource action: galera-bundle-docker-0 monitor on metal-2 * Resource action: galera-bundle-docker-0 monitor on metal-1 * Resource action: haproxy-bundle-docker-0 monitor on metal-3 * Resource action: haproxy-bundle-docker-0 monitor on metal-2 * Resource action: haproxy-bundle-docker-0 monitor on metal-1 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Resource action: redis-bundle-docker-0 monitor on metal-3 * Resource action: redis-bundle-docker-0 monitor on metal-2 * Resource action: redis-bundle-docker-0 monitor on metal-1 * Pseudo action: redis-bundle_start_0 * Pseudo action: haproxy-bundle_start_0 * Resource action: haproxy-bundle-docker-0 start on metal-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: redis-bundle-master_start_0 * Resource action: redis-bundle-docker-0 start on metal-2 * Resource action: redis-bundle-0 monitor on metal-3 * Resource action: redis-bundle-0 monitor on metal-2 * Resource action: redis-bundle-0 monitor on metal-1 * Pseudo action: haproxy-bundle_running_0 * Resource action: haproxy-bundle-docker-0 monitor=60000 on metal-2 * Resource action: redis-bundle-docker-0 monitor=60000 on metal-2 * Resource 
action: redis-bundle-0 start on metal-2 * Resource action: redis:0 start on redis-bundle-0 * Pseudo action: redis-bundle-master_running_0 * Resource action: redis-bundle-0 monitor=30000 on metal-2 * Pseudo action: redis-bundle-master_post_notify_running_0 * Resource action: redis:0 notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Resource action: redis:0 monitor=60000 on redis-bundle-0 * Resource action: redis:0 monitor=45000 on redis-bundle-0 Revised Cluster Status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * GuestOnline: [ redis-bundle-0@metal-2 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Stopped: [ metal-1 metal-2 metal-3 rabbitmq-bundle-0 ] * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-2 * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave metal-2 + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted metal-2 diff --git a/cts/scheduler/summary/bundle-order-startup.summary b/cts/scheduler/summary/bundle-order-startup.summary index 1901aea89f..3144e83a7b 100644 --- a/cts/scheduler/summary/bundle-order-startup.summary +++ b/cts/scheduler/summary/bundle-order-startup.summary @@ -1,141 +1,141 @@ Current cluster status: * Node List: * Online: [ undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped Transition Summary: * Start rabbitmq-bundle-docker-0 ( undercloud ) * Start rabbitmq-bundle-0 ( undercloud ) * Start rabbitmq:0 ( rabbitmq-bundle-0 ) * Start galera-bundle-docker-0 ( undercloud ) * Start galera-bundle-0 ( undercloud ) * Start galera:0 ( galera-bundle-0 ) * Start redis-bundle-docker-0 ( undercloud ) * Start redis-bundle-0 ( undercloud ) * Start redis:0 ( redis-bundle-0 ) * Start ip-192.168.122.254 ( undercloud ) * Start ip-192.168.122.250 ( undercloud ) * Start ip-192.168.122.249 ( undercloud ) * Start ip-192.168.122.253 ( undercloud ) * Start ip-192.168.122.247 ( undercloud ) * Start ip-192.168.122.248 ( undercloud ) * Start haproxy-bundle-docker-0 ( undercloud ) * Start 
openstack-cinder-volume-docker-0 ( undercloud ) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Resource action: rabbitmq-bundle-docker-0 monitor on undercloud * Resource action: galera-bundle-docker-0 monitor on undercloud * Pseudo action: redis-bundle-master_pre_notify_start_0 * Resource action: redis-bundle-docker-0 monitor on undercloud * Resource action: ip-192.168.122.254 monitor on undercloud * Resource action: ip-192.168.122.250 monitor on undercloud * Resource action: ip-192.168.122.249 monitor on undercloud * Resource action: ip-192.168.122.253 monitor on undercloud * Resource action: ip-192.168.122.247 monitor on undercloud * Resource action: ip-192.168.122.248 monitor on undercloud * Resource action: haproxy-bundle-docker-0 monitor on undercloud * Resource action: openstack-cinder-volume-docker-0 monitor on undercloud * Pseudo action: openstack-cinder-volume_start_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: rabbitmq-bundle-docker-0 start on undercloud * Resource action: rabbitmq-bundle-0 monitor on undercloud * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Resource action: ip-192.168.122.254 start on undercloud * Resource action: ip-192.168.122.250 start on undercloud * Resource action: ip-192.168.122.249 start on undercloud * Resource action: ip-192.168.122.253 start on undercloud * Resource action: ip-192.168.122.247 start on undercloud * Resource action: ip-192.168.122.248 start on undercloud * Resource action: openstack-cinder-volume-docker-0 start on undercloud * Pseudo action: openstack-cinder-volume_running_0 * Pseudo action: haproxy-bundle_start_0 * Resource action: rabbitmq-bundle-docker-0 monitor=60000 on undercloud * Resource action: rabbitmq-bundle-0 start on undercloud * Resource action: ip-192.168.122.254 monitor=10000 on undercloud * Resource action: ip-192.168.122.250 monitor=10000 on undercloud * Resource action: ip-192.168.122.249 monitor=10000 on undercloud * Resource action: ip-192.168.122.253 monitor=10000 on undercloud * Resource action: ip-192.168.122.247 monitor=10000 on undercloud * Resource action: ip-192.168.122.248 monitor=10000 on undercloud * Resource action: haproxy-bundle-docker-0 start on undercloud * Resource action: openstack-cinder-volume-docker-0 monitor=60000 on undercloud * Pseudo action: haproxy-bundle_running_0 * Pseudo action: redis-bundle_start_0 * Resource action: rabbitmq:0 start on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Resource action: rabbitmq-bundle-0 monitor=30000 on undercloud * Pseudo action: redis-bundle-master_start_0 * Resource action: redis-bundle-docker-0 start on undercloud * Resource action: redis-bundle-0 monitor on undercloud * Resource action: haproxy-bundle-docker-0 monitor=60000 on undercloud * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Resource action: redis-bundle-docker-0 monitor=60000 on undercloud * Resource action: redis-bundle-0 start on undercloud * Resource action: rabbitmq:0 notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: redis:0 start on redis-bundle-0 * Pseudo action: redis-bundle-master_running_0 * Resource action: redis-bundle-0 monitor=30000 on undercloud * Pseudo action: rabbitmq-bundle_running_0 * Resource action: rabbitmq:0 monitor=10000 on rabbitmq-bundle-0 * Pseudo action: 
redis-bundle-master_post_notify_running_0 * Resource action: redis:0 notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera-bundle-docker-0 start on undercloud * Resource action: galera-bundle-0 monitor on undercloud * Resource action: redis:0 monitor=60000 on redis-bundle-0 * Resource action: redis:0 monitor=45000 on redis-bundle-0 * Resource action: galera-bundle-docker-0 monitor=60000 on undercloud * Resource action: galera-bundle-0 start on undercloud * Resource action: galera:0 start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Resource action: galera-bundle-0 monitor=30000 on undercloud * Pseudo action: galera-bundle_running_0 * Resource action: galera:0 monitor=30000 on galera-bundle-0 * Resource action: galera:0 monitor=20000 on galera-bundle-0 Revised Cluster Status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Slave undercloud + * galera-bundle-0 (ocf:heartbeat:galera): Unpromoted undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud diff --git a/cts/scheduler/summary/bundle-order-stop-clone.summary b/cts/scheduler/summary/bundle-order-stop-clone.summary index 2d816657cc..db3b9344b2 100644 --- a/cts/scheduler/summary/bundle-order-stop-clone.summary +++ b/cts/scheduler/summary/bundle-order-stop-clone.summary @@ -1,88 +1,88 @@ Current cluster status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * GuestOnline: [ galera-bundle-0@metal-1 galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Started: [ metal-1 metal-2 metal-3 ] * Stopped: [ rabbitmq-bundle-0 ] * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Slave metal-1 - * galera-bundle-1 (ocf:heartbeat:galera): Slave metal-2 - * galera-bundle-2 (ocf:heartbeat:galera): Slave metal-3 + * galera-bundle-0 
(ocf:heartbeat:galera): Unpromoted metal-1 + * galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2 + * galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3 * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3 * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master metal-1 - * redis-bundle-1 (ocf:heartbeat:redis): Master metal-2 - * redis-bundle-2 (ocf:heartbeat:redis): Master metal-3 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1 + * redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2 + * redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3 Transition Summary: * Stop storage:0 ( metal-1 ) due to node availability * Stop galera-bundle-docker-0 ( metal-1 ) due to node availability * Stop galera-bundle-0 ( metal-1 ) due to unrunnable galera-bundle-docker-0 start - * Stop galera:0 ( Slave galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start + * Stop galera:0 ( Unpromoted galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start Executing Cluster Transition: * Pseudo action: storage-clone_pre_notify_stop_0 * Resource action: galera-bundle-0 monitor on metal-3 * Resource action: galera-bundle-0 monitor on metal-2 * Resource action: galera-bundle-1 monitor on metal-3 * Resource action: galera-bundle-1 monitor on metal-1 * Resource action: galera-bundle-2 monitor on metal-2 * Resource action: galera-bundle-2 monitor on metal-1 * Resource action: redis-bundle-0 monitor on metal-3 * Resource action: redis-bundle-0 monitor on metal-2 * Resource action: redis-bundle-1 monitor on metal-3 * Resource action: redis-bundle-1 monitor on metal-1 * Resource action: redis-bundle-2 monitor on metal-2 * Resource action: redis-bundle-2 monitor on metal-1 * Pseudo action: galera-bundle_stop_0 * Resource action: storage:0 notify on metal-1 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-pre_notify_stop_0 * Pseudo action: galera-bundle-master_stop_0 * Resource action: galera:0 stop on galera-bundle-0 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-0 stop on metal-1 * Resource action: galera-bundle-docker-0 stop on metal-1 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: storage-clone_stop_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: storage:0 stop on metal-1 * Pseudo action: storage-clone_stopped_0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: storage-clone_post_notify_stopped_0 * Resource action: storage:1 notify on metal-2 * Resource action: storage:2 notify on metal-3 * Pseudo action: storage-clone_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Online: [ metal-1 metal-2 metal-3 ] * RemoteOFFLINE: [ rabbitmq-bundle-0 ] * GuestOnline: [ galera-bundle-1@metal-2 galera-bundle-2@metal-3 redis-bundle-0@metal-1 redis-bundle-1@metal-2 redis-bundle-2@metal-3 ] * Full List of Resources: * Clone Set: storage-clone [storage]: * Started: [ metal-2 metal-3 ] * Stopped: [ metal-1 rabbitmq-bundle-0 ] * Container bundle set: galera-bundle 
[192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped - * galera-bundle-1 (ocf:heartbeat:galera): Slave metal-2 - * galera-bundle-2 (ocf:heartbeat:galera): Slave metal-3 + * galera-bundle-1 (ocf:heartbeat:galera): Unpromoted metal-2 + * galera-bundle-2 (ocf:heartbeat:galera): Unpromoted metal-3 * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started metal-1 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started metal-2 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started metal-3 * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master metal-1 - * redis-bundle-1 (ocf:heartbeat:redis): Master metal-2 - * redis-bundle-2 (ocf:heartbeat:redis): Master metal-3 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted metal-1 + * redis-bundle-1 (ocf:heartbeat:redis): Promoted metal-2 + * redis-bundle-2 (ocf:heartbeat:redis): Promoted metal-3 diff --git a/cts/scheduler/summary/bundle-order-stop-on-remote.summary b/cts/scheduler/summary/bundle-order-stop-on-remote.summary index f8e1e46fa8..bf94ce3c72 100644 --- a/cts/scheduler/summary/bundle-order-stop-on-remote.summary +++ b/cts/scheduler/summary/bundle-order-stop-on-remote.summary @@ -1,224 +1,224 @@ Current cluster status: * Node List: * RemoteNode database-0: UNCLEAN (offline) * RemoteNode database-2: UNCLEAN (offline) * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-1 messaging-0 messaging-1 messaging-2 ] * GuestOnline: [ galera-bundle-1@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-2 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-2@controller-2 ] * Full List of Resources: * database-0 (ocf:pacemaker:remote): Stopped * database-1 (ocf:pacemaker:remote): Started controller-2 * database-2 (ocf:pacemaker:remote): Stopped * messaging-0 (ocf:pacemaker:remote): Started controller-2 * messaging-1 (ocf:pacemaker:remote): Started controller-2 * messaging-2 (ocf:pacemaker:remote): Started controller-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Master database-0 (UNCLEAN) - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * galera-bundle-2 (ocf:heartbeat:galera): FAILED Master database-2 (UNCLEAN) + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0 (UNCLEAN) + * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted database-2 (UNCLEAN) * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave controller-0 + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0 * redis-bundle-1 (ocf:heartbeat:redis): Stopped - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Stopped * 
ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Stopped * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Stopped * ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Stopped * ip-172.17.4.19 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped * stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Stopped * stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Stopped * stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Stopped * stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Stopped Transition Summary: * Fence (reboot) galera-bundle-2 (resource: galera-bundle-docker-2) 'guest is unclean' * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean' * Start database-0 ( controller-0 ) * Start database-2 ( controller-1 ) * Recover galera-bundle-docker-0 ( database-0 ) * Start galera-bundle-0 ( controller-0 ) - * Recover galera:0 ( Master galera-bundle-0 ) + * Recover galera:0 ( Promoted galera-bundle-0 ) * Recover galera-bundle-docker-2 ( database-2 ) * Start galera-bundle-2 ( controller-1 ) - * Recover galera:2 ( Master galera-bundle-2 ) - * Promote redis:0 ( Slave -> Master redis-bundle-0 ) + * Recover galera:2 ( Promoted galera-bundle-2 ) + * Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 ) * Start redis-bundle-docker-1 ( controller-1 ) * Start redis-bundle-1 ( controller-1 ) * Start redis:1 ( redis-bundle-1 ) * Start ip-192.168.24.11 ( controller-0 ) * Start ip-10.0.0.104 ( controller-1 ) * Start ip-172.17.1.11 ( controller-0 ) * Start ip-172.17.3.13 ( controller-1 ) * Start haproxy-bundle-docker-1 ( controller-1 ) * Start openstack-cinder-volume ( controller-0 ) * Start stonith-fence_ipmilan-525400c709f7 ( controller-1 ) * Start stonith-fence_ipmilan-5254005ea387 ( controller-1 ) * Start stonith-fence_ipmilan-525400542c06 ( controller-0 ) * Start stonith-fence_ipmilan-525400498d34 ( controller-1 ) Executing Cluster Transition: * Resource action: database-0 start on controller-0 * Resource action: database-2 start on controller-1 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Resource action: stonith-fence_ipmilan-525400c709f7 start on controller-1 * Resource action: stonith-fence_ipmilan-5254005ea387 start on controller-1 * Resource action: stonith-fence_ipmilan-525400542c06 start on controller-0 * Resource action: stonith-fence_ipmilan-525400498d34 start on controller-1 * Pseudo action: redis-bundle_start_0 * Pseudo action: galera-bundle_demote_0 * Resource action: database-0 monitor=20000 on controller-0 * Resource action: database-2 monitor=20000 on controller-1 * Pseudo action: galera-bundle-master_demote_0 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on 
redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: redis-bundle-master_start_0 * Resource action: stonith-fence_ipmilan-525400c709f7 monitor=60000 on controller-1 * Resource action: stonith-fence_ipmilan-5254005ea387 monitor=60000 on controller-1 * Resource action: stonith-fence_ipmilan-525400542c06 monitor=60000 on controller-0 * Resource action: stonith-fence_ipmilan-525400498d34 monitor=60000 on controller-1 * Pseudo action: galera_demote_0 * Pseudo action: galera_demote_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Resource action: galera-bundle-docker-0 stop on database-0 * Resource action: galera-bundle-docker-2 stop on database-2 * Pseudo action: stonith-galera-bundle-2-reboot on galera-bundle-2 * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0 * Pseudo action: galera-bundle-master_stop_0 * Resource action: redis-bundle-docker-1 start on controller-1 * Resource action: redis-bundle-1 monitor on controller-1 * Resource action: ip-192.168.24.11 start on controller-0 * Resource action: ip-10.0.0.104 start on controller-1 * Resource action: ip-172.17.1.11 start on controller-0 * Resource action: ip-172.17.3.13 start on controller-1 * Resource action: openstack-cinder-volume start on controller-0 * Pseudo action: haproxy-bundle_start_0 * Pseudo action: galera_stop_0 * Resource action: redis-bundle-docker-1 monitor=60000 on controller-1 * Resource action: redis-bundle-1 start on controller-1 * Resource action: ip-192.168.24.11 monitor=10000 on controller-0 * Resource action: ip-10.0.0.104 monitor=10000 on controller-1 * Resource action: ip-172.17.1.11 monitor=10000 on controller-0 * Resource action: ip-172.17.3.13 monitor=10000 on controller-1 * Resource action: haproxy-bundle-docker-1 start on controller-1 * Resource action: openstack-cinder-volume monitor=60000 on controller-0 * Pseudo action: haproxy-bundle_running_0 * Pseudo action: galera_stop_0 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: redis start on redis-bundle-1 * Pseudo action: redis-bundle-master_running_0 * Resource action: redis-bundle-1 monitor=30000 on controller-1 * Resource action: haproxy-bundle-docker-1 monitor=60000 on controller-1 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera-bundle-docker-0 start on database-0 * Resource action: galera-bundle-0 monitor on controller-1 * Resource action: galera-bundle-docker-2 start on database-2 * Resource action: galera-bundle-2 monitor on controller-1 * Pseudo action: redis-bundle-master_post_notify_running_0 * Resource action: galera-bundle-docker-0 monitor=60000 on database-0 * Resource action: galera-bundle-0 start on controller-0 * Resource action: galera-bundle-docker-2 monitor=60000 on database-2 * Resource action: galera-bundle-2 start on controller-1 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Resource action: galera start on galera-bundle-0 * Resource action: galera start on galera-bundle-2 * Pseudo action: galera-bundle-master_running_0 * Resource action: galera-bundle-0 monitor=30000 on controller-0 * Resource action: galera-bundle-2 monitor=30000 on controller-1 * Pseudo action: 
redis-bundle-master_pre_notify_promote_0 * Pseudo action: redis-bundle_promote_0 * Pseudo action: galera-bundle_running_0 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Pseudo action: galera-bundle_promote_0 * Pseudo action: galera-bundle-master_promote_0 * Resource action: redis promote on redis-bundle-0 * Pseudo action: redis-bundle-master_promoted_0 * Resource action: galera promote on galera-bundle-0 * Resource action: galera promote on galera-bundle-2 * Pseudo action: galera-bundle-master_promoted_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Pseudo action: galera-bundle_promoted_0 * Resource action: galera monitor=10000 on galera-bundle-0 * Resource action: galera monitor=10000 on galera-bundle-2 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on redis-bundle-1 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Resource action: redis monitor=20000 on redis-bundle-0 * Resource action: redis monitor=60000 on redis-bundle-1 * Resource action: redis monitor=45000 on redis-bundle-1 Revised Cluster Status: * Node List: * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-2 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-2 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * database-0 (ocf:pacemaker:remote): Started controller-0 * database-1 (ocf:pacemaker:remote): Started controller-2 * database-2 (ocf:pacemaker:remote): Started controller-1 * messaging-0 (ocf:pacemaker:remote): Started controller-2 * messaging-1 (ocf:pacemaker:remote): Started controller-2 * messaging-2 (ocf:pacemaker:remote): Started controller-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq-docker:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb-docker:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master database-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master database-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis-docker:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): 
Started controller-0 * ip-10.0.0.104 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.19 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.3.13 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.19 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy-docker:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400244e09 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400cdec10 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400c709f7 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-525400a7f9e0 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400a25787 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254005ea387 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-525400542c06 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400aac413 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400498d34 (stonith:fence_ipmilan): Started controller-1 diff --git a/cts/scheduler/summary/bundle-order-stop.summary b/cts/scheduler/summary/bundle-order-stop.summary index bbb9df15b9..0954c59992 100644 --- a/cts/scheduler/summary/bundle-order-stop.summary +++ b/cts/scheduler/summary/bundle-order-stop.summary @@ -1,127 +1,127 @@ Current cluster status: * Node List: * Online: [ undercloud ] * GuestOnline: [ galera-bundle-0@undercloud rabbitmq-bundle-0@undercloud redis-bundle-0@undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started undercloud * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master undercloud + * galera-bundle-0 (ocf:heartbeat:galera): Promoted undercloud * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master undercloud + * redis-bundle-0 (ocf:heartbeat:redis): Promoted undercloud * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Started undercloud * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Started undercloud * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started undercloud * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started undercloud Transition Summary: * Stop rabbitmq-bundle-docker-0 ( undercloud ) due to node availability * Stop rabbitmq-bundle-0 ( undercloud ) due to node availability * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-0 
start * Stop galera-bundle-docker-0 ( undercloud ) due to node availability * Stop galera-bundle-0 ( undercloud ) due to node availability - * Stop galera:0 ( Master galera-bundle-0 ) due to unrunnable galera-bundle-0 start + * Stop galera:0 ( Promoted galera-bundle-0 ) due to unrunnable galera-bundle-0 start * Stop redis-bundle-docker-0 ( undercloud ) due to node availability * Stop redis-bundle-0 ( undercloud ) due to node availability - * Stop redis:0 ( Master redis-bundle-0 ) due to unrunnable redis-bundle-0 start + * Stop redis:0 ( Promoted redis-bundle-0 ) due to unrunnable redis-bundle-0 start * Stop ip-192.168.122.254 ( undercloud ) due to node availability * Stop ip-192.168.122.250 ( undercloud ) due to node availability * Stop ip-192.168.122.249 ( undercloud ) due to node availability * Stop ip-192.168.122.253 ( undercloud ) due to node availability * Stop ip-192.168.122.247 ( undercloud ) due to node availability * Stop ip-192.168.122.248 ( undercloud ) due to node availability * Stop haproxy-bundle-docker-0 ( undercloud ) due to node availability * Stop openstack-cinder-volume-docker-0 ( undercloud ) due to node availability Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 * Resource action: galera cancel=10000 on galera-bundle-0 * Resource action: redis cancel=20000 on redis-bundle-0 * Pseudo action: redis-bundle-master_pre_notify_demote_0 * Pseudo action: openstack-cinder-volume_stop_0 * Pseudo action: redis-bundle_demote_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: rabbitmq notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Pseudo action: galera-bundle-master_demote_0 * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_demote_0 * Pseudo action: redis-bundle-master_demote_0 * Resource action: openstack-cinder-volume-docker-0 stop on undercloud * Pseudo action: openstack-cinder-volume_stopped_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-0 stop on undercloud * Resource action: galera demote on galera-bundle-0 * Pseudo action: galera-bundle-master_demoted_0 * Resource action: redis demote on redis-bundle-0 * Pseudo action: redis-bundle-master_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on undercloud * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: redis-bundle-master_post_notify_demoted_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 * Resource action: galera stop on galera-bundle-0 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-0 stop on undercloud * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-post_notify_demoted_0 * Pseudo action: redis-bundle-master_pre_notify_stop_0 * Pseudo action: redis-bundle_demoted_0 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera-bundle-docker-0 stop on undercloud * Resource action: redis notify on redis-bundle-0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_stop_0 * Resource action: redis stop on 
redis-bundle-0 * Pseudo action: redis-bundle-master_stopped_0 * Resource action: redis-bundle-0 stop on undercloud * Pseudo action: redis-bundle-master_post_notify_stopped_0 * Resource action: redis-bundle-docker-0 stop on undercloud * Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: redis-bundle_stopped_0 * Pseudo action: haproxy-bundle_stop_0 * Resource action: haproxy-bundle-docker-0 stop on undercloud * Pseudo action: haproxy-bundle_stopped_0 * Resource action: ip-192.168.122.254 stop on undercloud * Resource action: ip-192.168.122.250 stop on undercloud * Resource action: ip-192.168.122.249 stop on undercloud * Resource action: ip-192.168.122.253 stop on undercloud * Resource action: ip-192.168.122.247 stop on undercloud * Resource action: ip-192.168.122.248 stop on undercloud * Cluster action: do_shutdown on undercloud Revised Cluster Status: * Node List: * Online: [ undercloud ] * Full List of Resources: * Container bundle: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped * Container bundle: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped * Container bundle: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped * ip-192.168.122.254 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.250 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.249 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.253 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.247 (ocf:heartbeat:IPaddr2): Stopped * ip-192.168.122.248 (ocf:heartbeat:IPaddr2): Stopped * Container bundle: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Stopped diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary index 20470c9ace..3c16b75ea0 100644 --- a/cts/scheduler/summary/cancel-behind-moving-remote.summary +++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary @@ -1,211 +1,211 @@ Using the original execution date of: 2021-02-15 01:40:51Z Current cluster status: * Node List: * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] * OFFLINE: [ messaging-1 ] * RemoteOnline: [ compute-0 compute-1 ] * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-1@controller-2 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ] * Full List of Resources: * compute-0 (ocf:pacemaker:remote): Started controller-1 * compute-1 (ocf:pacemaker:remote): Started controller-2 * Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master database-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master database-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 
(ocf:heartbeat:galera): Promoted database-2 * Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1 * ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]: * haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2 * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Slave controller-2 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Slave controller-1 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2 * stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 * stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started messaging-2 * stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2 * stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2 * stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0 * Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2 Transition Summary: * Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable 
rabbitmq-bundle-podman-1 start (blocked) * Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked) * Start ovn-dbs-bundle-podman-0 ( controller-2 ) * Start ovn-dbs-bundle-0 ( controller-2 ) * Start ovndb_servers:0 ( ovn-dbs-bundle-0 ) * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 ) * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 ) - * Restart ovndb_servers:1 ( Slave -> Master ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start + * Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start * Start ip-172.17.1.87 ( controller-0 ) * Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 ) * Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 ) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0 * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2 * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 * Cluster action: clear_failcount for nova-evacuate on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400dc23e0 on database-2 * Resource action: stonith-fence_ipmilan-52540040bb56 stop on messaging-2 * Cluster action: clear_failcount for stonith-fence_ipmilan-52540078fb07 on messaging-2 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400ea59b0 on database-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400066e50 on messaging-2 * Resource action: stonith-fence_ipmilan-525400e1534e stop on database-1 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e1534e on database-2 * Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0 * Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2 * Pseudo action: ovn-dbs-bundle_stop_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: ovn-dbs-bundle-master_stop_0 * Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2 * Pseudo action: rabbitmq-bundle-clone_running_0 * Resource action: ovndb_servers stop on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_stopped_0 * Resource action: ovn-dbs-bundle-1 stop on controller-2 * Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0 * Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2 * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: 
ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 * Pseudo action: ovn-dbs-bundle_stopped_0 * Pseudo action: ovn-dbs-bundle_start_0 * Pseudo action: rabbitmq-bundle_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: ovn-dbs-bundle-master_start_0 * Resource action: ovn-dbs-bundle-podman-0 start on controller-2 * Resource action: ovn-dbs-bundle-0 start on controller-2 * Resource action: ovn-dbs-bundle-podman-1 start on controller-0 * Resource action: ovn-dbs-bundle-1 start on controller-0 * Resource action: ovndb_servers start on ovn-dbs-bundle-0 * Resource action: ovndb_servers start on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_running_0 * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2 * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2 * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0 * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0 * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 * Pseudo action: ovn-dbs-bundle_running_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle_promote_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle-master_promote_0 * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_promoted_0 * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: ovn-dbs-bundle_promoted_0 * Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0 * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1 * Resource action: ip-172.17.1.87 start on controller-0 * Resource action: ip-172.17.1.87 monitor=10000 on controller-0 Using the original execution date of: 2021-02-15 01:40:51Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ] * OFFLINE: [ messaging-1 ] * RemoteOnline: [ compute-0 compute-1 ] * GuestOnline: [ galera-bundle-0@database-0 galera-bundle-1@database-1 galera-bundle-2@database-2 ovn-dbs-bundle-0@controller-2 ovn-dbs-bundle-1@controller-0 ovn-dbs-bundle-2@controller-1 rabbitmq-bundle-0@messaging-0 rabbitmq-bundle-2@messaging-2 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ] * Full List of Resources: * compute-0 (ocf:pacemaker:remote): Started controller-1 * compute-1 (ocf:pacemaker:remote): Started controller-2 * Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master database-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * 
galera-bundle-2 (ocf:heartbeat:galera): Master database-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2 * Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1 * ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]: * haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2 * haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0 * haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Slave controller-2 - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Master controller-0 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Slave controller-1 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1 * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0 * stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ compute-0 compute-1 ] * Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2 * stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2 * stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0 * stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1 * stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2 * stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started messaging-2 * stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2 * stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0 * stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started 
messaging-0 * Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2 diff --git a/cts/scheduler/summary/clone-no-shuffle.summary b/cts/scheduler/summary/clone-no-shuffle.summary index e3bb876cc9..9dbee84c2f 100644 --- a/cts/scheduler/summary/clone-no-shuffle.summary +++ b/cts/scheduler/summary/clone-no-shuffle.summary @@ -1,61 +1,61 @@ Current cluster status: * Node List: * Online: [ dktest1sles10 dktest2sles10 ] * Full List of Resources: * stonith-1 (stonith:dummy): Stopped * Clone Set: ms-drbd1 [drbd1] (promotable): - * Masters: [ dktest2sles10 ] + * Promoted: [ dktest2sles10 ] * Stopped: [ dktest1sles10 ] * testip (ocf:heartbeat:IPaddr2): Started dktest2sles10 Transition Summary: * Start stonith-1 ( dktest1sles10 ) - * Stop drbd1:0 ( Master dktest2sles10 ) due to node availability + * Stop drbd1:0 ( Promoted dktest2sles10 ) due to node availability * Start drbd1:1 ( dktest1sles10 ) * Stop testip ( dktest2sles10 ) due to node availability Executing Cluster Transition: * Resource action: stonith-1 monitor on dktest2sles10 * Resource action: stonith-1 monitor on dktest1sles10 * Resource action: drbd1:1 monitor on dktest1sles10 * Pseudo action: ms-drbd1_pre_notify_demote_0 * Resource action: testip stop on dktest2sles10 * Resource action: testip monitor on dktest1sles10 * Resource action: stonith-1 start on dktest1sles10 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_demote_0 * Pseudo action: ms-drbd1_demote_0 * Resource action: drbd1:0 demote on dktest2sles10 * Pseudo action: ms-drbd1_demoted_0 * Pseudo action: ms-drbd1_post_notify_demoted_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_demoted_0 * Pseudo action: ms-drbd1_pre_notify_stop_0 * Resource action: drbd1:0 notify on dktest2sles10 * Pseudo action: ms-drbd1_confirmed-pre_notify_stop_0 * Pseudo action: ms-drbd1_stop_0 * Resource action: drbd1:0 stop on dktest2sles10 * Pseudo action: ms-drbd1_stopped_0 * Pseudo action: ms-drbd1_post_notify_stopped_0 * Pseudo action: ms-drbd1_confirmed-post_notify_stopped_0 * Pseudo action: ms-drbd1_pre_notify_start_0 * Pseudo action: ms-drbd1_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd1_start_0 * Resource action: drbd1:1 start on dktest1sles10 * Pseudo action: ms-drbd1_running_0 * Pseudo action: ms-drbd1_post_notify_running_0 * Resource action: drbd1:1 notify on dktest1sles10 * Pseudo action: ms-drbd1_confirmed-post_notify_running_0 * Resource action: drbd1:1 monitor=11000 on dktest1sles10 Revised Cluster Status: * Node List: * Online: [ dktest1sles10 dktest2sles10 ] * Full List of Resources: * stonith-1 (stonith:dummy): Started dktest1sles10 * Clone Set: ms-drbd1 [drbd1] (promotable): - * Slaves: [ dktest1sles10 ] + * Unpromoted: [ dktest1sles10 ] * Stopped: [ dktest2sles10 ] * testip (ocf:heartbeat:IPaddr2): Stopped diff --git a/cts/scheduler/summary/clone-requires-quorum-recovery.summary b/cts/scheduler/summary/clone-requires-quorum-recovery.summary index be5ebd5a15..364dabec82 100644 --- a/cts/scheduler/summary/clone-requires-quorum-recovery.summary +++ b/cts/scheduler/summary/clone-requires-quorum-recovery.summary @@ -1,48 +1,48 @@ Using the original execution date of: 2018-05-24 15:29:56Z Current cluster status: * Node List: * Node rhel7-5: UNCLEAN (offline) * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ] * Full List of Resources: * Fencing 
(stonith:fence_xvm): Started rhel7-1 * FencingFail (stonith:fence_dummy): Started rhel7-2 * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: dummy-crowd-clone [dummy-crowd]: * dummy-crowd (ocf:pacemaker:Dummy): ORPHANED Started rhel7-5 (UNCLEAN) * Started: [ rhel7-1 rhel7-4 ] * Stopped: [ rhel7-2 rhel7-3 ] * Clone Set: dummy-boss-clone [dummy-boss] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-2 rhel7-4 ] Transition Summary: * Fence (reboot) rhel7-5 'peer is no longer part of the cluster' * Start dummy-crowd:2 ( rhel7-2 ) * Stop dummy-crowd:3 ( rhel7-5 ) due to node availability Executing Cluster Transition: * Pseudo action: dummy-crowd-clone_stop_0 * Fencing rhel7-5 (reboot) * Pseudo action: dummy-crowd_stop_0 * Pseudo action: dummy-crowd-clone_stopped_0 * Pseudo action: dummy-crowd-clone_start_0 * Resource action: dummy-crowd start on rhel7-2 * Pseudo action: dummy-crowd-clone_running_0 * Resource action: dummy-crowd monitor=10000 on rhel7-2 Using the original execution date of: 2018-05-24 15:29:56Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ] * OFFLINE: [ rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * FencingFail (stonith:fence_dummy): Started rhel7-2 * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: dummy-crowd-clone [dummy-crowd]: * Started: [ rhel7-1 rhel7-2 rhel7-4 ] * Clone Set: dummy-boss-clone [dummy-boss] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-2 rhel7-4 ] diff --git a/cts/scheduler/summary/clone-requires-quorum.summary b/cts/scheduler/summary/clone-requires-quorum.summary index c9178a1a6c..e45b0312eb 100644 --- a/cts/scheduler/summary/clone-requires-quorum.summary +++ b/cts/scheduler/summary/clone-requires-quorum.summary @@ -1,42 +1,42 @@ Using the original execution date of: 2018-05-24 15:30:29Z Current cluster status: * Node List: * Node rhel7-5: UNCLEAN (offline) * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * FencingFail (stonith:fence_dummy): Started rhel7-2 * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: dummy-crowd-clone [dummy-crowd]: * dummy-crowd (ocf:pacemaker:Dummy): ORPHANED Started rhel7-5 (UNCLEAN) * Started: [ rhel7-1 rhel7-2 rhel7-4 ] * Clone Set: dummy-boss-clone [dummy-boss] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-2 rhel7-4 ] Transition Summary: * Fence (reboot) rhel7-5 'peer is no longer part of the cluster' * Stop dummy-crowd:3 ( rhel7-5 ) due to node availability Executing Cluster Transition: * Pseudo action: dummy-crowd-clone_stop_0 * Fencing rhel7-5 (reboot) * Pseudo action: dummy-crowd_stop_0 * Pseudo action: dummy-crowd-clone_stopped_0 Using the original execution date of: 2018-05-24 15:30:29Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ] * OFFLINE: [ rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * FencingFail (stonith:fence_dummy): Started rhel7-2 * dummy-solo (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: dummy-crowd-clone [dummy-crowd]: * Started: [ rhel7-1 rhel7-2 rhel7-4 ] * Clone Set: dummy-boss-clone [dummy-boss] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-2 rhel7-4 ] 
diff --git a/cts/scheduler/summary/colo_master_w_native.summary b/cts/scheduler/summary/colo_master_w_native.summary index 20e369ec44..ad67078d88 100644 --- a/cts/scheduler/summary/colo_master_w_native.summary +++ b/cts/scheduler/summary/colo_master_w_native.summary @@ -1,49 +1,49 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * A (ocf:pacemaker:Dummy): Started node1 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] Transition Summary: - * Demote MS_RSC_NATIVE:0 ( Master -> Slave node2 ) - * Promote MS_RSC_NATIVE:1 ( Slave -> Master node1 ) + * Demote MS_RSC_NATIVE:0 ( Promoted -> Unpromoted node2 ) + * Promote MS_RSC_NATIVE:1 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 * Pseudo action: MS_RSC_pre_notify_demote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 * Pseudo action: MS_RSC_demote_0 * Resource action: MS_RSC_NATIVE:0 demote on node2 * Pseudo action: MS_RSC_demoted_0 * Pseudo action: MS_RSC_post_notify_demoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 * Pseudo action: MS_RSC_pre_notify_promote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 * Pseudo action: MS_RSC_promote_0 * Resource action: MS_RSC_NATIVE:1 promote on node1 * Pseudo action: MS_RSC_promoted_0 * Pseudo action: MS_RSC_post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * A (ocf:pacemaker:Dummy): Started node1 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/colo_slave_w_native.summary b/cts/scheduler/summary/colo_slave_w_native.summary index d0cb4a998d..477d1c6866 100644 --- a/cts/scheduler/summary/colo_slave_w_native.summary +++ b/cts/scheduler/summary/colo_slave_w_native.summary @@ -1,53 +1,53 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * A (ocf:pacemaker:Dummy): Started node1 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] Transition Summary: * Move A ( node1 -> node2 ) - * Demote MS_RSC_NATIVE:0 ( Master -> Slave node2 ) - * Promote MS_RSC_NATIVE:1 ( Slave -> Master node1 ) + * Demote MS_RSC_NATIVE:0 ( Promoted -> Unpromoted node2 ) + * Promote MS_RSC_NATIVE:1 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Resource action: A stop on node1 * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1 * Pseudo action: MS_RSC_pre_notify_demote_0 * Resource action: A start on node2 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_demote_0 * Pseudo action: MS_RSC_demote_0 * Resource action: A monitor=10000 on node2 * 
Resource action: MS_RSC_NATIVE:0 demote on node2 * Pseudo action: MS_RSC_demoted_0 * Pseudo action: MS_RSC_post_notify_demoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_demoted_0 * Pseudo action: MS_RSC_pre_notify_promote_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-pre_notify_promote_0 * Pseudo action: MS_RSC_promote_0 * Resource action: MS_RSC_NATIVE:1 promote on node1 * Pseudo action: MS_RSC_promoted_0 * Pseudo action: MS_RSC_post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 notify on node2 * Resource action: MS_RSC_NATIVE:1 notify on node1 * Pseudo action: MS_RSC_confirmed-post_notify_promoted_0 * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * A (ocf:pacemaker:Dummy): Started node2 * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/coloc-clone-stays-active.summary b/cts/scheduler/summary/coloc-clone-stays-active.summary index 218907acdd..cb212e1cde 100644 --- a/cts/scheduler/summary/coloc-clone-stays-active.summary +++ b/cts/scheduler/summary/coloc-clone-stays-active.summary @@ -1,209 +1,209 @@ 9 of 87 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ s01-0 s01-1 ] * Full List of Resources: * stonith-s01-0 (stonith:external/ipmi): Started s01-1 * stonith-s01-1 (stonith:external/ipmi): Started s01-0 * Resource Group: iscsi-pool-0-target-all: * iscsi-pool-0-target (ocf:vds-ok:iSCSITarget): Started s01-0 * iscsi-pool-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-0 * Resource Group: iscsi-pool-0-vips: * vip-235 (ocf:heartbeat:IPaddr2): Started s01-0 * vip-236 (ocf:heartbeat:IPaddr2): Started s01-0 * Resource Group: iscsi-pool-1-target-all: * iscsi-pool-1-target (ocf:vds-ok:iSCSITarget): Started s01-1 * iscsi-pool-1-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-1 * Resource Group: iscsi-pool-1-vips: * vip-237 (ocf:heartbeat:IPaddr2): Started s01-1 * vip-238 (ocf:heartbeat:IPaddr2): Started s01-1 * Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable): - * Masters: [ s01-1 ] - * Slaves: [ s01-0 ] + * Promoted: [ s01-1 ] + * Unpromoted: [ s01-0 ] * Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable): - * Masters: [ s01-1 ] - * Slaves: [ s01-0 ] + * Promoted: [ s01-1 ] + * Unpromoted: [ s01-0 ] * Clone Set: cl-o2cb [o2cb] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-service-fs [s01-service-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-ietd [ietd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-dhcpd [dhcpd] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Resource Group: http-server: * vip-233 (ocf:heartbeat:IPaddr2): Started s01-0 * nginx (lsb:nginx): Stopped (disabled) * Clone Set: ms-drbd-s01-logs 
[drbd-s01-logs] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-logs-fs [s01-logs-fs]: * Started: [ s01-0 s01-1 ] * Resource Group: syslog-server: * vip-234 (ocf:heartbeat:IPaddr2): Started s01-1 * syslog-ng (ocf:heartbeat:syslog-ng): Started s01-1 * Resource Group: tftp-server: * vip-232 (ocf:heartbeat:IPaddr2): Stopped * tftpd (ocf:heartbeat:Xinetd): Stopped * Clone Set: cl-xinetd [xinetd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-ospf-routing [ospf-routing]: * Started: [ s01-0 s01-1 ] * Clone Set: connected-outer [ping-bmc-and-switch]: * Started: [ s01-0 s01-1 ] * Resource Group: iscsi-vds-dom0-stateless-0-target-all (disabled): * iscsi-vds-dom0-stateless-0-target (ocf:vds-ok:iSCSITarget): Stopped (disabled) * iscsi-vds-dom0-stateless-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Stopped (disabled) * Resource Group: iscsi-vds-dom0-stateless-0-vips: * vip-227 (ocf:heartbeat:IPaddr2): Stopped * vip-228 (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable): - * Slaves: [ s01-0 s01-1 ] + * Unpromoted: [ s01-0 s01-1 ] * Clone Set: cl-dlm [dlm]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Clone Set: cl-gfs2 [gfs2]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-vds-http-fs [vds-http-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-clvmd [clvmd]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]: * Started: [ s01-0 s01-1 ] * mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-0 * Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-libvirtd [libvirtd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]: * Started: [ s01-0 s01-1 ] Transition Summary: * Migrate mgmt-vm ( s01-0 -> s01-1 ) Executing Cluster Transition: * Resource action: mgmt-vm migrate_to on s01-0 * Resource action: mgmt-vm migrate_from on s01-1 * Resource action: mgmt-vm stop on s01-0 * Pseudo action: mgmt-vm_start_0 * Resource action: mgmt-vm monitor=10000 on s01-1 Revised Cluster Status: * Node List: * Online: [ s01-0 s01-1 ] * Full List of Resources: * stonith-s01-0 (stonith:external/ipmi): Started s01-1 * stonith-s01-1 (stonith:external/ipmi): Started s01-0 * Resource Group: iscsi-pool-0-target-all: * iscsi-pool-0-target (ocf:vds-ok:iSCSITarget): Started s01-0 * iscsi-pool-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-0 * Resource Group: iscsi-pool-0-vips: * vip-235 (ocf:heartbeat:IPaddr2): Started s01-0 * vip-236 (ocf:heartbeat:IPaddr2): Started s01-0 * Resource Group: iscsi-pool-1-target-all: * iscsi-pool-1-target (ocf:vds-ok:iSCSITarget): Started s01-1 * iscsi-pool-1-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-1 * Resource Group: iscsi-pool-1-vips: * vip-237 
(ocf:heartbeat:IPaddr2): Started s01-1 * vip-238 (ocf:heartbeat:IPaddr2): Started s01-1 * Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable): - * Masters: [ s01-1 ] - * Slaves: [ s01-0 ] + * Promoted: [ s01-1 ] + * Unpromoted: [ s01-0 ] * Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable): - * Masters: [ s01-1 ] - * Slaves: [ s01-0 ] + * Promoted: [ s01-1 ] + * Unpromoted: [ s01-0 ] * Clone Set: cl-o2cb [o2cb] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-service-fs [s01-service-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-ietd [ietd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-dhcpd [dhcpd] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Resource Group: http-server: * vip-233 (ocf:heartbeat:IPaddr2): Started s01-0 * nginx (lsb:nginx): Stopped (disabled) * Clone Set: ms-drbd-s01-logs [drbd-s01-logs] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-logs-fs [s01-logs-fs]: * Started: [ s01-0 s01-1 ] * Resource Group: syslog-server: * vip-234 (ocf:heartbeat:IPaddr2): Started s01-1 * syslog-ng (ocf:heartbeat:syslog-ng): Started s01-1 * Resource Group: tftp-server: * vip-232 (ocf:heartbeat:IPaddr2): Stopped * tftpd (ocf:heartbeat:Xinetd): Stopped * Clone Set: cl-xinetd [xinetd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-ospf-routing [ospf-routing]: * Started: [ s01-0 s01-1 ] * Clone Set: connected-outer [ping-bmc-and-switch]: * Started: [ s01-0 s01-1 ] * Resource Group: iscsi-vds-dom0-stateless-0-target-all (disabled): * iscsi-vds-dom0-stateless-0-target (ocf:vds-ok:iSCSITarget): Stopped (disabled) * iscsi-vds-dom0-stateless-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Stopped (disabled) * Resource Group: iscsi-vds-dom0-stateless-0-vips: * vip-227 (ocf:heartbeat:IPaddr2): Stopped * vip-228 (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable): - * Masters: [ s01-0 ] - * Slaves: [ s01-1 ] + * Promoted: [ s01-0 ] + * Unpromoted: [ s01-1 ] * Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable): - * Slaves: [ s01-0 s01-1 ] + * Unpromoted: [ s01-0 s01-1 ] * Clone Set: cl-dlm [dlm]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] (disabled): * Stopped (disabled): [ s01-0 s01-1 ] * Clone Set: cl-gfs2 [gfs2]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-vds-http-fs [vds-http-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-clvmd [clvmd]: * Started: [ s01-0 s01-1 ] * Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable): - * Masters: [ s01-0 s01-1 ] + * Promoted: [ s01-0 s01-1 ] * Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]: * Started: [ s01-0 s01-1 ] * mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-1 
* Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-libvirtd [libvirtd]: * Started: [ s01-0 s01-1 ] * Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]: * Started: [ s01-0 s01-1 ] diff --git a/cts/scheduler/summary/coloc-slave-anti.summary b/cts/scheduler/summary/coloc-slave-anti.summary index 00658aacf0..a8518d3719 100644 --- a/cts/scheduler/summary/coloc-slave-anti.summary +++ b/cts/scheduler/summary/coloc-slave-anti.summary @@ -1,48 +1,48 @@ Current cluster status: * Node List: * Online: [ pollux sirius ] * Full List of Resources: * Clone Set: pingd-clone [pingd-1]: * Started: [ pollux sirius ] * Clone Set: drbd-msr [drbd-r0] (promotable): - * Masters: [ pollux ] - * Slaves: [ sirius ] + * Promoted: [ pollux ] + * Unpromoted: [ sirius ] * Resource Group: group-1: * fs-1 (ocf:heartbeat:Filesystem): Stopped * ip-198 (ocf:heartbeat:IPaddr2): Stopped * apache (ocf:custom:apache2): Stopped * pollux-fencing (stonith:external/ipmi-soft): Started sirius * sirius-fencing (stonith:external/ipmi-soft): Started pollux Transition Summary: * Start fs-1 ( pollux ) * Start ip-198 ( pollux ) * Start apache ( pollux ) Executing Cluster Transition: * Pseudo action: group-1_start_0 * Resource action: fs-1 start on pollux * Resource action: ip-198 start on pollux * Resource action: apache start on pollux * Pseudo action: group-1_running_0 * Resource action: fs-1 monitor=20000 on pollux * Resource action: ip-198 monitor=30000 on pollux * Resource action: apache monitor=60000 on pollux Revised Cluster Status: * Node List: * Online: [ pollux sirius ] * Full List of Resources: * Clone Set: pingd-clone [pingd-1]: * Started: [ pollux sirius ] * Clone Set: drbd-msr [drbd-r0] (promotable): - * Masters: [ pollux ] - * Slaves: [ sirius ] + * Promoted: [ pollux ] + * Unpromoted: [ sirius ] * Resource Group: group-1: * fs-1 (ocf:heartbeat:Filesystem): Started pollux * ip-198 (ocf:heartbeat:IPaddr2): Started pollux * apache (ocf:custom:apache2): Started pollux * pollux-fencing (stonith:external/ipmi-soft): Started sirius * sirius-fencing (stonith:external/ipmi-soft): Started pollux diff --git a/cts/scheduler/summary/colocation-influence.summary b/cts/scheduler/summary/colocation-influence.summary index fbc02f5070..3ea8b3f545 100644 --- a/cts/scheduler/summary/colocation-influence.summary +++ b/cts/scheduler/summary/colocation-influence.summary @@ -1,170 +1,170 @@ Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ bundle10-0@rhel7-2 bundle10-1@rhel7-3 bundle11-0@rhel7-1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * rsc1a (ocf:pacemaker:Dummy): Started rhel7-2 * rsc1b (ocf:pacemaker:Dummy): Started rhel7-2 * rsc2a (ocf:pacemaker:Dummy): Started rhel7-4 * rsc2b (ocf:pacemaker:Dummy): Started rhel7-4 * rsc3a (ocf:pacemaker:Dummy): Stopped * rsc3b (ocf:pacemaker:Dummy): Stopped * rsc4a (ocf:pacemaker:Dummy): Started rhel7-3 * rsc4b (ocf:pacemaker:Dummy): Started rhel7-3 * rsc5a (ocf:pacemaker:Dummy): Started rhel7-1 * Resource Group: group5a: * rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1 * rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1 * Resource Group: group6a: * rsc6a1 (ocf:pacemaker:Dummy): Started rhel7-2 * rsc6a2 (ocf:pacemaker:Dummy): Started rhel7-2 * rsc6a (ocf:pacemaker:Dummy): Started rhel7-2 * Resource Group: group7a: * rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3 * rsc7a2 (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: rsc8a-clone [rsc8a]: * Started: [ 
rhel7-1 rhel7-3 rhel7-4 ] * Clone Set: rsc8b-clone [rsc8b]: * Started: [ rhel7-1 rhel7-3 rhel7-4 ] * rsc9a (ocf:pacemaker:Dummy): Started rhel7-4 * rsc9b (ocf:pacemaker:Dummy): Started rhel7-4 * rsc9c (ocf:pacemaker:Dummy): Started rhel7-4 * rsc10a (ocf:pacemaker:Dummy): Started rhel7-2 * rsc11a (ocf:pacemaker:Dummy): Started rhel7-1 * rsc12a (ocf:pacemaker:Dummy): Started rhel7-1 * rsc12b (ocf:pacemaker:Dummy): Started rhel7-1 * rsc12c (ocf:pacemaker:Dummy): Started rhel7-1 * Container bundle set: bundle10 [pcmktest:http]: * bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2 * bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3 * Container bundle set: bundle11 [pcmktest:http]: * bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1 * bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped * rsc13a (ocf:pacemaker:Dummy): Started rhel7-3 * Clone Set: rsc13b-clone [rsc13b] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ] * Stopped: [ rhel7-5 ] * rsc14b (ocf:pacemaker:Dummy): Started rhel7-4 * Clone Set: rsc14a-clone [rsc14a] (promotable): - * Masters: [ rhel7-4 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ] * Stopped: [ rhel7-5 ] Transition Summary: * Move rsc1a ( rhel7-2 -> rhel7-3 ) * Move rsc1b ( rhel7-2 -> rhel7-3 ) * Stop rsc2a ( rhel7-4 ) due to node availability * Start rsc3a ( rhel7-2 ) * Start rsc3b ( rhel7-2 ) * Stop rsc4a ( rhel7-3 ) due to node availability * Stop rsc5a ( rhel7-1 ) due to node availability * Stop rsc6a1 ( rhel7-2 ) due to node availability * Stop rsc6a2 ( rhel7-2 ) due to node availability * Stop rsc7a2 ( rhel7-3 ) due to node availability * Stop rsc8a:1 ( rhel7-4 ) due to node availability * Stop rsc9c ( rhel7-4 ) due to node availability * Move rsc10a ( rhel7-2 -> rhel7-3 ) * Stop rsc12b ( rhel7-1 ) due to node availability * Start bundle11-1 ( rhel7-5 ) due to unrunnable bundle11-docker-1 start (blocked) * Start bundle11a:1 ( bundle11-1 ) due to unrunnable bundle11-docker-1 start (blocked) * Stop rsc13a ( rhel7-3 ) due to node availability - * Stop rsc14a:1 ( Master rhel7-4 ) due to node availability + * Stop rsc14a:1 ( Promoted rhel7-4 ) due to node availability Executing Cluster Transition: * Resource action: rsc1a stop on rhel7-2 * Resource action: rsc1b stop on rhel7-2 * Resource action: rsc2a stop on rhel7-4 * Resource action: rsc3a start on rhel7-2 * Resource action: rsc3b start on rhel7-2 * Resource action: rsc4a stop on rhel7-3 * Resource action: rsc5a stop on rhel7-1 * Pseudo action: group6a_stop_0 * Resource action: rsc6a2 stop on rhel7-2 * Pseudo action: group7a_stop_0 * Resource action: rsc7a2 stop on rhel7-3 * Pseudo action: rsc8a-clone_stop_0 * Resource action: rsc9c stop on rhel7-4 * Resource action: rsc10a stop on rhel7-2 * Resource action: rsc12b stop on rhel7-1 * Resource action: rsc13a stop on rhel7-3 * Pseudo action: rsc14a-clone_demote_0 * Pseudo action: bundle11_start_0 * Resource action: rsc1a start on rhel7-3 * Resource action: rsc1b start on rhel7-3 * Resource action: rsc3a monitor=10000 on rhel7-2 * Resource action: rsc3b monitor=10000 on rhel7-2 * Resource action: rsc6a1 stop on rhel7-2 * Pseudo action: group7a_stopped_0 * Resource action: rsc8a stop on rhel7-4 * Pseudo action: rsc8a-clone_stopped_0 * Resource action: rsc10a start on rhel7-3 * Pseudo action: bundle11-clone_start_0 * Resource action: rsc14a demote on 
rhel7-4 * Pseudo action: rsc14a-clone_demoted_0 * Pseudo action: rsc14a-clone_stop_0 * Resource action: rsc1a monitor=10000 on rhel7-3 * Resource action: rsc1b monitor=10000 on rhel7-3 * Pseudo action: group6a_stopped_0 * Resource action: rsc10a monitor=10000 on rhel7-3 * Pseudo action: bundle11-clone_running_0 * Resource action: rsc14a stop on rhel7-4 * Pseudo action: rsc14a-clone_stopped_0 * Pseudo action: bundle11_running_0 Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ bundle10-0@rhel7-2 bundle10-1@rhel7-3 bundle11-0@rhel7-1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * rsc1a (ocf:pacemaker:Dummy): Started rhel7-3 * rsc1b (ocf:pacemaker:Dummy): Started rhel7-3 * rsc2a (ocf:pacemaker:Dummy): Stopped * rsc2b (ocf:pacemaker:Dummy): Started rhel7-4 * rsc3a (ocf:pacemaker:Dummy): Started rhel7-2 * rsc3b (ocf:pacemaker:Dummy): Started rhel7-2 * rsc4a (ocf:pacemaker:Dummy): Stopped * rsc4b (ocf:pacemaker:Dummy): Started rhel7-3 * rsc5a (ocf:pacemaker:Dummy): Stopped * Resource Group: group5a: * rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1 * rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1 * Resource Group: group6a: * rsc6a1 (ocf:pacemaker:Dummy): Stopped * rsc6a2 (ocf:pacemaker:Dummy): Stopped * rsc6a (ocf:pacemaker:Dummy): Started rhel7-2 * Resource Group: group7a: * rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3 * rsc7a2 (ocf:pacemaker:Dummy): Stopped * Clone Set: rsc8a-clone [rsc8a]: * Started: [ rhel7-1 rhel7-3 ] * Stopped: [ rhel7-2 rhel7-4 rhel7-5 ] * Clone Set: rsc8b-clone [rsc8b]: * Started: [ rhel7-1 rhel7-3 rhel7-4 ] * rsc9a (ocf:pacemaker:Dummy): Started rhel7-4 * rsc9b (ocf:pacemaker:Dummy): Started rhel7-4 * rsc9c (ocf:pacemaker:Dummy): Stopped * rsc10a (ocf:pacemaker:Dummy): Started rhel7-3 * rsc11a (ocf:pacemaker:Dummy): Started rhel7-1 * rsc12a (ocf:pacemaker:Dummy): Started rhel7-1 * rsc12b (ocf:pacemaker:Dummy): Stopped * rsc12c (ocf:pacemaker:Dummy): Started rhel7-1 * Container bundle set: bundle10 [pcmktest:http]: * bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2 * bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3 * Container bundle set: bundle11 [pcmktest:http]: * bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1 * bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped * rsc13a (ocf:pacemaker:Dummy): Stopped * Clone Set: rsc13b-clone [rsc13b] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-4 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ] * Stopped: [ rhel7-5 ] * rsc14b (ocf:pacemaker:Dummy): Started rhel7-4 * Clone Set: rsc14a-clone [rsc14a] (promotable): - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ] * Stopped: [ rhel7-4 rhel7-5 ] diff --git a/cts/scheduler/summary/colocation_constraint_stops_master.summary b/cts/scheduler/summary/colocation_constraint_stops_master.summary index 279763ec78..f33575ba12 100644 --- a/cts/scheduler/summary/colocation_constraint_stops_master.summary +++ b/cts/scheduler/summary/colocation_constraint_stops_master.summary @@ -1,38 +1,38 @@ Current cluster status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): - * Masters: [ fc16-builder ] + * Promoted: [ fc16-builder ] Transition Summary: - * Stop NATIVE_RSC_A:0 ( Master fc16-builder ) due to node availability + * Stop NATIVE_RSC_A:0 ( Promoted fc16-builder ) due to node 
availability Executing Cluster Transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): * Stopped: [ fc16-builder fc16-builder2 ] diff --git a/cts/scheduler/summary/colocation_constraint_stops_slave.summary b/cts/scheduler/summary/colocation_constraint_stops_slave.summary index 33c0e86f14..4b16656a2c 100644 --- a/cts/scheduler/summary/colocation_constraint_stops_slave.summary +++ b/cts/scheduler/summary/colocation_constraint_stops_slave.summary @@ -1,36 +1,36 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): - * Slaves: [ fc16-builder ] + * Unpromoted: [ fc16-builder ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder (disabled) Transition Summary: - * Stop NATIVE_RSC_A:0 ( Slave fc16-builder ) due to node availability + * Stop NATIVE_RSC_A:0 ( Unpromoted fc16-builder ) due to node availability * Stop NATIVE_RSC_B ( fc16-builder ) due to node availability Executing Cluster Transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_B stop on fc16-builder * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 Revised Cluster Status: * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): * Stopped: [ fc16-builder fc16-builder2 ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped (disabled) diff --git a/cts/scheduler/summary/complex_enforce_colo.summary b/cts/scheduler/summary/complex_enforce_colo.summary index e5739789b9..195ad856ee 100644 --- a/cts/scheduler/summary/complex_enforce_colo.summary +++ b/cts/scheduler/summary/complex_enforce_colo.summary @@ -1,455 +1,455 @@ 3 of 132 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Full List of Resources: * node1-fence (stonith:fence_xvm): Started rhos6-node1 * node2-fence (stonith:fence_xvm): Started rhos6-node2 * node3-fence (stonith:fence_xvm): Started 
rhos6-node3 * Clone Set: lb-haproxy-clone [lb-haproxy]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * vip-db (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-qpid (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-keystone (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-glance (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-cinder (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-swift (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-neutron (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-nova (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-horizon (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-heat (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-ceilometer (ocf:heartbeat:IPaddr2): Started rhos6-node3 * Clone Set: galera-master [galera] (promotable): - * Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] + * Promoted: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: rabbitmq-server-clone [rabbitmq-server]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: memcached-clone [memcached]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: mongodb-clone [mongodb]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: keystone-clone [keystone]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-fs-clone [glance-fs]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-registry-clone [glance-registry]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-api-clone [glance-api]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * cinder-api (systemd:openstack-cinder-api): Started rhos6-node1 * cinder-scheduler (systemd:openstack-cinder-scheduler): Started rhos6-node1 * cinder-volume (systemd:openstack-cinder-volume): Started rhos6-node1 * Clone Set: swift-fs-clone [swift-fs]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-account-clone [swift-account]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-container-clone [swift-container]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-object-clone [swift-object]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-proxy-clone [swift-proxy]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * swift-object-expirer (systemd:openstack-swift-object-expirer): Started rhos6-node2 * Clone Set: neutron-server-clone [neutron-server]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-scale-clone [neutron-scale] (unique): * neutron-scale:0 (ocf:neutron:NeutronScale): Started rhos6-node3 * neutron-scale:1 (ocf:neutron:NeutronScale): Started rhos6-node2 * neutron-scale:2 (ocf:neutron:NeutronScale): Started rhos6-node1 * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-consoleauth-clone [nova-consoleauth]: * Started: [ rhos6-node1 
rhos6-node2 rhos6-node3 ] * Clone Set: nova-novncproxy-clone [nova-novncproxy]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-api-clone [nova-api]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-scheduler-clone [nova-scheduler]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-conductor-clone [nova-conductor]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * ceilometer-central (systemd:openstack-ceilometer-central): Started rhos6-node3 * Clone Set: ceilometer-collector-clone [ceilometer-collector]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-api-clone [ceilometer-api]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-delay-clone [ceilometer-delay]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-notification-clone [ceilometer-notification]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-clone [heat-api]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-cfn-clone [heat-api-cfn]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * heat-engine (systemd:openstack-heat-engine): Started rhos6-node2 * Clone Set: horizon-clone [horizon]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] Transition Summary: * Stop keystone:0 ( rhos6-node1 ) due to node availability * Stop keystone:1 ( rhos6-node2 ) due to node availability * Stop keystone:2 ( rhos6-node3 ) due to node availability * Stop glance-registry:0 ( rhos6-node1 ) * Stop glance-registry:1 ( rhos6-node2 ) * Stop glance-registry:2 ( rhos6-node3 ) * Stop glance-api:0 ( rhos6-node1 ) * Stop glance-api:1 ( rhos6-node2 ) * Stop glance-api:2 ( rhos6-node3 ) * Stop cinder-api ( rhos6-node1 ) due to unrunnable keystone-clone running * Stop cinder-scheduler ( rhos6-node1 ) due to required cinder-api start * Stop cinder-volume ( rhos6-node1 ) due to colocation with cinder-scheduler * Stop swift-account:0 ( rhos6-node1 ) * Stop swift-account:1 ( rhos6-node2 ) * Stop swift-account:2 ( rhos6-node3 ) * Stop swift-container:0 ( rhos6-node1 ) * Stop swift-container:1 ( rhos6-node2 ) * Stop swift-container:2 ( rhos6-node3 ) * Stop swift-object:0 ( rhos6-node1 ) * Stop swift-object:1 ( rhos6-node2 ) * Stop swift-object:2 ( rhos6-node3 ) * Stop swift-proxy:0 ( rhos6-node1 ) * Stop swift-proxy:1 ( rhos6-node2 ) * Stop swift-proxy:2 ( rhos6-node3 ) * Stop swift-object-expirer ( rhos6-node2 ) due to required swift-proxy-clone running * Stop neutron-server:0 ( rhos6-node1 ) * Stop neutron-server:1 ( rhos6-node2 ) * Stop neutron-server:2 ( rhos6-node3 ) * Stop neutron-scale:0 ( rhos6-node3 ) * Stop neutron-scale:1 ( rhos6-node2 ) * Stop neutron-scale:2 ( rhos6-node1 ) * Stop neutron-ovs-cleanup:0 ( rhos6-node1 ) * Stop neutron-ovs-cleanup:1 ( rhos6-node2 ) * Stop neutron-ovs-cleanup:2 ( rhos6-node3 ) * Stop neutron-netns-cleanup:0 ( rhos6-node1 ) * Stop neutron-netns-cleanup:1 ( rhos6-node2 ) * Stop neutron-netns-cleanup:2 ( rhos6-node3 ) * Stop neutron-openvswitch-agent:0 ( rhos6-node1 ) * Stop neutron-openvswitch-agent:1 ( rhos6-node2 ) * Stop neutron-openvswitch-agent:2 ( rhos6-node3 ) * Stop 
neutron-dhcp-agent:0 ( rhos6-node1 ) * Stop neutron-dhcp-agent:1 ( rhos6-node2 ) * Stop neutron-dhcp-agent:2 ( rhos6-node3 ) * Stop neutron-l3-agent:0 ( rhos6-node1 ) * Stop neutron-l3-agent:1 ( rhos6-node2 ) * Stop neutron-l3-agent:2 ( rhos6-node3 ) * Stop neutron-metadata-agent:0 ( rhos6-node1 ) * Stop neutron-metadata-agent:1 ( rhos6-node2 ) * Stop neutron-metadata-agent:2 ( rhos6-node3 ) * Stop nova-consoleauth:0 ( rhos6-node1 ) * Stop nova-consoleauth:1 ( rhos6-node2 ) * Stop nova-consoleauth:2 ( rhos6-node3 ) * Stop nova-novncproxy:0 ( rhos6-node1 ) * Stop nova-novncproxy:1 ( rhos6-node2 ) * Stop nova-novncproxy:2 ( rhos6-node3 ) * Stop nova-api:0 ( rhos6-node1 ) * Stop nova-api:1 ( rhos6-node2 ) * Stop nova-api:2 ( rhos6-node3 ) * Stop nova-scheduler:0 ( rhos6-node1 ) * Stop nova-scheduler:1 ( rhos6-node2 ) * Stop nova-scheduler:2 ( rhos6-node3 ) * Stop nova-conductor:0 ( rhos6-node1 ) * Stop nova-conductor:1 ( rhos6-node2 ) * Stop nova-conductor:2 ( rhos6-node3 ) * Stop ceilometer-central ( rhos6-node3 ) due to unrunnable keystone-clone running * Stop ceilometer-collector:0 ( rhos6-node1 ) due to required ceilometer-central start * Stop ceilometer-collector:1 ( rhos6-node2 ) due to required ceilometer-central start * Stop ceilometer-collector:2 ( rhos6-node3 ) due to required ceilometer-central start * Stop ceilometer-api:0 ( rhos6-node1 ) due to required ceilometer-collector:0 start * Stop ceilometer-api:1 ( rhos6-node2 ) due to required ceilometer-collector:1 start * Stop ceilometer-api:2 ( rhos6-node3 ) due to required ceilometer-collector:2 start * Stop ceilometer-delay:0 ( rhos6-node1 ) due to required ceilometer-api:0 start * Stop ceilometer-delay:1 ( rhos6-node2 ) due to required ceilometer-api:1 start * Stop ceilometer-delay:2 ( rhos6-node3 ) due to required ceilometer-api:2 start * Stop ceilometer-alarm-evaluator:0 ( rhos6-node1 ) due to required ceilometer-delay:0 start * Stop ceilometer-alarm-evaluator:1 ( rhos6-node2 ) due to required ceilometer-delay:1 start * Stop ceilometer-alarm-evaluator:2 ( rhos6-node3 ) due to required ceilometer-delay:2 start * Stop ceilometer-alarm-notifier:0 ( rhos6-node1 ) due to required ceilometer-alarm-evaluator:0 start * Stop ceilometer-alarm-notifier:1 ( rhos6-node2 ) due to required ceilometer-alarm-evaluator:1 start * Stop ceilometer-alarm-notifier:2 ( rhos6-node3 ) due to required ceilometer-alarm-evaluator:2 start * Stop ceilometer-notification:0 ( rhos6-node1 ) due to required ceilometer-alarm-notifier:0 start * Stop ceilometer-notification:1 ( rhos6-node2 ) due to required ceilometer-alarm-notifier:1 start * Stop ceilometer-notification:2 ( rhos6-node3 ) due to required ceilometer-alarm-notifier:2 start * Stop heat-api:0 ( rhos6-node1 ) due to required ceilometer-notification:0 start * Stop heat-api:1 ( rhos6-node2 ) due to required ceilometer-notification:1 start * Stop heat-api:2 ( rhos6-node3 ) due to required ceilometer-notification:2 start * Stop heat-api-cfn:0 ( rhos6-node1 ) due to required heat-api:0 start * Stop heat-api-cfn:1 ( rhos6-node2 ) due to required heat-api:1 start * Stop heat-api-cfn:2 ( rhos6-node3 ) due to required heat-api:2 start * Stop heat-api-cloudwatch:0 ( rhos6-node1 ) due to required heat-api-cfn:0 start * Stop heat-api-cloudwatch:1 ( rhos6-node2 ) due to required heat-api-cfn:1 start * Stop heat-api-cloudwatch:2 ( rhos6-node3 ) due to required heat-api-cfn:2 start * Stop heat-engine ( rhos6-node2 ) due to colocation with heat-api-cloudwatch-clone Executing Cluster Transition: * Pseudo action: 
glance-api-clone_stop_0 * Resource action: cinder-volume stop on rhos6-node1 * Pseudo action: swift-object-clone_stop_0 * Resource action: swift-object-expirer stop on rhos6-node2 * Pseudo action: neutron-metadata-agent-clone_stop_0 * Pseudo action: nova-conductor-clone_stop_0 * Resource action: heat-engine stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node1 * Resource action: glance-api stop on rhos6-node2 * Resource action: glance-api stop on rhos6-node3 * Pseudo action: glance-api-clone_stopped_0 * Resource action: cinder-scheduler stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node1 * Resource action: swift-object stop on rhos6-node2 * Resource action: swift-object stop on rhos6-node3 * Pseudo action: swift-object-clone_stopped_0 * Pseudo action: swift-proxy-clone_stop_0 * Resource action: neutron-metadata-agent stop on rhos6-node1 * Resource action: neutron-metadata-agent stop on rhos6-node2 * Resource action: neutron-metadata-agent stop on rhos6-node3 * Pseudo action: neutron-metadata-agent-clone_stopped_0 * Resource action: nova-conductor stop on rhos6-node1 * Resource action: nova-conductor stop on rhos6-node2 * Resource action: nova-conductor stop on rhos6-node3 * Pseudo action: nova-conductor-clone_stopped_0 * Pseudo action: heat-api-cloudwatch-clone_stop_0 * Pseudo action: glance-registry-clone_stop_0 * Resource action: cinder-api stop on rhos6-node1 * Pseudo action: swift-container-clone_stop_0 * Resource action: swift-proxy stop on rhos6-node1 * Resource action: swift-proxy stop on rhos6-node2 * Resource action: swift-proxy stop on rhos6-node3 * Pseudo action: swift-proxy-clone_stopped_0 * Pseudo action: neutron-l3-agent-clone_stop_0 * Pseudo action: nova-scheduler-clone_stop_0 * Resource action: heat-api-cloudwatch stop on rhos6-node1 * Resource action: heat-api-cloudwatch stop on rhos6-node2 * Resource action: heat-api-cloudwatch stop on rhos6-node3 * Pseudo action: heat-api-cloudwatch-clone_stopped_0 * Resource action: glance-registry stop on rhos6-node1 * Resource action: glance-registry stop on rhos6-node2 * Resource action: glance-registry stop on rhos6-node3 * Pseudo action: glance-registry-clone_stopped_0 * Resource action: swift-container stop on rhos6-node1 * Resource action: swift-container stop on rhos6-node2 * Resource action: swift-container stop on rhos6-node3 * Pseudo action: swift-container-clone_stopped_0 * Resource action: neutron-l3-agent stop on rhos6-node1 * Resource action: neutron-l3-agent stop on rhos6-node2 * Resource action: neutron-l3-agent stop on rhos6-node3 * Pseudo action: neutron-l3-agent-clone_stopped_0 * Resource action: nova-scheduler stop on rhos6-node1 * Resource action: nova-scheduler stop on rhos6-node2 * Resource action: nova-scheduler stop on rhos6-node3 * Pseudo action: nova-scheduler-clone_stopped_0 * Pseudo action: heat-api-cfn-clone_stop_0 * Pseudo action: swift-account-clone_stop_0 * Pseudo action: neutron-dhcp-agent-clone_stop_0 * Pseudo action: nova-api-clone_stop_0 * Resource action: heat-api-cfn stop on rhos6-node1 * Resource action: heat-api-cfn stop on rhos6-node2 * Resource action: heat-api-cfn stop on rhos6-node3 * Pseudo action: heat-api-cfn-clone_stopped_0 * Resource action: swift-account stop on rhos6-node1 * Resource action: swift-account stop on rhos6-node2 * Resource action: swift-account stop on rhos6-node3 * Pseudo action: swift-account-clone_stopped_0 * Resource action: neutron-dhcp-agent stop on rhos6-node1 * Resource action: neutron-dhcp-agent stop on rhos6-node2 * Resource 
action: neutron-dhcp-agent stop on rhos6-node3 * Pseudo action: neutron-dhcp-agent-clone_stopped_0 * Resource action: nova-api stop on rhos6-node1 * Resource action: nova-api stop on rhos6-node2 * Resource action: nova-api stop on rhos6-node3 * Pseudo action: nova-api-clone_stopped_0 * Pseudo action: heat-api-clone_stop_0 * Pseudo action: neutron-openvswitch-agent-clone_stop_0 * Pseudo action: nova-novncproxy-clone_stop_0 * Resource action: heat-api stop on rhos6-node1 * Resource action: heat-api stop on rhos6-node2 * Resource action: heat-api stop on rhos6-node3 * Pseudo action: heat-api-clone_stopped_0 * Resource action: neutron-openvswitch-agent stop on rhos6-node1 * Resource action: neutron-openvswitch-agent stop on rhos6-node2 * Resource action: neutron-openvswitch-agent stop on rhos6-node3 * Pseudo action: neutron-openvswitch-agent-clone_stopped_0 * Resource action: nova-novncproxy stop on rhos6-node1 * Resource action: nova-novncproxy stop on rhos6-node2 * Resource action: nova-novncproxy stop on rhos6-node3 * Pseudo action: nova-novncproxy-clone_stopped_0 * Pseudo action: ceilometer-notification-clone_stop_0 * Pseudo action: neutron-netns-cleanup-clone_stop_0 * Pseudo action: nova-consoleauth-clone_stop_0 * Resource action: ceilometer-notification stop on rhos6-node1 * Resource action: ceilometer-notification stop on rhos6-node2 * Resource action: ceilometer-notification stop on rhos6-node3 * Pseudo action: ceilometer-notification-clone_stopped_0 * Resource action: neutron-netns-cleanup stop on rhos6-node1 * Resource action: neutron-netns-cleanup stop on rhos6-node2 * Resource action: neutron-netns-cleanup stop on rhos6-node3 * Pseudo action: neutron-netns-cleanup-clone_stopped_0 * Resource action: nova-consoleauth stop on rhos6-node1 * Resource action: nova-consoleauth stop on rhos6-node2 * Resource action: nova-consoleauth stop on rhos6-node3 * Pseudo action: nova-consoleauth-clone_stopped_0 * Pseudo action: ceilometer-alarm-notifier-clone_stop_0 * Pseudo action: neutron-ovs-cleanup-clone_stop_0 * Resource action: ceilometer-alarm-notifier stop on rhos6-node1 * Resource action: ceilometer-alarm-notifier stop on rhos6-node2 * Resource action: ceilometer-alarm-notifier stop on rhos6-node3 * Pseudo action: ceilometer-alarm-notifier-clone_stopped_0 * Resource action: neutron-ovs-cleanup stop on rhos6-node1 * Resource action: neutron-ovs-cleanup stop on rhos6-node2 * Resource action: neutron-ovs-cleanup stop on rhos6-node3 * Pseudo action: neutron-ovs-cleanup-clone_stopped_0 * Pseudo action: ceilometer-alarm-evaluator-clone_stop_0 * Pseudo action: neutron-scale-clone_stop_0 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node1 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node2 * Resource action: ceilometer-alarm-evaluator stop on rhos6-node3 * Pseudo action: ceilometer-alarm-evaluator-clone_stopped_0 * Resource action: neutron-scale:0 stop on rhos6-node3 * Resource action: neutron-scale:1 stop on rhos6-node2 * Resource action: neutron-scale:2 stop on rhos6-node1 * Pseudo action: neutron-scale-clone_stopped_0 * Pseudo action: ceilometer-delay-clone_stop_0 * Pseudo action: neutron-server-clone_stop_0 * Resource action: ceilometer-delay stop on rhos6-node1 * Resource action: ceilometer-delay stop on rhos6-node2 * Resource action: ceilometer-delay stop on rhos6-node3 * Pseudo action: ceilometer-delay-clone_stopped_0 * Resource action: neutron-server stop on rhos6-node1 * Resource action: neutron-server stop on rhos6-node2 * Resource action: neutron-server stop 
on rhos6-node3 * Pseudo action: neutron-server-clone_stopped_0 * Pseudo action: ceilometer-api-clone_stop_0 * Resource action: ceilometer-api stop on rhos6-node1 * Resource action: ceilometer-api stop on rhos6-node2 * Resource action: ceilometer-api stop on rhos6-node3 * Pseudo action: ceilometer-api-clone_stopped_0 * Pseudo action: ceilometer-collector-clone_stop_0 * Resource action: ceilometer-collector stop on rhos6-node1 * Resource action: ceilometer-collector stop on rhos6-node2 * Resource action: ceilometer-collector stop on rhos6-node3 * Pseudo action: ceilometer-collector-clone_stopped_0 * Resource action: ceilometer-central stop on rhos6-node3 * Pseudo action: keystone-clone_stop_0 * Resource action: keystone stop on rhos6-node1 * Resource action: keystone stop on rhos6-node2 * Resource action: keystone stop on rhos6-node3 * Pseudo action: keystone-clone_stopped_0 Revised Cluster Status: * Node List: * Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Full List of Resources: * node1-fence (stonith:fence_xvm): Started rhos6-node1 * node2-fence (stonith:fence_xvm): Started rhos6-node2 * node3-fence (stonith:fence_xvm): Started rhos6-node3 * Clone Set: lb-haproxy-clone [lb-haproxy]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * vip-db (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-qpid (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-keystone (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-glance (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-cinder (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-swift (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-neutron (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-nova (ocf:heartbeat:IPaddr2): Started rhos6-node3 * vip-horizon (ocf:heartbeat:IPaddr2): Started rhos6-node1 * vip-heat (ocf:heartbeat:IPaddr2): Started rhos6-node2 * vip-ceilometer (ocf:heartbeat:IPaddr2): Started rhos6-node3 * Clone Set: galera-master [galera] (promotable): - * Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ] + * Promoted: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: rabbitmq-server-clone [rabbitmq-server]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: memcached-clone [memcached]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: mongodb-clone [mongodb]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: keystone-clone [keystone]: * Stopped (disabled): [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-fs-clone [glance-fs]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-registry-clone [glance-registry]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: glance-api-clone [glance-api]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * cinder-api (systemd:openstack-cinder-api): Stopped * cinder-scheduler (systemd:openstack-cinder-scheduler): Stopped * cinder-volume (systemd:openstack-cinder-volume): Stopped * Clone Set: swift-fs-clone [swift-fs]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-account-clone [swift-account]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-container-clone [swift-container]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-object-clone [swift-object]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: swift-proxy-clone [swift-proxy]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped * Clone Set: 
neutron-server-clone [neutron-server]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-scale-clone [neutron-scale] (unique): * neutron-scale:0 (ocf:neutron:NeutronScale): Stopped * neutron-scale:1 (ocf:neutron:NeutronScale): Stopped * neutron-scale:2 (ocf:neutron:NeutronScale): Stopped * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-consoleauth-clone [nova-consoleauth]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-novncproxy-clone [nova-novncproxy]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-api-clone [nova-api]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-scheduler-clone [nova-scheduler]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: nova-conductor-clone [nova-conductor]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * ceilometer-central (systemd:openstack-ceilometer-central): Stopped * Clone Set: ceilometer-collector-clone [ceilometer-collector]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-api-clone [ceilometer-api]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-delay-clone [ceilometer-delay]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: ceilometer-notification-clone [ceilometer-notification]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-clone [heat-api]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-cfn-clone [heat-api-cfn]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]: * Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ] * heat-engine (systemd:openstack-heat-engine): Stopped * Clone Set: horizon-clone [horizon]: * Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ] diff --git a/cts/scheduler/summary/dc-fence-ordering.summary b/cts/scheduler/summary/dc-fence-ordering.summary index f1da1386d2..ac46031f07 100644 --- a/cts/scheduler/summary/dc-fence-ordering.summary +++ b/cts/scheduler/summary/dc-fence-ordering.summary @@ -1,82 +1,82 @@ Using the original execution date of: 2018-11-28 18:37:16Z Current cluster status: * Node List: * Node rhel7-1: UNCLEAN (online) * Online: [ rhel7-2 rhel7-4 rhel7-5 ] * OFFLINE: [ rhel7-3 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped * FencingPass (stonith:fence_dummy): Stopped * FencingFail (stonith:fence_dummy): Stopped * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-5 
(ocf:heartbeat:IPaddr2): Stopped * migrator (ocf:pacemaker:Dummy): Stopped * Clone Set: Connectivity [ping-1]: * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Clone Set: promotable-1 [stateful-1] (promotable): - * Masters: [ rhel7-1 ] - * Slaves: [ rhel7-2 rhel7-4 rhel7-5 ] + * Promoted: [ rhel7-1 ] + * Unpromoted: [ rhel7-2 rhel7-4 rhel7-5 ] * Stopped: [ rhel7-3 ] * Resource Group: group-1: * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-1 * petulant (service:pacemaker-cts-dummyd@10): FAILED rhel7-1 * r192.168.122.208 (ocf:heartbeat:IPaddr2): Stopped * lsb-dummy (lsb:LSBDummy): Stopped Transition Summary: * Fence (reboot) rhel7-1 'petulant failed there' - * Stop stateful-1:0 ( Slave rhel7-5 ) due to node availability - * Stop stateful-1:1 ( Master rhel7-1 ) due to node availability - * Stop stateful-1:2 ( Slave rhel7-2 ) due to node availability - * Stop stateful-1:3 ( Slave rhel7-4 ) due to node availability + * Stop stateful-1:0 ( Unpromoted rhel7-5 ) due to node availability + * Stop stateful-1:1 ( Promoted rhel7-1 ) due to node availability + * Stop stateful-1:2 ( Unpromoted rhel7-2 ) due to node availability + * Stop stateful-1:3 ( Unpromoted rhel7-4 ) due to node availability * Stop r192.168.122.207 ( rhel7-1 ) due to node availability * Stop petulant ( rhel7-1 ) due to node availability Executing Cluster Transition: * Fencing rhel7-1 (reboot) * Pseudo action: group-1_stop_0 * Pseudo action: petulant_stop_0 * Pseudo action: r192.168.122.207_stop_0 * Pseudo action: group-1_stopped_0 * Pseudo action: promotable-1_demote_0 * Pseudo action: stateful-1_demote_0 * Pseudo action: promotable-1_demoted_0 * Pseudo action: promotable-1_stop_0 * Resource action: stateful-1 stop on rhel7-5 * Pseudo action: stateful-1_stop_0 * Resource action: stateful-1 stop on rhel7-2 * Resource action: stateful-1 stop on rhel7-4 * Pseudo action: promotable-1_stopped_0 * Cluster action: do_shutdown on rhel7-5 * Cluster action: do_shutdown on rhel7-4 * Cluster action: do_shutdown on rhel7-2 Using the original execution date of: 2018-11-28 18:37:16Z Revised Cluster Status: * Node List: * Online: [ rhel7-2 rhel7-4 rhel7-5 ] * OFFLINE: [ rhel7-1 rhel7-3 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped * FencingPass (stonith:fence_dummy): Stopped * FencingFail (stonith:fence_dummy): Stopped * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Stopped * rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Stopped * migrator (ocf:pacemaker:Dummy): Stopped * Clone Set: Connectivity [ping-1]: * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Clone Set: promotable-1 [stateful-1] (promotable): * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Resource Group: group-1: * r192.168.122.207 (ocf:heartbeat:IPaddr2): Stopped * petulant (service:pacemaker-cts-dummyd@10): Stopped * r192.168.122.208 (ocf:heartbeat:IPaddr2): Stopped * lsb-dummy (lsb:LSBDummy): Stopped diff --git a/cts/scheduler/summary/failed-demote-recovery-master.summary b/cts/scheduler/summary/failed-demote-recovery-master.summary index 356aa38b4b..2d11c46050 100644 --- a/cts/scheduler/summary/failed-demote-recovery-master.summary +++ b/cts/scheduler/summary/failed-demote-recovery-master.summary @@ -1,60 +1,60 @@ Using the original execution date of: 2017-11-30 12:37:50Z Current cluster status: * Node List: * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] * Full List of Resources: * 
fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96 * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95 * Clone Set: DB2_HADR-master [DB2_HADR] (promotable): * DB2_HADR (ocf:heartbeat:db2): FAILED fastvm-rhel-7-4-96 - * Slaves: [ fastvm-rhel-7-4-95 ] + * Unpromoted: [ fastvm-rhel-7-4-95 ] Transition Summary: - * Recover DB2_HADR:1 ( Slave -> Master fastvm-rhel-7-4-96 ) + * Recover DB2_HADR:1 ( Unpromoted -> Promoted fastvm-rhel-7-4-96 ) Executing Cluster Transition: * Pseudo action: DB2_HADR-master_pre_notify_stop_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-pre_notify_stop_0 * Pseudo action: DB2_HADR-master_stop_0 * Resource action: DB2_HADR stop on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_stopped_0 * Pseudo action: DB2_HADR-master_post_notify_stopped_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Pseudo action: DB2_HADR-master_confirmed-post_notify_stopped_0 * Pseudo action: DB2_HADR-master_pre_notify_start_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Pseudo action: DB2_HADR-master_confirmed-pre_notify_start_0 * Pseudo action: DB2_HADR-master_start_0 * Resource action: DB2_HADR start on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_running_0 * Pseudo action: DB2_HADR-master_post_notify_running_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-post_notify_running_0 * Pseudo action: DB2_HADR-master_pre_notify_promote_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-pre_notify_promote_0 * Pseudo action: DB2_HADR-master_promote_0 * Resource action: DB2_HADR promote on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_promoted_0 * Pseudo action: DB2_HADR-master_post_notify_promoted_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-post_notify_promoted_0 * Resource action: DB2_HADR monitor=22000 on fastvm-rhel-7-4-96 Using the original execution date of: 2017-11-30 12:37:50Z Revised Cluster Status: * Node List: * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] * Full List of Resources: * fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96 * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95 * Clone Set: DB2_HADR-master [DB2_HADR] (promotable): - * Masters: [ fastvm-rhel-7-4-96 ] - * Slaves: [ fastvm-rhel-7-4-95 ] + * Promoted: [ fastvm-rhel-7-4-96 ] + * Unpromoted: [ fastvm-rhel-7-4-95 ] diff --git a/cts/scheduler/summary/failed-demote-recovery.summary b/cts/scheduler/summary/failed-demote-recovery.summary index 898b1c86c6..8c91259cd9 100644 --- a/cts/scheduler/summary/failed-demote-recovery.summary +++ b/cts/scheduler/summary/failed-demote-recovery.summary @@ -1,48 +1,48 @@ Using the original execution date of: 2017-11-30 12:37:50Z Current cluster status: * Node List: * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] * Full List of Resources: * fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96 * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95 * Clone Set: DB2_HADR-master [DB2_HADR] (promotable): * DB2_HADR (ocf:heartbeat:db2): FAILED fastvm-rhel-7-4-96 - * Slaves: [ 
fastvm-rhel-7-4-95 ] + * Unpromoted: [ fastvm-rhel-7-4-95 ] Transition Summary: - * Recover DB2_HADR:1 ( Slave fastvm-rhel-7-4-96 ) + * Recover DB2_HADR:1 ( Unpromoted fastvm-rhel-7-4-96 ) Executing Cluster Transition: * Pseudo action: DB2_HADR-master_pre_notify_stop_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-pre_notify_stop_0 * Pseudo action: DB2_HADR-master_stop_0 * Resource action: DB2_HADR stop on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_stopped_0 * Pseudo action: DB2_HADR-master_post_notify_stopped_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Pseudo action: DB2_HADR-master_confirmed-post_notify_stopped_0 * Pseudo action: DB2_HADR-master_pre_notify_start_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Pseudo action: DB2_HADR-master_confirmed-pre_notify_start_0 * Pseudo action: DB2_HADR-master_start_0 * Resource action: DB2_HADR start on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_running_0 * Pseudo action: DB2_HADR-master_post_notify_running_0 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-95 * Resource action: DB2_HADR notify on fastvm-rhel-7-4-96 * Pseudo action: DB2_HADR-master_confirmed-post_notify_running_0 * Resource action: DB2_HADR monitor=5000 on fastvm-rhel-7-4-96 Using the original execution date of: 2017-11-30 12:37:50Z Revised Cluster Status: * Node List: * Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] * Full List of Resources: * fence-fastvm-rhel-7-4-95 (stonith:fence_xvm): Started fastvm-rhel-7-4-96 * fence-fastvm-rhel-7-4-96 (stonith:fence_xvm): Started fastvm-rhel-7-4-95 * Clone Set: DB2_HADR-master [DB2_HADR] (promotable): - * Slaves: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] + * Unpromoted: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ] diff --git a/cts/scheduler/summary/group-dependents.summary b/cts/scheduler/summary/group-dependents.summary index 0a9cd8a7ba..ae880fbb4c 100644 --- a/cts/scheduler/summary/group-dependents.summary +++ b/cts/scheduler/summary/group-dependents.summary @@ -1,196 +1,196 @@ Current cluster status: * Node List: * Online: [ asttest1 asttest2 ] * Full List of Resources: * Resource Group: voip: * mysqld (lsb:mysql): Started asttest1 * dahdi (lsb:dahdi): Started asttest1 * fonulator (lsb:fonulator): Stopped * asterisk (lsb:asterisk-11.0.1): Stopped * iax2_mon (lsb:iax2_mon): Stopped * httpd (lsb:apache2): Stopped * tftp (lsb:tftp-srce): Stopped * Resource Group: ip_voip_routes: * ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest1 * ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest1 * Resource Group: ip_voip_addresses_p: * ip_voip_vlan850 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan851 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest1 * ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest1 * Clone Set: cl_route [ip_voip_route_default]: * Started: [ asttest1 asttest2 ] * fs_drbd (ocf:heartbeat:Filesystem): Started asttest1 * Clone Set: ms_drbd [drbd] (promotable): - * Masters: [ asttest1 ] - * Slaves: [ asttest2 ] + * Promoted: [ asttest1 ] + * Unpromoted: [ asttest2 ] Transition Summary: * Migrate mysqld ( asttest1 -> asttest2 ) * Migrate dahdi ( asttest1 -> 
asttest2 ) * Start fonulator ( asttest2 ) * Start asterisk ( asttest2 ) * Start iax2_mon ( asttest2 ) * Start httpd ( asttest2 ) * Start tftp ( asttest2 ) * Migrate ip_voip_route_test1 ( asttest1 -> asttest2 ) * Migrate ip_voip_route_test2 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan850 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan998 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan851 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan852 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan853 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan854 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan855 ( asttest1 -> asttest2 ) * Migrate ip_voip_vlan856 ( asttest1 -> asttest2 ) * Move fs_drbd ( asttest1 -> asttest2 ) - * Demote drbd:0 ( Master -> Slave asttest1 ) - * Promote drbd:1 ( Slave -> Master asttest2 ) + * Demote drbd:0 ( Promoted -> Unpromoted asttest1 ) + * Promote drbd:1 ( Unpromoted -> Promoted asttest2 ) Executing Cluster Transition: * Pseudo action: voip_stop_0 * Resource action: mysqld migrate_to on asttest1 * Resource action: ip_voip_route_test1 migrate_to on asttest1 * Resource action: ip_voip_route_test2 migrate_to on asttest1 * Resource action: ip_voip_vlan850 migrate_to on asttest1 * Resource action: ip_voip_vlan998 migrate_to on asttest1 * Resource action: ip_voip_vlan851 migrate_to on asttest1 * Resource action: ip_voip_vlan852 migrate_to on asttest1 * Resource action: ip_voip_vlan853 migrate_to on asttest1 * Resource action: ip_voip_vlan854 migrate_to on asttest1 * Resource action: ip_voip_vlan855 migrate_to on asttest1 * Resource action: ip_voip_vlan856 migrate_to on asttest1 * Resource action: drbd:1 cancel=31000 on asttest2 * Pseudo action: ms_drbd_pre_notify_demote_0 * Resource action: mysqld migrate_from on asttest2 * Resource action: dahdi migrate_to on asttest1 * Resource action: ip_voip_route_test1 migrate_from on asttest2 * Resource action: ip_voip_route_test2 migrate_from on asttest2 * Resource action: ip_voip_vlan850 migrate_from on asttest2 * Resource action: ip_voip_vlan998 migrate_from on asttest2 * Resource action: ip_voip_vlan851 migrate_from on asttest2 * Resource action: ip_voip_vlan852 migrate_from on asttest2 * Resource action: ip_voip_vlan853 migrate_from on asttest2 * Resource action: ip_voip_vlan854 migrate_from on asttest2 * Resource action: ip_voip_vlan855 migrate_from on asttest2 * Resource action: ip_voip_vlan856 migrate_from on asttest2 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0 * Resource action: dahdi migrate_from on asttest2 * Resource action: dahdi stop on asttest1 * Resource action: mysqld stop on asttest1 * Pseudo action: voip_stopped_0 * Pseudo action: ip_voip_routes_stop_0 * Resource action: ip_voip_route_test1 stop on asttest1 * Resource action: ip_voip_route_test2 stop on asttest1 * Pseudo action: ip_voip_routes_stopped_0 * Pseudo action: ip_voip_addresses_p_stop_0 * Resource action: ip_voip_vlan850 stop on asttest1 * Resource action: ip_voip_vlan998 stop on asttest1 * Resource action: ip_voip_vlan851 stop on asttest1 * Resource action: ip_voip_vlan852 stop on asttest1 * Resource action: ip_voip_vlan853 stop on asttest1 * Resource action: ip_voip_vlan854 stop on asttest1 * Resource action: ip_voip_vlan855 stop on asttest1 * Resource action: ip_voip_vlan856 stop on asttest1 * Pseudo action: ip_voip_addresses_p_stopped_0 * Resource action: fs_drbd stop on asttest1 * Pseudo action: ms_drbd_demote_0 * Resource action: drbd:0 demote on asttest1 * 
Pseudo action: ms_drbd_demoted_0 * Pseudo action: ms_drbd_post_notify_demoted_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_pre_notify_promote_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_promote_0 * Resource action: drbd:1 promote on asttest2 * Pseudo action: ms_drbd_promoted_0 * Pseudo action: ms_drbd_post_notify_promoted_0 * Resource action: drbd:0 notify on asttest1 * Resource action: drbd:1 notify on asttest2 * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0 * Resource action: fs_drbd start on asttest2 * Resource action: drbd:0 monitor=31000 on asttest1 * Pseudo action: ip_voip_addresses_p_start_0 * Pseudo action: ip_voip_vlan850_start_0 * Pseudo action: ip_voip_vlan998_start_0 * Pseudo action: ip_voip_vlan851_start_0 * Pseudo action: ip_voip_vlan852_start_0 * Pseudo action: ip_voip_vlan853_start_0 * Pseudo action: ip_voip_vlan854_start_0 * Pseudo action: ip_voip_vlan855_start_0 * Pseudo action: ip_voip_vlan856_start_0 * Resource action: fs_drbd monitor=1000 on asttest2 * Pseudo action: ip_voip_addresses_p_running_0 * Resource action: ip_voip_vlan850 monitor=1000 on asttest2 * Resource action: ip_voip_vlan998 monitor=1000 on asttest2 * Resource action: ip_voip_vlan851 monitor=1000 on asttest2 * Resource action: ip_voip_vlan852 monitor=1000 on asttest2 * Resource action: ip_voip_vlan853 monitor=1000 on asttest2 * Resource action: ip_voip_vlan854 monitor=1000 on asttest2 * Resource action: ip_voip_vlan855 monitor=1000 on asttest2 * Resource action: ip_voip_vlan856 monitor=1000 on asttest2 * Pseudo action: ip_voip_routes_start_0 * Pseudo action: ip_voip_route_test1_start_0 * Pseudo action: ip_voip_route_test2_start_0 * Pseudo action: ip_voip_routes_running_0 * Resource action: ip_voip_route_test1 monitor=1000 on asttest2 * Resource action: ip_voip_route_test2 monitor=1000 on asttest2 * Pseudo action: voip_start_0 * Pseudo action: mysqld_start_0 * Pseudo action: dahdi_start_0 * Resource action: fonulator start on asttest2 * Resource action: asterisk start on asttest2 * Resource action: iax2_mon start on asttest2 * Resource action: httpd start on asttest2 * Resource action: tftp start on asttest2 * Pseudo action: voip_running_0 * Resource action: mysqld monitor=1000 on asttest2 * Resource action: dahdi monitor=1000 on asttest2 * Resource action: fonulator monitor=1000 on asttest2 * Resource action: asterisk monitor=1000 on asttest2 * Resource action: iax2_mon monitor=60000 on asttest2 * Resource action: httpd monitor=1000 on asttest2 * Resource action: tftp monitor=60000 on asttest2 Revised Cluster Status: * Node List: * Online: [ asttest1 asttest2 ] * Full List of Resources: * Resource Group: voip: * mysqld (lsb:mysql): Started asttest2 * dahdi (lsb:dahdi): Started asttest2 * fonulator (lsb:fonulator): Started asttest2 * asterisk (lsb:asterisk-11.0.1): Started asttest2 * iax2_mon (lsb:iax2_mon): Started asttest2 * httpd (lsb:apache2): Started asttest2 * tftp (lsb:tftp-srce): Started asttest2 * Resource Group: ip_voip_routes: * ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest2 * ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest2 * Resource Group: ip_voip_addresses_p: * ip_voip_vlan850 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan851 
(ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest2 * ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest2 * Clone Set: cl_route [ip_voip_route_default]: * Started: [ asttest1 asttest2 ] * fs_drbd (ocf:heartbeat:Filesystem): Started asttest2 * Clone Set: ms_drbd [drbd] (promotable): - * Masters: [ asttest2 ] - * Slaves: [ asttest1 ] + * Promoted: [ asttest2 ] + * Unpromoted: [ asttest1 ] diff --git a/cts/scheduler/summary/guest-host-not-fenceable.summary b/cts/scheduler/summary/guest-host-not-fenceable.summary index dc2d6885f7..69b456a22f 100644 --- a/cts/scheduler/summary/guest-host-not-fenceable.summary +++ b/cts/scheduler/summary/guest-host-not-fenceable.summary @@ -1,91 +1,91 @@ Using the original execution date of: 2019-08-26 04:52:42Z Current cluster status: * Node List: * Node node2: UNCLEAN (offline) * Node node3: UNCLEAN (offline) * Online: [ node1 ] * GuestOnline: [ galera-bundle-0@node1 rabbitmq-bundle-0@node1 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started node1 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN) * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN) * Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Master node1 - * galera-bundle-1 (ocf:heartbeat:galera): FAILED Master node2 (UNCLEAN) - * galera-bundle-2 (ocf:heartbeat:galera): FAILED Master node3 (UNCLEAN) + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted node1 + * galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN) + * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN) * stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN) * stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN) * stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN) Transition Summary: * Stop rabbitmq-bundle-docker-0 ( node1 ) due to no quorum * Stop rabbitmq-bundle-0 ( node1 ) due to no quorum * Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to no quorum * Stop rabbitmq-bundle-docker-1 ( node2 ) due to node availability (blocked) * Stop rabbitmq-bundle-1 ( node2 ) due to no quorum (blocked) * Stop rabbitmq:1 ( rabbitmq-bundle-1 ) due to no quorum (blocked) * Stop rabbitmq-bundle-docker-2 ( node3 ) due to node availability (blocked) * Stop rabbitmq-bundle-2 ( node3 ) due to no quorum (blocked) * Stop rabbitmq:2 ( rabbitmq-bundle-2 ) due to no quorum (blocked) * Stop galera-bundle-docker-0 ( node1 ) due to no quorum * Stop galera-bundle-0 ( node1 ) due to no quorum - * Stop galera:0 ( Master galera-bundle-0 ) due to no quorum + * Stop galera:0 ( Promoted galera-bundle-0 ) due to no quorum * Stop galera-bundle-docker-1 ( node2 ) due to node availability (blocked) * Stop galera-bundle-1 ( node2 ) due to no quorum (blocked) - * Stop galera:1 ( Master galera-bundle-1 ) due to no quorum (blocked) + * Stop galera:1 ( Promoted galera-bundle-1 ) due to no quorum (blocked) * Stop galera-bundle-docker-2 ( node3 ) due to node availability (blocked) * Stop galera-bundle-2 ( node3 ) due to no quorum (blocked) - * Stop galera:2 
( Master galera-bundle-2 ) due to no quorum (blocked) + * Stop galera:2 ( Promoted galera-bundle-2 ) due to no quorum (blocked) * Stop stonith-fence_ipmilan-node1 ( node2 ) due to node availability (blocked) * Stop stonith-fence_ipmilan-node3 ( node2 ) due to no quorum (blocked) * Stop stonith-fence_ipmilan-node2 ( node3 ) due to no quorum (blocked) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: rabbitmq-bundle_stop_0 * Resource action: rabbitmq notify on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0 * Pseudo action: rabbitmq-bundle-clone_stop_0 * Pseudo action: galera-bundle-master_demote_0 * Resource action: rabbitmq stop on rabbitmq-bundle-0 * Pseudo action: rabbitmq-bundle-clone_stopped_0 * Resource action: rabbitmq-bundle-0 stop on node1 * Resource action: rabbitmq-bundle-0 cancel=60000 on node1 * Resource action: galera demote on galera-bundle-0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0 * Resource action: rabbitmq-bundle-docker-0 stop on node1 * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0 * Resource action: galera stop on galera-bundle-0 * Pseudo action: galera-bundle-master_stopped_0 * Resource action: galera-bundle-0 stop on node1 * Resource action: galera-bundle-0 cancel=60000 on node1 * Pseudo action: rabbitmq-bundle_stopped_0 * Resource action: galera-bundle-docker-0 stop on node1 * Pseudo action: galera-bundle_stopped_0 Using the original execution date of: 2019-08-26 04:52:42Z Revised Cluster Status: * Node List: * Node node2: UNCLEAN (offline) * Node node3: UNCLEAN (offline) * Online: [ node1 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN) * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN) * Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped - * galera-bundle-1 (ocf:heartbeat:galera): FAILED Master node2 (UNCLEAN) - * galera-bundle-2 (ocf:heartbeat:galera): FAILED Master node3 (UNCLEAN) + * galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN) + * galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN) * stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN) * stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN) * stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN) diff --git a/cts/scheduler/summary/guest-node-cleanup.summary b/cts/scheduler/summary/guest-node-cleanup.summary index 02c083f6a0..4a7ac74a18 100644 --- a/cts/scheduler/summary/guest-node-cleanup.summary +++ b/cts/scheduler/summary/guest-node-cleanup.summary @@ -1,55 +1,55 @@ Using the original execution date of: 2018-10-15 16:02:04Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ lxc2@rhel7-1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-2 * FencingPass (stonith:fence_dummy): Started rhel7-3 * container1 (ocf:heartbeat:VirtualDomain): FAILED * 
container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Slaves: [ lxc2 ] + * Unpromoted: [ lxc2 ] * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Transition Summary: * Fence (reboot) lxc1 (resource: container1) 'guest is unclean' * Start container1 ( rhel7-1 ) - * Recover lxc-ms:1 ( Master lxc1 ) + * Recover lxc-ms:1 ( Promoted lxc1 ) * Restart lxc1 ( rhel7-1 ) due to required container1 start Executing Cluster Transition: * Resource action: container1 monitor on rhel7-1 * Pseudo action: lxc-ms-master_demote_0 * Resource action: lxc1 stop on rhel7-1 * Pseudo action: stonith-lxc1-reboot on lxc1 * Resource action: container1 start on rhel7-1 * Pseudo action: lxc-ms_demote_0 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc1 start on rhel7-1 * Resource action: lxc1 monitor=30000 on rhel7-1 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc-ms start on lxc1 * Pseudo action: lxc-ms-master_running_0 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Using the original execution date of: 2018-10-15 16:02:04Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * GuestOnline: [ lxc1@rhel7-1 lxc2@rhel7-1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-2 * FencingPass (stonith:fence_dummy): Started rhel7-3 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Masters: [ lxc1 ] - * Slaves: [ lxc2 ] + * Promoted: [ lxc1 ] + * Unpromoted: [ lxc2 ] diff --git a/cts/scheduler/summary/guest-node-host-dies.summary b/cts/scheduler/summary/guest-node-host-dies.summary index 3faabcef3d..b0286b2846 100644 --- a/cts/scheduler/summary/guest-node-host-dies.summary +++ b/cts/scheduler/summary/guest-node-host-dies.summary @@ -1,82 +1,82 @@ Current cluster status: * Node List: * Node rhel7-1: UNCLEAN (offline) * Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1 (UNCLEAN) * container1 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-1 (UNCLEAN) * Clone Set: lxc-ms-master [lxc-ms] (promotable): * Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] Transition Summary: * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' * Fence (reboot) lxc1 (resource: container1) 'guest is unclean' * Fence (reboot) rhel7-1 'rsc_rhel7-1 is thought to be active there' * Restart Fencing ( rhel7-4 ) due to resource definition change * Move rsc_rhel7-1 ( rhel7-1 -> rhel7-5 ) * Recover container1 ( rhel7-1 -> rhel7-2 ) * Recover container2 ( rhel7-1 -> rhel7-3 ) - * Recover lxc-ms:0 ( Master lxc1 ) - * Recover lxc-ms:1 ( Slave lxc2 ) + * Recover lxc-ms:0 ( Promoted lxc1 ) + * Recover lxc-ms:1 ( Unpromoted lxc2 ) * Move lxc1 ( rhel7-1 -> rhel7-2 ) * Move lxc2 ( rhel7-1 -> rhel7-3 ) Executing Cluster Transition: * Resource action: Fencing stop on rhel7-4 * Pseudo action: lxc-ms-master_demote_0 * Pseudo action: lxc1_stop_0 * Resource action: lxc1 monitor on rhel7-5 * Resource action: lxc1 monitor on rhel7-4 * Resource action: lxc1 monitor on rhel7-3 * Pseudo action: lxc2_stop_0 * Resource action: lxc2 
monitor on rhel7-5 * Resource action: lxc2 monitor on rhel7-4 * Resource action: lxc2 monitor on rhel7-2 * Fencing rhel7-1 (reboot) * Pseudo action: rsc_rhel7-1_stop_0 * Pseudo action: container1_stop_0 * Pseudo action: container2_stop_0 * Pseudo action: stonith-lxc2-reboot on lxc2 * Pseudo action: stonith-lxc1-reboot on lxc1 * Resource action: Fencing start on rhel7-4 * Resource action: Fencing monitor=120000 on rhel7-4 * Resource action: rsc_rhel7-1 start on rhel7-5 * Resource action: container1 start on rhel7-2 * Resource action: container2 start on rhel7-3 * Pseudo action: lxc-ms_demote_0 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Resource action: lxc1 start on rhel7-2 * Resource action: lxc2 start on rhel7-3 * Resource action: rsc_rhel7-1 monitor=5000 on rhel7-5 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: lxc1 monitor=30000 on rhel7-2 * Resource action: lxc2 monitor=30000 on rhel7-3 * Resource action: lxc-ms start on lxc1 * Resource action: lxc-ms start on lxc2 * Pseudo action: lxc-ms-master_running_0 * Resource action: lxc-ms monitor=10000 on lxc2 * Pseudo action: lxc-ms-master_promote_0 * Resource action: lxc-ms promote on lxc1 * Pseudo action: lxc-ms-master_promoted_0 Revised Cluster Status: * Node List: * Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * OFFLINE: [ rhel7-1 ] * GuestOnline: [ lxc1@rhel7-2 lxc2@rhel7-3 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-5 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Masters: [ lxc1 ] - * Slaves: [ lxc2 ] + * Promoted: [ lxc1 ] + * Unpromoted: [ lxc2 ] diff --git a/cts/scheduler/summary/history-1.summary b/cts/scheduler/summary/history-1.summary index 5c8eef854b..74d31ec281 100644 --- a/cts/scheduler/summary/history-1.summary +++ b/cts/scheduler/summary/history-1.summary @@ -1,55 +1,55 @@ Current cluster status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * OFFLINE: [ pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Stopped * r192.168.101.182 (ocf:heartbeat:IPaddr): Stopped * r192.168.101.183 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-3 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] + * Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * OFFLINE: [ pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Stopped * r192.168.101.182 (ocf:heartbeat:IPaddr): Stopped * r192.168.101.183 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-1 
(ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-3 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] + * Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] diff --git a/cts/scheduler/summary/inc11.summary b/cts/scheduler/summary/inc11.summary index 743a6eda8c..51e838c374 100644 --- a/cts/scheduler/summary/inc11.summary +++ b/cts/scheduler/summary/inc11.summary @@ -1,43 +1,43 @@ Current cluster status: * Node List: * Online: [ node0 node1 node2 ] * Full List of Resources: * simple-rsc (ocf:heartbeat:apache): Stopped * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped Transition Summary: * Start simple-rsc ( node2 ) * Start child_rsc1:0 ( node1 ) - * Promote child_rsc1:1 ( Stopped -> Master node2 ) + * Promote child_rsc1:1 ( Stopped -> Promoted node2 ) Executing Cluster Transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: simple-rsc monitor on node0 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:0 monitor on node0 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:1 monitor on node0 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node0 node1 node2 ] * Full List of Resources: * simple-rsc (ocf:heartbeat:apache): Started node2 * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:1 (ocf:heartbeat:apache): Master node2 + * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2 diff --git a/cts/scheduler/summary/inc12.summary b/cts/scheduler/summary/inc12.summary index 39f1502c73..1ada08dda0 100644 --- a/cts/scheduler/summary/inc12.summary +++ b/cts/scheduler/summary/inc12.summary @@ -1,132 +1,132 @@ Current cluster status: * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n05 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04 * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n04 c001n05 c001n06 
c001n07 ] * Stopped: [ c001n03 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Slave c001n04 - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Slave c001n04 - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Slave c001n05 - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Slave c001n05 - * ocf_msdummy:6 (ocf:heartbeat:Stateful): Slave c001n06 - * ocf_msdummy:7 (ocf:heartbeat:Stateful): Slave c001n06 - * ocf_msdummy:8 (ocf:heartbeat:Stateful): Slave c001n07 - * ocf_msdummy:9 (ocf:heartbeat:Stateful): Slave c001n07 - * ocf_msdummy:10 (ocf:heartbeat:Stateful): Slave c001n02 - * ocf_msdummy:11 (ocf:heartbeat:Stateful): Slave c001n02 + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted c001n04 + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted c001n04 + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted c001n05 + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted c001n05 + * ocf_msdummy:6 (ocf:heartbeat:Stateful): Unpromoted c001n06 + * ocf_msdummy:7 (ocf:heartbeat:Stateful): Unpromoted c001n06 + * ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted c001n07 + * ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted c001n07 + * ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted c001n02 + * ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted c001n02 Transition Summary: * Stop ocf_192.168.100.181 ( c001n02 ) due to node availability * Stop heartbeat_192.168.100.182 ( c001n02 ) due to node availability * Stop ocf_192.168.100.183 ( c001n02 ) due to node availability * Stop lsb_dummy ( c001n04 ) due to node availability * Stop rsc_c001n03 ( c001n05 ) due to node availability * Stop rsc_c001n02 ( c001n02 ) due to node availability * Stop rsc_c001n04 ( c001n04 ) due to node availability * Stop rsc_c001n05 ( c001n05 ) due to node availability * Stop rsc_c001n06 ( c001n06 ) due to node availability * Stop rsc_c001n07 ( c001n07 ) due to node availability * Stop child_DoFencing:0 ( c001n02 ) due to node availability * Stop child_DoFencing:1 ( c001n04 ) due to node availability * Stop child_DoFencing:2 ( c001n05 ) due to node availability * Stop child_DoFencing:3 ( c001n06 ) due to node availability * Stop child_DoFencing:4 ( c001n07 ) due to node availability - * Stop ocf_msdummy:2 ( Slave c001n04 ) due to node availability - * Stop ocf_msdummy:3 ( Slave c001n04 ) due to node availability - * Stop ocf_msdummy:4 ( Slave c001n05 ) due to node availability - * Stop ocf_msdummy:5 ( Slave c001n05 ) due to node availability - * Stop ocf_msdummy:6 ( Slave c001n06 ) due to node availability - * Stop ocf_msdummy:7 ( Slave c001n06 ) due to node availability - * Stop ocf_msdummy:8 ( Slave c001n07 ) due to node availability - * Stop ocf_msdummy:9 ( Slave c001n07 ) due to node availability - * Stop ocf_msdummy:10 ( Slave c001n02 ) due to node availability - * Stop ocf_msdummy:11 ( Slave c001n02 ) due to node availability + * Stop ocf_msdummy:2 ( Unpromoted c001n04 ) due to node availability + * Stop ocf_msdummy:3 ( Unpromoted c001n04 ) due to node availability + * Stop ocf_msdummy:4 ( Unpromoted c001n05 ) due to node availability + * Stop ocf_msdummy:5 ( Unpromoted c001n05 ) due to node availability + * Stop ocf_msdummy:6 ( Unpromoted c001n06 ) due to node availability + * Stop ocf_msdummy:7 ( Unpromoted c001n06 ) due to node availability + * Stop ocf_msdummy:8 ( Unpromoted c001n07 ) due to node availability + * Stop ocf_msdummy:9 ( Unpromoted c001n07 ) due to node 
availability + * Stop ocf_msdummy:10 ( Unpromoted c001n02 ) due to node availability + * Stop ocf_msdummy:11 ( Unpromoted c001n02 ) due to node availability Executing Cluster Transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n02 * Resource action: lsb_dummy stop on c001n04 * Resource action: rsc_c001n03 stop on c001n05 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n04 stop on c001n04 * Resource action: rsc_c001n05 stop on c001n05 * Resource action: rsc_c001n06 stop on c001n06 * Resource action: rsc_c001n07 stop on c001n07 * Pseudo action: DoFencing_stop_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n02 * Resource action: child_DoFencing:1 stop on c001n02 * Resource action: child_DoFencing:2 stop on c001n04 * Resource action: child_DoFencing:3 stop on c001n05 * Resource action: child_DoFencing:4 stop on c001n06 * Resource action: child_DoFencing:5 stop on c001n07 * Pseudo action: DoFencing_stopped_0 * Resource action: ocf_msdummy:2 stop on c001n04 * Resource action: ocf_msdummy:3 stop on c001n04 * Resource action: ocf_msdummy:4 stop on c001n05 * Resource action: ocf_msdummy:5 stop on c001n05 * Resource action: ocf_msdummy:6 stop on c001n06 * Resource action: ocf_msdummy:7 stop on c001n06 * Resource action: ocf_msdummy:8 stop on c001n07 * Resource action: ocf_msdummy:9 stop on c001n07 * Resource action: ocf_msdummy:10 stop on c001n02 * Resource action: ocf_msdummy:11 stop on c001n02 * Pseudo action: master_rsc_1_stopped_0 * Cluster action: do_shutdown on c001n07 * Cluster action: do_shutdown on c001n06 * Cluster action: do_shutdown on c001n05 * Cluster action: do_shutdown on c001n04 * Resource action: ocf_192.168.100.181 stop on c001n02 * Cluster action: do_shutdown on c001n02 * Pseudo action: group-1_stopped_0 * Cluster action: do_shutdown on c001n03 Revised Cluster Status: * Node List: * Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Stopped * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Stopped * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped * rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n04 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n05 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n06 (ocf:heartbeat:IPaddr): Stopped * rsc_c001n07 (ocf:heartbeat:IPaddr): Stopped * Clone Set: DoFencing [child_DoFencing]: * Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:2 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:5 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:8 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:9 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:10 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:11 (ocf:heartbeat:Stateful): Stopped diff --git a/cts/scheduler/summary/master-0.summary b/cts/scheduler/summary/master-0.summary index d69607178c..b80bb106cf 100644 --- a/cts/scheduler/summary/master-0.summary +++ 
b/cts/scheduler/summary/master-0.summary @@ -1,47 +1,47 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped * child_rsc1:2 (ocf:heartbeat:apache): Stopped * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 ( node1 ) * Start child_rsc1:1 ( node2 ) * Start child_rsc1:2 ( node1 ) * Start child_rsc1:3 ( node2 ) Executing Cluster Transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:1 (ocf:heartbeat:apache): Slave node2 - * child_rsc1:2 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:3 (ocf:heartbeat:apache): Slave node2 + * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2 + * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2 * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/master-1.summary b/cts/scheduler/summary/master-1.summary index df81b8e4a9..161f51834a 100644 --- a/cts/scheduler/summary/master-1.summary +++ b/cts/scheduler/summary/master-1.summary @@ -1,50 +1,50 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped * child_rsc1:2 (ocf:heartbeat:apache): Stopped * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 ( node1 ) - * Promote child_rsc1:1 ( Stopped -> Master node2 ) + * Promote child_rsc1:1 ( Stopped -> Promoted node2 ) * Start child_rsc1:2 ( node1 ) * Start child_rsc1:3 ( node2 ) Executing Cluster Transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on 
node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:1 (ocf:heartbeat:apache): Master node2 - * child_rsc1:2 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:3 (ocf:heartbeat:apache): Slave node2 + * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2 + * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2 * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/master-10.summary b/cts/scheduler/summary/master-10.summary index 286302bb52..54dbcd7e69 100644 --- a/cts/scheduler/summary/master-10.summary +++ b/cts/scheduler/summary/master-10.summary @@ -1,75 +1,75 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped * child_rsc1:2 (ocf:heartbeat:apache): Stopped * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped Transition Summary: - * Promote child_rsc1:0 ( Stopped -> Master node1 ) + * Promote child_rsc1:0 ( Stopped -> Promoted node1 ) * Start child_rsc1:1 ( node2 ) * Start child_rsc1:2 ( node1 ) - * Promote child_rsc1:3 ( Stopped -> Master node2 ) + * Promote child_rsc1:3 ( Stopped -> Promoted node2 ) Executing Cluster Transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc1_pre_notify_promote_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_promote_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:0 promote on node1 * Resource action: child_rsc1:3 promote on node2 * Pseudo action: rsc1_promoted_0 * Pseudo action: rsc1_post_notify_promoted_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo 
action: rsc1_confirmed-post_notify_promoted_0 * Resource action: child_rsc1:0 monitor=11000 on node1 * Resource action: child_rsc1:1 monitor=1000 on node2 * Resource action: child_rsc1:2 monitor=1000 on node1 * Resource action: child_rsc1:3 monitor=11000 on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Master node1 - * child_rsc1:1 (ocf:heartbeat:apache): Slave node2 - * child_rsc1:2 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:3 (ocf:heartbeat:apache): Master node2 + * child_rsc1:0 (ocf:heartbeat:apache): Promoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2 + * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:3 (ocf:heartbeat:apache): Promoted node2 * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/master-11.summary b/cts/scheduler/summary/master-11.summary index dceca1d5f1..89fb85776d 100644 --- a/cts/scheduler/summary/master-11.summary +++ b/cts/scheduler/summary/master-11.summary @@ -1,40 +1,40 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * simple-rsc (ocf:heartbeat:apache): Stopped * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped Transition Summary: * Start simple-rsc ( node2 ) * Start child_rsc1:0 ( node1 ) - * Promote child_rsc1:1 ( Stopped -> Master node2 ) + * Promote child_rsc1:1 ( Stopped -> Promoted node2 ) Executing Cluster Transition: * Resource action: simple-rsc monitor on node2 * Resource action: simple-rsc monitor on node1 * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: simple-rsc start on node2 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * simple-rsc (ocf:heartbeat:apache): Started node2 * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:1 (ocf:heartbeat:apache): Master node2 + * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2 diff --git a/cts/scheduler/summary/master-12.summary b/cts/scheduler/summary/master-12.summary index 722d39caf4..878d366883 100644 --- a/cts/scheduler/summary/master-12.summary +++ b/cts/scheduler/summary/master-12.summary @@ -1,33 +1,33 @@ Current cluster status: * Node List: * Online: [ sel3 sel4 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ sel3 ] - * Slaves: [ sel4 ] + * Promoted: [ sel3 ] + * Unpromoted: [ sel4 ] * Clone Set: ms-sf [sf] (promotable) (unique): - * sf:0 (ocf:heartbeat:Stateful): Slave sel3 - * sf:1 (ocf:heartbeat:Stateful): Slave sel4 + * sf:0 (ocf:heartbeat:Stateful): Unpromoted sel3 + * sf:1 (ocf:heartbeat:Stateful): Unpromoted sel4 * fs0 (ocf:heartbeat:Filesystem): Started sel3 Transition Summary: - * Promote sf:0 ( Slave -> Master sel3 ) + * Promote sf:0 ( Unpromoted -> Promoted sel3 ) Executing Cluster Transition: * 
Pseudo action: ms-sf_promote_0 * Resource action: sf:0 promote on sel3 * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: * Node List: * Online: [ sel3 sel4 ] * Full List of Resources: * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ sel3 ] - * Slaves: [ sel4 ] + * Promoted: [ sel3 ] + * Unpromoted: [ sel4 ] * Clone Set: ms-sf [sf] (promotable) (unique): - * sf:0 (ocf:heartbeat:Stateful): Master sel3 - * sf:1 (ocf:heartbeat:Stateful): Slave sel4 + * sf:0 (ocf:heartbeat:Stateful): Promoted sel3 + * sf:1 (ocf:heartbeat:Stateful): Unpromoted sel4 * fs0 (ocf:heartbeat:Filesystem): Started sel3 diff --git a/cts/scheduler/summary/master-13.summary b/cts/scheduler/summary/master-13.summary index 9e38cb1740..67a95cad79 100644 --- a/cts/scheduler/summary/master-13.summary +++ b/cts/scheduler/summary/master-13.summary @@ -1,62 +1,62 @@ Current cluster status: * Node List: * Online: [ frigg odin ] * Full List of Resources: * Clone Set: ms_drbd [drbd0] (promotable): - * Masters: [ frigg ] - * Slaves: [ odin ] + * Promoted: [ frigg ] + * Unpromoted: [ odin ] * Resource Group: group: * IPaddr0 (ocf:heartbeat:IPaddr): Stopped * MailTo (ocf:heartbeat:MailTo): Stopped Transition Summary: - * Promote drbd0:0 ( Slave -> Master odin ) - * Demote drbd0:1 ( Master -> Slave frigg ) + * Promote drbd0:0 ( Unpromoted -> Promoted odin ) + * Demote drbd0:1 ( Promoted -> Unpromoted frigg ) * Start IPaddr0 ( odin ) * Start MailTo ( odin ) Executing Cluster Transition: * Resource action: drbd0:1 cancel=12000 on odin * Resource action: drbd0:0 cancel=10000 on frigg * Pseudo action: ms_drbd_pre_notify_demote_0 * Resource action: drbd0:1 notify on odin * Resource action: drbd0:0 notify on frigg * Pseudo action: ms_drbd_confirmed-pre_notify_demote_0 * Pseudo action: ms_drbd_demote_0 * Resource action: drbd0:0 demote on frigg * Pseudo action: ms_drbd_demoted_0 * Pseudo action: ms_drbd_post_notify_demoted_0 * Resource action: drbd0:1 notify on odin * Resource action: drbd0:0 notify on frigg * Pseudo action: ms_drbd_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_pre_notify_promote_0 * Resource action: drbd0:1 notify on odin * Resource action: drbd0:0 notify on frigg * Pseudo action: ms_drbd_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_promote_0 * Resource action: drbd0:1 promote on odin * Pseudo action: ms_drbd_promoted_0 * Pseudo action: ms_drbd_post_notify_promoted_0 * Resource action: drbd0:1 notify on odin * Resource action: drbd0:0 notify on frigg * Pseudo action: ms_drbd_confirmed-post_notify_promoted_0 * Pseudo action: group_start_0 * Resource action: IPaddr0 start on odin * Resource action: MailTo start on odin * Resource action: drbd0:1 monitor=10000 on odin * Resource action: drbd0:0 monitor=12000 on frigg * Pseudo action: group_running_0 * Resource action: IPaddr0 monitor=5000 on odin Revised Cluster Status: * Node List: * Online: [ frigg odin ] * Full List of Resources: * Clone Set: ms_drbd [drbd0] (promotable): - * Masters: [ odin ] - * Slaves: [ frigg ] + * Promoted: [ odin ] + * Unpromoted: [ frigg ] * Resource Group: group: * IPaddr0 (ocf:heartbeat:IPaddr): Started odin * MailTo (ocf:heartbeat:MailTo): Started odin diff --git a/cts/scheduler/summary/master-2.summary b/cts/scheduler/summary/master-2.summary index 6326e25993..3258499fc8 100644 --- a/cts/scheduler/summary/master-2.summary +++ b/cts/scheduler/summary/master-2.summary @@ -1,71 +1,71 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] 
(promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): Stopped * child_rsc1:2 (ocf:heartbeat:apache): Stopped * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped Transition Summary: - * Promote child_rsc1:0 ( Stopped -> Master node1 ) + * Promote child_rsc1:0 ( Stopped -> Promoted node1 ) * Start child_rsc1:1 ( node2 ) * Start child_rsc1:2 ( node1 ) - * Promote child_rsc1:3 ( Stopped -> Master node2 ) + * Promote child_rsc1:3 ( Stopped -> Promoted node2 ) Executing Cluster Transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_pre_notify_start_0 * Pseudo action: rsc1_confirmed-pre_notify_start_0 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_post_notify_running_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_running_0 * Pseudo action: rsc1_pre_notify_promote_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-pre_notify_promote_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:0 promote on node1 * Resource action: child_rsc1:3 promote on node2 * Pseudo action: rsc1_promoted_0 * Pseudo action: rsc1_post_notify_promoted_0 * Resource action: child_rsc1:0 notify on node1 * Resource action: child_rsc1:1 notify on node2 * Resource action: child_rsc1:2 notify on node1 * Resource action: child_rsc1:3 notify on node2 * Pseudo action: rsc1_confirmed-post_notify_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Master node1 - * child_rsc1:1 (ocf:heartbeat:apache): Slave node2 - * child_rsc1:2 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:3 (ocf:heartbeat:apache): Master node2 + * child_rsc1:0 (ocf:heartbeat:apache): Promoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Unpromoted node2 + * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:3 (ocf:heartbeat:apache): Promoted node2 * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/master-3.summary b/cts/scheduler/summary/master-3.summary index df81b8e4a9..161f51834a 100644 --- a/cts/scheduler/summary/master-3.summary +++ b/cts/scheduler/summary/master-3.summary @@ -1,50 +1,50 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): * child_rsc1:0 (ocf:heartbeat:apache): Stopped * child_rsc1:1 (ocf:heartbeat:apache): 
Stopped * child_rsc1:2 (ocf:heartbeat:apache): Stopped * child_rsc1:3 (ocf:heartbeat:apache): Stopped * child_rsc1:4 (ocf:heartbeat:apache): Stopped Transition Summary: * Start child_rsc1:0 ( node1 ) - * Promote child_rsc1:1 ( Stopped -> Master node2 ) + * Promote child_rsc1:1 ( Stopped -> Promoted node2 ) * Start child_rsc1:2 ( node1 ) * Start child_rsc1:3 ( node2 ) Executing Cluster Transition: * Resource action: child_rsc1:0 monitor on node2 * Resource action: child_rsc1:0 monitor on node1 * Resource action: child_rsc1:1 monitor on node2 * Resource action: child_rsc1:1 monitor on node1 * Resource action: child_rsc1:2 monitor on node2 * Resource action: child_rsc1:2 monitor on node1 * Resource action: child_rsc1:3 monitor on node2 * Resource action: child_rsc1:3 monitor on node1 * Resource action: child_rsc1:4 monitor on node2 * Resource action: child_rsc1:4 monitor on node1 * Pseudo action: rsc1_start_0 * Resource action: child_rsc1:0 start on node1 * Resource action: child_rsc1:1 start on node2 * Resource action: child_rsc1:2 start on node1 * Resource action: child_rsc1:3 start on node2 * Pseudo action: rsc1_running_0 * Pseudo action: rsc1_promote_0 * Resource action: child_rsc1:1 promote on node2 * Pseudo action: rsc1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: rsc1 [child_rsc1] (promotable) (unique): - * child_rsc1:0 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:1 (ocf:heartbeat:apache): Master node2 - * child_rsc1:2 (ocf:heartbeat:apache): Slave node1 - * child_rsc1:3 (ocf:heartbeat:apache): Slave node2 + * child_rsc1:0 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:1 (ocf:heartbeat:apache): Promoted node2 + * child_rsc1:2 (ocf:heartbeat:apache): Unpromoted node1 + * child_rsc1:3 (ocf:heartbeat:apache): Unpromoted node2 * child_rsc1:4 (ocf:heartbeat:apache): Stopped diff --git a/cts/scheduler/summary/master-4.summary b/cts/scheduler/summary/master-4.summary index 4ec0a163cc..0dfe7c7263 100644 --- a/cts/scheduler/summary/master-4.summary +++ b/cts/scheduler/summary/master-4.summary @@ -1,94 +1,94 @@ Current cluster status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_child (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n01 * child_DoFencing:3 (stonith:ssh): Started c001n02 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:6 
(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 Transition Summary: - * Promote ocf_msdummy:0 ( Slave -> Master c001n08 ) + * Promote ocf_msdummy:0 ( Unpromoted -> Promoted c001n08 ) Executing Cluster Transition: * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n02 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n08 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Resource action: ocf_msdummy:0 cancel=5000 on c001n08 * Resource action: ocf_msdummy:2 monitor on c001n08 * Resource action: ocf_msdummy:2 monitor on c001n03 * Resource action: ocf_msdummy:2 monitor on c001n02 * Resource action: ocf_msdummy:3 monitor on c001n03 * Resource action: ocf_msdummy:3 monitor on c001n02 * Resource action: ocf_msdummy:3 monitor on c001n01 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n01 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:6 monitor on c001n01 * Resource action: ocf_msdummy:7 monitor on c001n08 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n01 * Pseudo action: master_rsc_1_promote_0 * Resource action: ocf_msdummy:0 promote on c001n08 * Pseudo action: master_rsc_1_promoted_0 * Resource action: ocf_msdummy:0 monitor=6000 on c001n08 Revised Cluster Status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_child (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n01 * child_DoFencing:3 (stonith:ssh): Started c001n02 * Clone Set: 
master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 diff --git a/cts/scheduler/summary/master-5.summary b/cts/scheduler/summary/master-5.summary index 2df8889a29..00fa1c2154 100644 --- a/cts/scheduler/summary/master-5.summary +++ b/cts/scheduler/summary/master-5.summary @@ -1,88 +1,88 @@ Current cluster status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_child (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n01 * child_DoFencing:3 (stonith:ssh): Started c001n02 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): 
Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 Transition Summary: Executing Cluster Transition: * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n02 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:2 monitor on c001n02 * Resource action: child_DoFencing:3 monitor on c001n08 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n01 * Resource action: ocf_msdummy:2 monitor on c001n08 * Resource action: ocf_msdummy:2 monitor on c001n03 * Resource action: ocf_msdummy:2 monitor on c001n02 * Resource action: ocf_msdummy:3 monitor on c001n03 * Resource action: ocf_msdummy:3 monitor on c001n02 * Resource action: ocf_msdummy:3 monitor on c001n01 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:4 monitor on c001n01 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:6 monitor on c001n01 * Resource action: ocf_msdummy:7 monitor on c001n08 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n01 Revised Cluster Status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_child (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_child (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n01 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n01 * child_DoFencing:3 (stonith:ssh): Started c001n02 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08 + * ocf_msdummy:1 
(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 diff --git a/cts/scheduler/summary/master-6.summary b/cts/scheduler/summary/master-6.summary index e5abf9fea9..13c12dfc1c 100644 --- a/cts/scheduler/summary/master-6.summary +++ b/cts/scheduler/summary/master-6.summary @@ -1,87 +1,87 @@ Current cluster status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n02 * child_DoFencing:2 (stonith:ssh): Started c001n03 * child_DoFencing:3 (stonith:ssh): Started c001n01 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 Transition Summary: Executing Cluster Transition: * Resource action: child_DoFencing:1 monitor on c001n08 * Resource action: child_DoFencing:1 monitor on c001n03 * Resource action: child_DoFencing:1 monitor on c001n01 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n01 * Resource action: child_DoFencing:3 
monitor on c001n08 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Resource action: ocf_msdummy:2 monitor on c001n08 * Resource action: ocf_msdummy:2 monitor on c001n01 * Resource action: ocf_msdummy:3 monitor on c001n03 * Resource action: ocf_msdummy:3 monitor on c001n01 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n01 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n01 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:6 monitor on c001n02 * Resource action: ocf_msdummy:7 monitor on c001n08 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 Revised Cluster Status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n08 * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n08 * child_DoFencing:1 (stonith:ssh): Started c001n02 * child_DoFencing:2 (stonith:ssh): Started c001n03 * child_DoFencing:3 (stonith:ssh): Started c001n01 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n08 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 diff --git a/cts/scheduler/summary/master-7.summary b/cts/scheduler/summary/master-7.summary index 1468642830..4fc3a85e9a 100644 --- a/cts/scheduler/summary/master-7.summary +++ b/cts/scheduler/summary/master-7.summary @@ -1,121 +1,121 @@ 
Current cluster status: * Node List: * Node c001n01: UNCLEAN (offline) * Online: [ c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN) * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN) * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n02 * child_DoFencing:3 (stonith:ssh): Started c001n08 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n01 (UNCLEAN) - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN) + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n01 (UNCLEAN) + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 Transition Summary: * Fence (reboot) c001n01 'peer is no longer part of the cluster' * Move DcIPaddr ( c001n01 -> c001n03 ) * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) * Move ocf_192.168.100.183 ( c001n03 -> c001n02 ) * Move lsb_dummy ( c001n02 -> c001n08 ) * Move rsc_c001n01 ( c001n01 -> c001n03 ) * Stop child_DoFencing:0 ( c001n01 ) due to node availability - * Stop ocf_msdummy:0 ( Master c001n01 ) due to node availability - * Stop ocf_msdummy:4 ( Slave c001n01 ) due to node availability + * Stop ocf_msdummy:0 ( Promoted c001n01 ) due to node availability + * Stop ocf_msdummy:4 ( Unpromoted c001n01 ) due to node availability Executing Cluster Transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Pseudo action: 
DoFencing_stop_0 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: ocf_msdummy:4_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised Cluster Status: * Node List: * Online: [ c001n02 c001n03 c001n08 ] * OFFLINE: [ c001n01 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03 * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Stopped * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n02 * child_DoFencing:3 (stonith:ssh): Started c001n08 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - 
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 diff --git a/cts/scheduler/summary/master-8.summary b/cts/scheduler/summary/master-8.summary index 3eb67ec7ec..32417ff1ea 100644 --- a/cts/scheduler/summary/master-8.summary +++ b/cts/scheduler/summary/master-8.summary @@ -1,124 +1,124 @@ Current cluster status: * Node List: * Node c001n01: UNCLEAN (offline) * Online: [ c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN) * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n03 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n03 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n02 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 (UNCLEAN) * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started c001n01 (UNCLEAN) * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n02 * child_DoFencing:3 (stonith:ssh): Started c001n08 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n01 (UNCLEAN) - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n01 (UNCLEAN) + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 Transition Summary: * Fence (reboot) c001n01 'peer is no longer part of the cluster' * Move DcIPaddr ( c001n01 -> c001n03 ) * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) * Move ocf_192.168.100.183 ( c001n03 -> c001n02 ) * Move lsb_dummy ( c001n02 -> c001n08 ) * Move rsc_c001n01 ( c001n01 -> c001n03 ) * Stop child_DoFencing:0 ( c001n01 ) due to node availability - * Move ocf_msdummy:0 ( Master c001n01 -> Slave c001n03 ) + * Move ocf_msdummy:0 ( Promoted c001n01 -> Unpromoted c001n03 ) Executing Cluster Transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop 
on c001n03 * Resource action: lsb_dummy stop on c001n02 * Resource action: child_DoFencing:2 monitor on c001n08 * Resource action: child_DoFencing:2 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n03 * Resource action: child_DoFencing:3 monitor on c001n02 * Pseudo action: DoFencing_stop_0 * Resource action: ocf_msdummy:4 monitor on c001n08 * Resource action: ocf_msdummy:4 monitor on c001n03 * Resource action: ocf_msdummy:4 monitor on c001n02 * Resource action: ocf_msdummy:5 monitor on c001n08 * Resource action: ocf_msdummy:5 monitor on c001n03 * Resource action: ocf_msdummy:5 monitor on c001n02 * Resource action: ocf_msdummy:6 monitor on c001n08 * Resource action: ocf_msdummy:6 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n03 * Resource action: ocf_msdummy:7 monitor on c001n02 * Pseudo action: master_rsc_1_demote_0 * Fencing c001n01 (reboot) * Pseudo action: DcIPaddr_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n03 * Resource action: lsb_dummy start on c001n08 * Pseudo action: rsc_c001n01_stop_0 * Pseudo action: child_DoFencing:0_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: ocf_msdummy:0_demote_0 * Pseudo action: master_rsc_1_demoted_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: DcIPaddr start on c001n03 * Resource action: ocf_192.168.100.181 stop on c001n03 * Resource action: lsb_dummy monitor=5000 on c001n08 * Resource action: rsc_c001n01 start on c001n03 * Pseudo action: ocf_msdummy:0_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Resource action: DcIPaddr monitor=5000 on c001n03 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Resource action: rsc_c001n01 monitor=5000 on c001n03 * Resource action: ocf_msdummy:0 start on c001n03 * Pseudo action: master_rsc_1_running_0 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 * Resource action: ocf_msdummy:0 monitor=5000 on c001n03 Revised Cluster Status: * Node List: * Online: [ c001n02 c001n03 c001n08 ] * OFFLINE: [ c001n01 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03 * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n08 * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03 * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Stopped * child_DoFencing:1 (stonith:ssh): Started c001n03 * child_DoFencing:2 (stonith:ssh): Started c001n02 * child_DoFencing:3 (stonith:ssh): Started c001n08 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n03 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * 
ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n03 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 diff --git a/cts/scheduler/summary/master-9.summary b/cts/scheduler/summary/master-9.summary index f8656e7add..2da56a62db 100644 --- a/cts/scheduler/summary/master-9.summary +++ b/cts/scheduler/summary/master-9.summary @@ -1,100 +1,100 @@ Current cluster status: * Node List: * Node sgi2: UNCLEAN (offline) * Node test02: UNCLEAN (offline) * Online: [ ibm1 va1 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped * heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped * ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped * rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped * rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped * rsc_va1 (ocf:heartbeat:IPaddr): Stopped * rsc_test02 (ocf:heartbeat:IPaddr): Stopped * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started va1 * child_DoFencing:1 (stonith:ssh): Started ibm1 * child_DoFencing:2 (stonith:ssh): Stopped * child_DoFencing:3 (stonith:ssh): Stopped * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped Transition Summary: * Start DcIPaddr ( va1 ) due to no quorum (blocked) * Start ocf_127.0.0.11 ( va1 ) due to no quorum (blocked) * Start heartbeat_127.0.0.12 ( va1 ) due to no quorum (blocked) * Start ocf_127.0.0.13 ( va1 ) due to no quorum (blocked) * Start lsb_dummy ( va1 ) due to no quorum (blocked) * Start rsc_sgi2 ( va1 ) due to no quorum (blocked) * Start rsc_ibm1 ( va1 ) due to no quorum (blocked) * Start rsc_va1 ( va1 ) due to no quorum (blocked) * Start rsc_test02 ( va1 ) due to no quorum (blocked) * Stop child_DoFencing:1 ( ibm1 ) due to node availability - * Promote ocf_msdummy:0 ( Stopped -> Master va1 ) blocked + * Promote ocf_msdummy:0 ( Stopped -> Promoted va1 ) blocked * Start ocf_msdummy:1 ( va1 ) due to no quorum (blocked) Executing Cluster Transition: * Resource action: child_DoFencing:1 monitor on va1 * Resource action: child_DoFencing:2 
monitor on va1 * Resource action: child_DoFencing:2 monitor on ibm1 * Resource action: child_DoFencing:3 monitor on va1 * Resource action: child_DoFencing:3 monitor on ibm1 * Pseudo action: DoFencing_stop_0 * Resource action: ocf_msdummy:2 monitor on va1 * Resource action: ocf_msdummy:2 monitor on ibm1 * Resource action: ocf_msdummy:3 monitor on va1 * Resource action: ocf_msdummy:3 monitor on ibm1 * Resource action: ocf_msdummy:4 monitor on va1 * Resource action: ocf_msdummy:4 monitor on ibm1 * Resource action: ocf_msdummy:5 monitor on va1 * Resource action: ocf_msdummy:5 monitor on ibm1 * Resource action: ocf_msdummy:6 monitor on va1 * Resource action: ocf_msdummy:6 monitor on ibm1 * Resource action: ocf_msdummy:7 monitor on va1 * Resource action: ocf_msdummy:7 monitor on ibm1 * Resource action: child_DoFencing:1 stop on ibm1 * Pseudo action: DoFencing_stopped_0 * Cluster action: do_shutdown on ibm1 Revised Cluster Status: * Node List: * Node sgi2: UNCLEAN (offline) * Node test02: UNCLEAN (offline) * Online: [ ibm1 va1 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped * heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped * ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped * rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped * rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped * rsc_va1 (ocf:heartbeat:IPaddr): Stopped * rsc_test02 (ocf:heartbeat:IPaddr): Stopped * Clone Set: DoFencing [child_DoFencing] (unique): * child_DoFencing:0 (stonith:ssh): Started va1 * child_DoFencing:1 (stonith:ssh): Stopped * child_DoFencing:2 (stonith:ssh): Stopped * child_DoFencing:3 (stonith:ssh): Stopped * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped diff --git a/cts/scheduler/summary/master-allow-start.summary b/cts/scheduler/summary/master-allow-start.summary index f73afdb3df..c9afdaa105 100644 --- a/cts/scheduler/summary/master-allow-start.summary +++ b/cts/scheduler/summary/master-allow-start.summary @@ -1,21 +1,21 @@ Current cluster status: * Node List: * Online: [ sles11-a sles11-b ] * Full List of Resources: * Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable): - * Masters: [ sles11-a ] - * Slaves: [ sles11-b ] + * Promoted: [ sles11-a ] + * Unpromoted: [ sles11-b ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ sles11-a sles11-b ] * Full List of Resources: * Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable): - * Masters: [ sles11-a ] - * Slaves: [ sles11-b ] + * Promoted: [ sles11-a ] + * Unpromoted: [ sles11-b ] diff --git a/cts/scheduler/summary/master-asymmetrical-order.summary b/cts/scheduler/summary/master-asymmetrical-order.summary index 26e76472c2..df6e00c9c2 100644 --- a/cts/scheduler/summary/master-asymmetrical-order.summary +++ 
b/cts/scheduler/summary/master-asymmetrical-order.summary @@ -1,37 +1,37 @@ 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable) (disabled): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] * Clone Set: ms2 [rsc2] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] Transition Summary: - * Stop rsc1:0 ( Master node1 ) due to node availability - * Stop rsc1:1 ( Slave node2 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability + * Stop rsc1:1 ( Unpromoted node2 ) due to node availability Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:0 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:0 stop on node1 * Resource action: rsc1:1 stop on node2 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * Clone Set: ms1 [rsc1] (promotable) (disabled): * Stopped (disabled): [ node1 node2 ] * Clone Set: ms2 [rsc2] (promotable): - * Masters: [ node2 ] - * Slaves: [ node1 ] + * Promoted: [ node2 ] + * Unpromoted: [ node1 ] diff --git a/cts/scheduler/summary/master-colocation.summary b/cts/scheduler/summary/master-colocation.summary index 4acef1969f..b3e776bcd9 100644 --- a/cts/scheduler/summary/master-colocation.summary +++ b/cts/scheduler/summary/master-colocation.summary @@ -1,34 +1,34 @@ Current cluster status: * Node List: * Online: [ box1 box2 ] * Full List of Resources: * Clone Set: ms-conntrackd [conntrackd-stateful] (promotable): - * Slaves: [ box1 box2 ] + * Unpromoted: [ box1 box2 ] * Resource Group: virtualips: * externalip (ocf:heartbeat:IPaddr2): Started box2 * internalip (ocf:heartbeat:IPaddr2): Started box2 * sship (ocf:heartbeat:IPaddr2): Started box2 Transition Summary: - * Promote conntrackd-stateful:1 ( Slave -> Master box2 ) + * Promote conntrackd-stateful:1 ( Unpromoted -> Promoted box2 ) Executing Cluster Transition: * Resource action: conntrackd-stateful:0 monitor=29000 on box1 * Pseudo action: ms-conntrackd_promote_0 * Resource action: conntrackd-stateful:1 promote on box2 * Pseudo action: ms-conntrackd_promoted_0 * Resource action: conntrackd-stateful:1 monitor=30000 on box2 Revised Cluster Status: * Node List: * Online: [ box1 box2 ] * Full List of Resources: * Clone Set: ms-conntrackd [conntrackd-stateful] (promotable): - * Masters: [ box2 ] - * Slaves: [ box1 ] + * Promoted: [ box2 ] + * Unpromoted: [ box1 ] * Resource Group: virtualips: * externalip (ocf:heartbeat:IPaddr2): Started box2 * internalip (ocf:heartbeat:IPaddr2): Started box2 * sship (ocf:heartbeat:IPaddr2): Started box2 diff --git a/cts/scheduler/summary/master-demote-2.summary b/cts/scheduler/summary/master-demote-2.summary index 5496288232..daea66ae8b 100644 --- a/cts/scheduler/summary/master-demote-2.summary +++ b/cts/scheduler/summary/master-demote-2.summary @@ -1,75 +1,75 @@ Current cluster status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started pcmk-1 * Resource Group: group-1: * r192.168.122.105 (ocf:heartbeat:IPaddr): Stopped * r192.168.122.106 (ocf:heartbeat:IPaddr): Stopped * r192.168.122.107 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 
(ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped * migrator (ocf:pacemaker:Dummy): Started pcmk-4 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): * stateful-1 (ocf:pacemaker:Stateful): FAILED pcmk-1 - * Slaves: [ pcmk-2 pcmk-3 pcmk-4 ] + * Unpromoted: [ pcmk-2 pcmk-3 pcmk-4 ] Transition Summary: * Start r192.168.122.105 ( pcmk-2 ) * Start r192.168.122.106 ( pcmk-2 ) * Start r192.168.122.107 ( pcmk-2 ) * Start lsb-dummy ( pcmk-2 ) - * Recover stateful-1:0 ( Slave pcmk-1 ) - * Promote stateful-1:1 ( Slave -> Master pcmk-2 ) + * Recover stateful-1:0 ( Unpromoted pcmk-1 ) + * Promote stateful-1:1 ( Unpromoted -> Promoted pcmk-2 ) Executing Cluster Transition: * Resource action: stateful-1:0 cancel=15000 on pcmk-2 * Pseudo action: master-1_stop_0 * Resource action: stateful-1:1 stop on pcmk-1 * Pseudo action: master-1_stopped_0 * Pseudo action: master-1_start_0 * Resource action: stateful-1:1 start on pcmk-1 * Pseudo action: master-1_running_0 * Resource action: stateful-1:1 monitor=15000 on pcmk-1 * Pseudo action: master-1_promote_0 * Resource action: stateful-1:0 promote on pcmk-2 * Pseudo action: master-1_promoted_0 * Pseudo action: group-1_start_0 * Resource action: r192.168.122.105 start on pcmk-2 * Resource action: r192.168.122.106 start on pcmk-2 * Resource action: r192.168.122.107 start on pcmk-2 * Resource action: stateful-1:0 monitor=16000 on pcmk-2 * Pseudo action: group-1_running_0 * Resource action: r192.168.122.105 monitor=5000 on pcmk-2 * Resource action: r192.168.122.106 monitor=5000 on pcmk-2 * Resource action: r192.168.122.107 monitor=5000 on pcmk-2 * Resource action: lsb-dummy start on pcmk-2 * Resource action: lsb-dummy monitor=5000 on pcmk-2 Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started pcmk-1 * Resource Group: group-1: * r192.168.122.105 (ocf:heartbeat:IPaddr): Started pcmk-2 * r192.168.122.106 (ocf:heartbeat:IPaddr): Started pcmk-2 * r192.168.122.107 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 * migrator (ocf:pacemaker:Dummy): Started pcmk-4 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-2 ] - * Slaves: [ pcmk-1 pcmk-3 pcmk-4 ] + * Promoted: [ pcmk-2 ] + * Unpromoted: [ pcmk-1 pcmk-3 pcmk-4 ] diff --git a/cts/scheduler/summary/master-demote-block.summary b/cts/scheduler/summary/master-demote-block.summary index f715aa9887..e4fc100651 100644 --- a/cts/scheduler/summary/master-demote-block.summary +++ b/cts/scheduler/summary/master-demote-block.summary @@ -1,26 +1,26 @@ 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: * Node List: * Node dl380g5c: standby (with active resources) * Online: [ dl380g5d ] * Full List of Resources: * Clone Set: stateful [dummy] (promotable): - * dummy (ocf:pacemaker:Stateful): FAILED Master dl380g5c (blocked) - * Slaves: [ dl380g5d ] + * dummy (ocf:pacemaker:Stateful): FAILED Promoted 
dl380g5c (blocked) + * Unpromoted: [ dl380g5d ] Transition Summary: Executing Cluster Transition: * Resource action: dummy:1 monitor=20000 on dl380g5d Revised Cluster Status: * Node List: * Node dl380g5c: standby (with active resources) * Online: [ dl380g5d ] * Full List of Resources: * Clone Set: stateful [dummy] (promotable): - * dummy (ocf:pacemaker:Stateful): FAILED Master dl380g5c (blocked) - * Slaves: [ dl380g5d ] + * dummy (ocf:pacemaker:Stateful): FAILED Promoted dl380g5c (blocked) + * Unpromoted: [ dl380g5d ] diff --git a/cts/scheduler/summary/master-demote.summary b/cts/scheduler/summary/master-demote.summary index db687acbc2..a597ec76c0 100644 --- a/cts/scheduler/summary/master-demote.summary +++ b/cts/scheduler/summary/master-demote.summary @@ -1,70 +1,70 @@ Current cluster status: * Node List: * Online: [ cxa1 cxb1 ] * Full List of Resources: * cyrus_address (ocf:heartbeat:IPaddr2): Started cxa1 * cyrus_master (ocf:heartbeat:cyrus-imap): Stopped * cyrus_syslogd (ocf:heartbeat:syslogd): Stopped * cyrus_filesys (ocf:heartbeat:Filesystem): Stopped * cyrus_volgroup (ocf:heartbeat:VolGroup): Stopped * Clone Set: cyrus_drbd [cyrus_drbd_node] (promotable): - * Masters: [ cxa1 ] - * Slaves: [ cxb1 ] + * Promoted: [ cxa1 ] + * Unpromoted: [ cxb1 ] * named_address (ocf:heartbeat:IPaddr2): Started cxa1 * named_filesys (ocf:heartbeat:Filesystem): Stopped * named_volgroup (ocf:heartbeat:VolGroup): Stopped * named_daemon (ocf:heartbeat:recursor): Stopped * named_syslogd (ocf:heartbeat:syslogd): Stopped * Clone Set: named_drbd [named_drbd_node] (promotable): - * Slaves: [ cxa1 cxb1 ] + * Unpromoted: [ cxa1 cxb1 ] * Clone Set: pingd_clone [pingd_node]: * Started: [ cxa1 cxb1 ] * Clone Set: fence_clone [fence_node]: * Started: [ cxa1 cxb1 ] Transition Summary: * Move named_address ( cxa1 -> cxb1 ) - * Promote named_drbd_node:1 ( Slave -> Master cxb1 ) + * Promote named_drbd_node:1 ( Unpromoted -> Promoted cxb1 ) Executing Cluster Transition: * Resource action: named_address stop on cxa1 * Pseudo action: named_drbd_pre_notify_promote_0 * Resource action: named_address start on cxb1 * Resource action: named_drbd_node:1 notify on cxa1 * Resource action: named_drbd_node:0 notify on cxb1 * Pseudo action: named_drbd_confirmed-pre_notify_promote_0 * Pseudo action: named_drbd_promote_0 * Resource action: named_drbd_node:0 promote on cxb1 * Pseudo action: named_drbd_promoted_0 * Pseudo action: named_drbd_post_notify_promoted_0 * Resource action: named_drbd_node:1 notify on cxa1 * Resource action: named_drbd_node:0 notify on cxb1 * Pseudo action: named_drbd_confirmed-post_notify_promoted_0 * Resource action: named_drbd_node:0 monitor=10000 on cxb1 Revised Cluster Status: * Node List: * Online: [ cxa1 cxb1 ] * Full List of Resources: * cyrus_address (ocf:heartbeat:IPaddr2): Started cxa1 * cyrus_master (ocf:heartbeat:cyrus-imap): Stopped * cyrus_syslogd (ocf:heartbeat:syslogd): Stopped * cyrus_filesys (ocf:heartbeat:Filesystem): Stopped * cyrus_volgroup (ocf:heartbeat:VolGroup): Stopped * Clone Set: cyrus_drbd [cyrus_drbd_node] (promotable): - * Masters: [ cxa1 ] - * Slaves: [ cxb1 ] + * Promoted: [ cxa1 ] + * Unpromoted: [ cxb1 ] * named_address (ocf:heartbeat:IPaddr2): Started cxb1 * named_filesys (ocf:heartbeat:Filesystem): Stopped * named_volgroup (ocf:heartbeat:VolGroup): Stopped * named_daemon (ocf:heartbeat:recursor): Stopped * named_syslogd (ocf:heartbeat:syslogd): Stopped * Clone Set: named_drbd [named_drbd_node] (promotable): - * Masters: [ cxb1 ] - * Slaves: [ cxa1 ] + * Promoted: [ cxb1 ] + * 
Unpromoted: [ cxa1 ] * Clone Set: pingd_clone [pingd_node]: * Started: [ cxa1 cxb1 ] * Clone Set: fence_clone [fence_node]: * Started: [ cxa1 cxb1 ] diff --git a/cts/scheduler/summary/master-depend.summary b/cts/scheduler/summary/master-depend.summary index baba1d0f9b..3df262f90d 100644 --- a/cts/scheduler/summary/master-depend.summary +++ b/cts/scheduler/summary/master-depend.summary @@ -1,62 +1,62 @@ 3 of 10 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ vbox4 ] * OFFLINE: [ vbox3 ] * Full List of Resources: * Clone Set: drbd [drbd0] (promotable): * Stopped: [ vbox3 vbox4 ] * Clone Set: cman_clone [cman]: * Stopped: [ vbox3 vbox4 ] * Clone Set: clvmd_clone [clvmd]: * Stopped: [ vbox3 vbox4 ] * vmnci36 (ocf:heartbeat:vm): Stopped * vmnci37 (ocf:heartbeat:vm): Stopped (disabled) * vmnci38 (ocf:heartbeat:vm): Stopped (disabled) * vmnci55 (ocf:heartbeat:vm): Stopped (disabled) Transition Summary: * Start drbd0:0 ( vbox4 ) * Start cman:0 ( vbox4 ) Executing Cluster Transition: * Resource action: drbd0:0 monitor on vbox4 * Pseudo action: drbd_pre_notify_start_0 * Resource action: cman:0 monitor on vbox4 * Pseudo action: cman_clone_start_0 * Resource action: clvmd:0 monitor on vbox4 * Resource action: vmnci36 monitor on vbox4 * Resource action: vmnci37 monitor on vbox4 * Resource action: vmnci38 monitor on vbox4 * Resource action: vmnci55 monitor on vbox4 * Pseudo action: drbd_confirmed-pre_notify_start_0 * Pseudo action: drbd_start_0 * Resource action: cman:0 start on vbox4 * Pseudo action: cman_clone_running_0 * Resource action: drbd0:0 start on vbox4 * Pseudo action: drbd_running_0 * Pseudo action: drbd_post_notify_running_0 * Resource action: drbd0:0 notify on vbox4 * Pseudo action: drbd_confirmed-post_notify_running_0 * Resource action: drbd0:0 monitor=60000 on vbox4 Revised Cluster Status: * Node List: * Online: [ vbox4 ] * OFFLINE: [ vbox3 ] * Full List of Resources: * Clone Set: drbd [drbd0] (promotable): - * Slaves: [ vbox4 ] + * Unpromoted: [ vbox4 ] * Stopped: [ vbox3 ] * Clone Set: cman_clone [cman]: * Started: [ vbox4 ] * Stopped: [ vbox3 ] * Clone Set: clvmd_clone [clvmd]: * Stopped: [ vbox3 vbox4 ] * vmnci36 (ocf:heartbeat:vm): Stopped * vmnci37 (ocf:heartbeat:vm): Stopped (disabled) * vmnci38 (ocf:heartbeat:vm): Stopped (disabled) * vmnci55 (ocf:heartbeat:vm): Stopped (disabled) diff --git a/cts/scheduler/summary/master-dependent-ban.summary b/cts/scheduler/summary/master-dependent-ban.summary index 8f2d6d20de..985326a797 100644 --- a/cts/scheduler/summary/master-dependent-ban.summary +++ b/cts/scheduler/summary/master-dependent-ban.summary @@ -1,38 +1,38 @@ Current cluster status: * Node List: * Online: [ c6 c7 c8 ] * Full List of Resources: * Clone Set: ms_drbd-dtest1 [p_drbd-dtest1] (promotable): - * Slaves: [ c6 c7 ] + * Unpromoted: [ c6 c7 ] * p_dtest1 (ocf:heartbeat:Dummy): Stopped Transition Summary: - * Promote p_drbd-dtest1:0 ( Slave -> Master c7 ) + * Promote p_drbd-dtest1:0 ( Unpromoted -> Promoted c7 ) * Start p_dtest1 ( c7 ) Executing Cluster Transition: * Pseudo action: ms_drbd-dtest1_pre_notify_promote_0 * Resource action: p_drbd-dtest1 notify on c7 * Resource action: p_drbd-dtest1 notify on c6 * Pseudo action: ms_drbd-dtest1_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd-dtest1_promote_0 * Resource action: p_drbd-dtest1 promote on c7 * Pseudo action: ms_drbd-dtest1_promoted_0 * Pseudo action: ms_drbd-dtest1_post_notify_promoted_0 * Resource action: p_drbd-dtest1 notify on c7 * 
Resource action: p_drbd-dtest1 notify on c6 * Pseudo action: ms_drbd-dtest1_confirmed-post_notify_promoted_0 * Resource action: p_dtest1 start on c7 * Resource action: p_drbd-dtest1 monitor=10000 on c7 * Resource action: p_drbd-dtest1 monitor=20000 on c6 Revised Cluster Status: * Node List: * Online: [ c6 c7 c8 ] * Full List of Resources: * Clone Set: ms_drbd-dtest1 [p_drbd-dtest1] (promotable): - * Masters: [ c7 ] - * Slaves: [ c6 ] + * Promoted: [ c7 ] + * Unpromoted: [ c6 ] * p_dtest1 (ocf:heartbeat:Dummy): Started c7 diff --git a/cts/scheduler/summary/master-failed-demote-2.summary b/cts/scheduler/summary/master-failed-demote-2.summary index c7d476a801..198d9ad3ee 100644 --- a/cts/scheduler/summary/master-failed-demote-2.summary +++ b/cts/scheduler/summary/master-failed-demote-2.summary @@ -1,47 +1,47 @@ Current cluster status: * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: * stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Slave dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Slave dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a Transition Summary: - * Stop stateful-1:0 ( Slave dl380g5b ) due to node availability - * Promote stateful-1:1 ( Slave -> Master dl380g5a ) - * Promote stateful-2:1 ( Slave -> Master dl380g5a ) + * Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability + * Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a ) + * Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a ) Executing Cluster Transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 monitor=10000 on dl380g5a * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: * stateful-1:0 (ocf:heartbeat:Stateful): Stopped * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Master dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a diff --git a/cts/scheduler/summary/master-failed-demote.summary b/cts/scheduler/summary/master-failed-demote.summary index 4377898f61..884a380063 100644 --- a/cts/scheduler/summary/master-failed-demote.summary +++ b/cts/scheduler/summary/master-failed-demote.summary @@ -1,64 +1,64 @@ Current cluster status: * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: * stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: 
group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Slave dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Slave dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a Transition Summary: - * Stop stateful-1:0 ( Slave dl380g5b ) due to node availability - * Promote stateful-1:1 ( Slave -> Master dl380g5a ) - * Promote stateful-2:1 ( Slave -> Master dl380g5a ) + * Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability + * Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a ) + * Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a ) Executing Cluster Transition: * Resource action: stateful-1:1 cancel=20000 on dl380g5a * Resource action: stateful-2:1 cancel=20000 on dl380g5a * Pseudo action: ms-sf_pre_notify_stop_0 * Resource action: stateful-1:0 notify on dl380g5b * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_stop_0 * Pseudo action: ms-sf_stop_0 * Pseudo action: group:0_stop_0 * Resource action: stateful-1:0 stop on dl380g5b * Pseudo action: group:0_stopped_0 * Pseudo action: ms-sf_stopped_0 * Pseudo action: ms-sf_post_notify_stopped_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_stopped_0 * Pseudo action: ms-sf_pre_notify_promote_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-pre_notify_promote_0 * Pseudo action: ms-sf_promote_0 * Pseudo action: group:1_promote_0 * Resource action: stateful-1:1 promote on dl380g5a * Resource action: stateful-2:1 promote on dl380g5a * Pseudo action: group:1_promoted_0 * Pseudo action: ms-sf_promoted_0 * Pseudo action: ms-sf_post_notify_promoted_0 * Resource action: stateful-1:1 notify on dl380g5a * Resource action: stateful-2:1 notify on dl380g5a * Pseudo action: ms-sf_confirmed-post_notify_promoted_0 * Resource action: stateful-1:1 monitor=10000 on dl380g5a * Resource action: stateful-2:1 monitor=10000 on dl380g5a Revised Cluster Status: * Node List: * Online: [ dl380g5a dl380g5b ] * Full List of Resources: * Clone Set: ms-sf [group] (promotable) (unique): * Resource Group: group:0: * stateful-1:0 (ocf:heartbeat:Stateful): Stopped * stateful-2:0 (ocf:heartbeat:Stateful): Stopped * Resource Group: group:1: - * stateful-1:1 (ocf:heartbeat:Stateful): Master dl380g5a - * stateful-2:1 (ocf:heartbeat:Stateful): Master dl380g5a + * stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a + * stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a diff --git a/cts/scheduler/summary/master-group.summary b/cts/scheduler/summary/master-group.summary index 5528f9cc22..f06047c34f 100644 --- a/cts/scheduler/summary/master-group.summary +++ b/cts/scheduler/summary/master-group.summary @@ -1,37 +1,37 @@ Current cluster status: * Node List: * Online: [ rh44-1 rh44-2 ] * Full List of Resources: * Resource Group: test: * resource_1 (ocf:heartbeat:IPaddr): Started rh44-1 * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique): * Resource Group: grp_ms_sf:0: - * master_slave_Stateful:0 (ocf:heartbeat:Stateful): Slave rh44-2 + * master_slave_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2 * Resource Group: grp_ms_sf:1: - * master_slave_Stateful:1 (ocf:heartbeat:Stateful): Slave rh44-1 + * master_slave_Stateful:1 (ocf:heartbeat:Stateful): Unpromoted rh44-1 Transition Summary: - * Promote 
master_slave_Stateful:1 ( Slave -> Master rh44-1 ) + * Promote master_slave_Stateful:1 ( Unpromoted -> Promoted rh44-1 ) Executing Cluster Transition: * Resource action: master_slave_Stateful:1 cancel=5000 on rh44-1 * Pseudo action: ms-sf_promote_0 * Pseudo action: grp_ms_sf:1_promote_0 * Resource action: master_slave_Stateful:1 promote on rh44-1 * Pseudo action: grp_ms_sf:1_promoted_0 * Resource action: master_slave_Stateful:1 monitor=6000 on rh44-1 * Pseudo action: ms-sf_promoted_0 Revised Cluster Status: * Node List: * Online: [ rh44-1 rh44-2 ] * Full List of Resources: * Resource Group: test: * resource_1 (ocf:heartbeat:IPaddr): Started rh44-1 * Clone Set: ms-sf [grp_ms_sf] (promotable) (unique): * Resource Group: grp_ms_sf:0: - * master_slave_Stateful:0 (ocf:heartbeat:Stateful): Slave rh44-2 + * master_slave_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2 * Resource Group: grp_ms_sf:1: - * master_slave_Stateful:1 (ocf:heartbeat:Stateful): Master rh44-1 + * master_slave_Stateful:1 (ocf:heartbeat:Stateful): Promoted rh44-1 diff --git a/cts/scheduler/summary/master-move.summary b/cts/scheduler/summary/master-move.summary index c098afcacc..2fb2206605 100644 --- a/cts/scheduler/summary/master-move.summary +++ b/cts/scheduler/summary/master-move.summary @@ -1,72 +1,72 @@ Current cluster status: * Node List: * Online: [ bl460g1n13 bl460g1n14 ] * Full List of Resources: * Resource Group: grpDRBD: * dummy01 (ocf:pacemaker:Dummy): FAILED bl460g1n13 * dummy02 (ocf:pacemaker:Dummy): Started bl460g1n13 * dummy03 (ocf:pacemaker:Dummy): Stopped * Clone Set: msDRBD [prmDRBD] (promotable): - * Masters: [ bl460g1n13 ] - * Slaves: [ bl460g1n14 ] + * Promoted: [ bl460g1n13 ] + * Unpromoted: [ bl460g1n14 ] Transition Summary: * Recover dummy01 ( bl460g1n13 -> bl460g1n14 ) * Move dummy02 ( bl460g1n13 -> bl460g1n14 ) * Start dummy03 ( bl460g1n14 ) - * Demote prmDRBD:0 ( Master -> Slave bl460g1n13 ) - * Promote prmDRBD:1 ( Slave -> Master bl460g1n14 ) + * Demote prmDRBD:0 ( Promoted -> Unpromoted bl460g1n13 ) + * Promote prmDRBD:1 ( Unpromoted -> Promoted bl460g1n14 ) Executing Cluster Transition: * Pseudo action: grpDRBD_stop_0 * Resource action: dummy02 stop on bl460g1n13 * Resource action: prmDRBD:0 cancel=10000 on bl460g1n13 * Resource action: prmDRBD:1 cancel=20000 on bl460g1n14 * Pseudo action: msDRBD_pre_notify_demote_0 * Resource action: dummy01 stop on bl460g1n13 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-pre_notify_demote_0 * Pseudo action: grpDRBD_stopped_0 * Pseudo action: msDRBD_demote_0 * Resource action: prmDRBD:0 demote on bl460g1n13 * Pseudo action: msDRBD_demoted_0 * Pseudo action: msDRBD_post_notify_demoted_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-post_notify_demoted_0 * Pseudo action: msDRBD_pre_notify_promote_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-pre_notify_promote_0 * Pseudo action: msDRBD_promote_0 * Resource action: prmDRBD:1 promote on bl460g1n14 * Pseudo action: msDRBD_promoted_0 * Pseudo action: msDRBD_post_notify_promoted_0 * Resource action: prmDRBD:0 notify on bl460g1n13 * Resource action: prmDRBD:1 notify on bl460g1n14 * Pseudo action: msDRBD_confirmed-post_notify_promoted_0 * Pseudo action: grpDRBD_start_0 * Resource action: dummy01 start on bl460g1n14 * Resource action: dummy02 start on 
bl460g1n14 * Resource action: dummy03 start on bl460g1n14 * Resource action: prmDRBD:0 monitor=20000 on bl460g1n13 * Resource action: prmDRBD:1 monitor=10000 on bl460g1n14 * Pseudo action: grpDRBD_running_0 * Resource action: dummy01 monitor=10000 on bl460g1n14 * Resource action: dummy02 monitor=10000 on bl460g1n14 * Resource action: dummy03 monitor=10000 on bl460g1n14 Revised Cluster Status: * Node List: * Online: [ bl460g1n13 bl460g1n14 ] * Full List of Resources: * Resource Group: grpDRBD: * dummy01 (ocf:pacemaker:Dummy): Started bl460g1n14 * dummy02 (ocf:pacemaker:Dummy): Started bl460g1n14 * dummy03 (ocf:pacemaker:Dummy): Started bl460g1n14 * Clone Set: msDRBD [prmDRBD] (promotable): - * Masters: [ bl460g1n14 ] - * Slaves: [ bl460g1n13 ] + * Promoted: [ bl460g1n14 ] + * Unpromoted: [ bl460g1n13 ] diff --git a/cts/scheduler/summary/master-notify.summary b/cts/scheduler/summary/master-notify.summary index a5fced41db..f0fb04027d 100644 --- a/cts/scheduler/summary/master-notify.summary +++ b/cts/scheduler/summary/master-notify.summary @@ -1,36 +1,36 @@ Current cluster status: * Node List: * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] * Full List of Resources: * shooter (stonith:fence_xvm): Started rhel7-auto1 * Clone Set: fake-master [fake] (promotable): - * Slaves: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] + * Unpromoted: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] Transition Summary: - * Promote fake:0 ( Slave -> Master rhel7-auto1 ) + * Promote fake:0 ( Unpromoted -> Promoted rhel7-auto1 ) Executing Cluster Transition: * Pseudo action: fake-master_pre_notify_promote_0 * Resource action: fake notify on rhel7-auto1 * Resource action: fake notify on rhel7-auto3 * Resource action: fake notify on rhel7-auto2 * Pseudo action: fake-master_confirmed-pre_notify_promote_0 * Pseudo action: fake-master_promote_0 * Resource action: fake promote on rhel7-auto1 * Pseudo action: fake-master_promoted_0 * Pseudo action: fake-master_post_notify_promoted_0 * Resource action: fake notify on rhel7-auto1 * Resource action: fake notify on rhel7-auto3 * Resource action: fake notify on rhel7-auto2 * Pseudo action: fake-master_confirmed-post_notify_promoted_0 Revised Cluster Status: * Node List: * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ] * Full List of Resources: * shooter (stonith:fence_xvm): Started rhel7-auto1 * Clone Set: fake-master [fake] (promotable): - * Masters: [ rhel7-auto1 ] - * Slaves: [ rhel7-auto2 rhel7-auto3 ] + * Promoted: [ rhel7-auto1 ] + * Unpromoted: [ rhel7-auto2 rhel7-auto3 ] diff --git a/cts/scheduler/summary/master-ordering.summary b/cts/scheduler/summary/master-ordering.summary index 3d25afcf86..3222e18341 100644 --- a/cts/scheduler/summary/master-ordering.summary +++ b/cts/scheduler/summary/master-ordering.summary @@ -1,96 +1,96 @@ Current cluster status: * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] * Full List of Resources: * mysql-server (ocf:heartbeat:mysql): Stopped * extip_1 (ocf:heartbeat:IPaddr2): Stopped * extip_2 (ocf:heartbeat:IPaddr2): Stopped * Resource Group: group_main: * intip_0_main (ocf:heartbeat:IPaddr2): Stopped * intip_1_master (ocf:heartbeat:IPaddr2): Stopped * intip_2_slave (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms_drbd_www [drbd_www] (promotable): * Stopped: [ webcluster01 webcluster02 ] * Clone Set: clone_ocfs2_www [ocfs2_www] (unique): * ocfs2_www:0 (ocf:heartbeat:Filesystem): Stopped * ocfs2_www:1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: clone_webservice [group_webservice]: * Stopped: [ webcluster01 webcluster02 ] * 
Clone Set: ms_drbd_mysql [drbd_mysql] (promotable): * Stopped: [ webcluster01 webcluster02 ] * fs_mysql (ocf:heartbeat:Filesystem): Stopped Transition Summary: * Start extip_1 ( webcluster01 ) * Start extip_2 ( webcluster01 ) * Start intip_1_master ( webcluster01 ) * Start intip_2_slave ( webcluster01 ) * Start drbd_www:0 ( webcluster01 ) * Start drbd_mysql:0 ( webcluster01 ) Executing Cluster Transition: * Resource action: mysql-server monitor on webcluster01 * Resource action: extip_1 monitor on webcluster01 * Resource action: extip_2 monitor on webcluster01 * Resource action: intip_0_main monitor on webcluster01 * Resource action: intip_1_master monitor on webcluster01 * Resource action: intip_2_slave monitor on webcluster01 * Resource action: drbd_www:0 monitor on webcluster01 * Pseudo action: ms_drbd_www_pre_notify_start_0 * Resource action: ocfs2_www:0 monitor on webcluster01 * Resource action: ocfs2_www:1 monitor on webcluster01 * Resource action: apache2:0 monitor on webcluster01 * Resource action: mysql-proxy:0 monitor on webcluster01 * Resource action: drbd_mysql:0 monitor on webcluster01 * Pseudo action: ms_drbd_mysql_pre_notify_start_0 * Resource action: fs_mysql monitor on webcluster01 * Resource action: extip_1 start on webcluster01 * Resource action: extip_2 start on webcluster01 * Resource action: intip_1_master start on webcluster01 * Resource action: intip_2_slave start on webcluster01 * Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_www_start_0 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_mysql_start_0 * Resource action: extip_1 monitor=30000 on webcluster01 * Resource action: extip_2 monitor=30000 on webcluster01 * Resource action: intip_1_master monitor=30000 on webcluster01 * Resource action: intip_2_slave monitor=30000 on webcluster01 * Resource action: drbd_www:0 start on webcluster01 * Pseudo action: ms_drbd_www_running_0 * Resource action: drbd_mysql:0 start on webcluster01 * Pseudo action: ms_drbd_mysql_running_0 * Pseudo action: ms_drbd_www_post_notify_running_0 * Pseudo action: ms_drbd_mysql_post_notify_running_0 * Resource action: drbd_www:0 notify on webcluster01 * Pseudo action: ms_drbd_www_confirmed-post_notify_running_0 * Resource action: drbd_mysql:0 notify on webcluster01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ webcluster01 ] * OFFLINE: [ webcluster02 ] * Full List of Resources: * mysql-server (ocf:heartbeat:mysql): Stopped * extip_1 (ocf:heartbeat:IPaddr2): Started webcluster01 * extip_2 (ocf:heartbeat:IPaddr2): Started webcluster01 * Resource Group: group_main: * intip_0_main (ocf:heartbeat:IPaddr2): Stopped * intip_1_master (ocf:heartbeat:IPaddr2): Started webcluster01 * intip_2_slave (ocf:heartbeat:IPaddr2): Started webcluster01 * Clone Set: ms_drbd_www [drbd_www] (promotable): - * Slaves: [ webcluster01 ] + * Unpromoted: [ webcluster01 ] * Stopped: [ webcluster02 ] * Clone Set: clone_ocfs2_www [ocfs2_www] (unique): * ocfs2_www:0 (ocf:heartbeat:Filesystem): Stopped * ocfs2_www:1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: clone_webservice [group_webservice]: * Stopped: [ webcluster01 webcluster02 ] * Clone Set: ms_drbd_mysql [drbd_mysql] (promotable): - * Slaves: [ webcluster01 ] + * Unpromoted: [ webcluster01 ] * Stopped: [ webcluster02 ] * fs_mysql (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/master-partially-demoted-group.summary 
b/cts/scheduler/summary/master-partially-demoted-group.summary index 4ab1af39d3..e5b35480d7 100644 --- a/cts/scheduler/summary/master-partially-demoted-group.summary +++ b/cts/scheduler/summary/master-partially-demoted-group.summary @@ -1,118 +1,118 @@ Current cluster status: * Node List: * Online: [ sd01-0 sd01-1 ] * Full List of Resources: * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 * Resource Group: cdev-pool-0-iscsi-export: * cdev-pool-0-iscsi-target (ocf:vds-ok:iSCSITarget): Started sd01-1 * cdev-pool-0-iscsi-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started sd01-1 * Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable): - * Masters: [ sd01-1 ] - * Slaves: [ sd01-0 ] + * Promoted: [ sd01-1 ] + * Unpromoted: [ sd01-0 ] * Clone Set: cl-ietd [ietd]: * Started: [ sd01-0 sd01-1 ] * Clone Set: cl-vlan1-net [vlan1-net]: * Started: [ sd01-0 sd01-1 ] * Resource Group: cdev-pool-0-iscsi-vips: * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-1 * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-1 * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): - * Masters: [ sd01-1 ] - * Slaves: [ sd01-0 ] + * Promoted: [ sd01-1 ] + * Unpromoted: [ sd01-0 ] Transition Summary: * Move vip-164 ( sd01-1 -> sd01-0 ) * Move vip-165 ( sd01-1 -> sd01-0 ) * Move cdev-pool-0-iscsi-target ( sd01-1 -> sd01-0 ) * Move cdev-pool-0-iscsi-lun-1 ( sd01-1 -> sd01-0 ) - * Demote vip-164-fw:0 ( Master -> Slave sd01-1 ) - * Promote vip-164-fw:1 ( Slave -> Master sd01-0 ) - * Promote vip-165-fw:1 ( Slave -> Master sd01-0 ) - * Demote cdev-pool-0-drbd:0 ( Master -> Slave sd01-1 ) - * Promote cdev-pool-0-drbd:1 ( Slave -> Master sd01-0 ) + * Demote vip-164-fw:0 ( Promoted -> Unpromoted sd01-1 ) + * Promote vip-164-fw:1 ( Unpromoted -> Promoted sd01-0 ) + * Promote vip-165-fw:1 ( Unpromoted -> Promoted sd01-0 ) + * Demote cdev-pool-0-drbd:0 ( Promoted -> Unpromoted sd01-1 ) + * Promote cdev-pool-0-drbd:1 ( Unpromoted -> Promoted sd01-0 ) Executing Cluster Transition: * Resource action: vip-165-fw monitor=10000 on sd01-1 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demote_0 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_demote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demote_0 * Resource action: vip-164-fw demote on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_demote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_demoted_0 * Resource action: vip-164-fw monitor=10000 on sd01-1 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_demoted_0 * Pseudo action: cdev-pool-0-iscsi-vips_stop_0 * Resource action: vip-165 stop on sd01-1 * Resource action: vip-164 stop on sd01-1 * Pseudo action: cdev-pool-0-iscsi-vips_stopped_0 * Pseudo action: cdev-pool-0-iscsi-export_stop_0 * Resource action: cdev-pool-0-iscsi-lun-1 stop on sd01-1 * Resource action: cdev-pool-0-iscsi-target stop on sd01-1 * Pseudo action: cdev-pool-0-iscsi-export_stopped_0 * Pseudo action: ms-cdev-pool-0-drbd_demote_0 * Resource action: cdev-pool-0-drbd demote on sd01-1 * Pseudo action: ms-cdev-pool-0-drbd_demoted_0 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_demoted_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_demoted_0 * Pseudo action: ms-cdev-pool-0-drbd_pre_notify_promote_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 
* Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-pre_notify_promote_0 * Pseudo action: ms-cdev-pool-0-drbd_promote_0 * Resource action: cdev-pool-0-drbd promote on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_promoted_0 * Pseudo action: ms-cdev-pool-0-drbd_post_notify_promoted_0 * Resource action: cdev-pool-0-drbd notify on sd01-1 * Resource action: cdev-pool-0-drbd notify on sd01-0 * Pseudo action: ms-cdev-pool-0-drbd_confirmed-post_notify_promoted_0 * Pseudo action: cdev-pool-0-iscsi-export_start_0 * Resource action: cdev-pool-0-iscsi-target start on sd01-0 * Resource action: cdev-pool-0-iscsi-lun-1 start on sd01-0 * Resource action: cdev-pool-0-drbd monitor=20000 on sd01-1 * Resource action: cdev-pool-0-drbd monitor=10000 on sd01-0 * Pseudo action: cdev-pool-0-iscsi-export_running_0 * Resource action: cdev-pool-0-iscsi-target monitor=10000 on sd01-0 * Resource action: cdev-pool-0-iscsi-lun-1 monitor=10000 on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips_start_0 * Resource action: vip-164 start on sd01-0 * Resource action: vip-165 start on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips_running_0 * Resource action: vip-164 monitor=30000 on sd01-0 * Resource action: vip-165 monitor=30000 on sd01-0 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:0_promote_0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promote_0 * Resource action: vip-164-fw promote on sd01-0 * Resource action: vip-165-fw promote on sd01-0 * Pseudo action: cdev-pool-0-iscsi-vips-fw:1_promoted_0 * Pseudo action: ms-cdev-pool-0-iscsi-vips-fw_promoted_0 Revised Cluster Status: * Node List: * Online: [ sd01-0 sd01-1 ] * Full List of Resources: * stonith-xvm-sd01-0 (stonith:fence_xvm): Started sd01-1 * stonith-xvm-sd01-1 (stonith:fence_xvm): Started sd01-0 * Resource Group: cdev-pool-0-iscsi-export: * cdev-pool-0-iscsi-target (ocf:vds-ok:iSCSITarget): Started sd01-0 * cdev-pool-0-iscsi-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started sd01-0 * Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable): - * Masters: [ sd01-0 ] - * Slaves: [ sd01-1 ] + * Promoted: [ sd01-0 ] + * Unpromoted: [ sd01-1 ] * Clone Set: cl-ietd [ietd]: * Started: [ sd01-0 sd01-1 ] * Clone Set: cl-vlan1-net [vlan1-net]: * Started: [ sd01-0 sd01-1 ] * Resource Group: cdev-pool-0-iscsi-vips: * vip-164 (ocf:heartbeat:IPaddr2): Started sd01-0 * vip-165 (ocf:heartbeat:IPaddr2): Started sd01-0 * Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable): - * Masters: [ sd01-0 ] - * Slaves: [ sd01-1 ] + * Promoted: [ sd01-0 ] + * Unpromoted: [ sd01-1 ] diff --git a/cts/scheduler/summary/master-probed-score.summary b/cts/scheduler/summary/master-probed-score.summary index ea2346901a..acf3171fe9 100644 --- a/cts/scheduler/summary/master-probed-score.summary +++ b/cts/scheduler/summary/master-probed-score.summary @@ -1,329 +1,329 @@ 1 of 60 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Full List of Resources: * Clone Set: AdminClone [AdminDrbd] (promotable): * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * CronAmbientTemperature (ocf:heartbeat:symlink): Stopped * StonithHypatia (stonith:fence_nut): Stopped * StonithOrestes (stonith:fence_nut): Stopped * Resource Group: DhcpGroup: * SymlinkDhcpdConf (ocf:heartbeat:symlink): Stopped * 
SymlinkSysconfigDhcpd (ocf:heartbeat:symlink): Stopped * SymlinkDhcpdLeases (ocf:heartbeat:symlink): Stopped * Dhcpd (lsb:dhcpd): Stopped (disabled) * DhcpIP (ocf:heartbeat:IPaddr2): Stopped * Clone Set: CupsClone [CupsGroup]: * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: IPClone [IPGroup] (unique): * Resource Group: IPGroup:0: * ClusterIP:0 (ocf:heartbeat:IPaddr2): Stopped * ClusterIPLocal:0 (ocf:heartbeat:IPaddr2): Stopped * ClusterIPSandbox:0 (ocf:heartbeat:IPaddr2): Stopped * Resource Group: IPGroup:1: * ClusterIP:1 (ocf:heartbeat:IPaddr2): Stopped * ClusterIPLocal:1 (ocf:heartbeat:IPaddr2): Stopped * ClusterIPSandbox:1 (ocf:heartbeat:IPaddr2): Stopped * Clone Set: LibvirtdClone [LibvirtdGroup]: * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: TftpClone [TftpGroup]: * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: ExportsClone [ExportsGroup]: * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: FilesystemClone [FilesystemGroup]: * Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * KVM-guest (ocf:heartbeat:VirtualDomain): Stopped * Proxy (ocf:heartbeat:VirtualDomain): Stopped Transition Summary: - * Promote AdminDrbd:0 ( Stopped -> Master hypatia-corosync.nevis.columbia.edu ) - * Promote AdminDrbd:1 ( Stopped -> Master orestes-corosync.nevis.columbia.edu ) + * Promote AdminDrbd:0 ( Stopped -> Promoted hypatia-corosync.nevis.columbia.edu ) + * Promote AdminDrbd:1 ( Stopped -> Promoted orestes-corosync.nevis.columbia.edu ) * Start CronAmbientTemperature ( hypatia-corosync.nevis.columbia.edu ) * Start StonithHypatia ( orestes-corosync.nevis.columbia.edu ) * Start StonithOrestes ( hypatia-corosync.nevis.columbia.edu ) * Start SymlinkDhcpdConf ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkSysconfigDhcpd ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkDhcpdLeases ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkUsrShareCups:0 ( hypatia-corosync.nevis.columbia.edu ) * Start SymlinkCupsdConf:0 ( hypatia-corosync.nevis.columbia.edu ) * Start Cups:0 ( hypatia-corosync.nevis.columbia.edu ) * Start SymlinkUsrShareCups:1 ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkCupsdConf:1 ( orestes-corosync.nevis.columbia.edu ) * Start Cups:1 ( orestes-corosync.nevis.columbia.edu ) * Start ClusterIP:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ClusterIPLocal:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ClusterIPSandbox:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ClusterIP:1 ( orestes-corosync.nevis.columbia.edu ) * Start ClusterIPLocal:1 ( orestes-corosync.nevis.columbia.edu ) * Start ClusterIPSandbox:1 ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkEtcLibvirt:0 ( hypatia-corosync.nevis.columbia.edu ) * Start Libvirtd:0 ( hypatia-corosync.nevis.columbia.edu ) * Start SymlinkEtcLibvirt:1 ( orestes-corosync.nevis.columbia.edu ) * Start Libvirtd:1 ( orestes-corosync.nevis.columbia.edu ) * Start SymlinkTftp:0 ( hypatia-corosync.nevis.columbia.edu ) * Start Xinetd:0 ( hypatia-corosync.nevis.columbia.edu ) * Start SymlinkTftp:1 ( orestes-corosync.nevis.columbia.edu ) * Start Xinetd:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportMail:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportMailInbox:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportMailFolders:0 ( hypatia-corosync.nevis.columbia.edu ) * Start 
ExportMailForward:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportMailProcmailrc:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportUsrNevis:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportUsrNevisOffsite:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportWWW:0 ( hypatia-corosync.nevis.columbia.edu ) * Start ExportMail:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportMailInbox:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportMailFolders:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportMailForward:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportMailProcmailrc:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportUsrNevis:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportUsrNevisOffsite:1 ( orestes-corosync.nevis.columbia.edu ) * Start ExportWWW:1 ( orestes-corosync.nevis.columbia.edu ) * Start AdminLvm:0 ( hypatia-corosync.nevis.columbia.edu ) * Start FSUsrNevis:0 ( hypatia-corosync.nevis.columbia.edu ) * Start FSVarNevis:0 ( hypatia-corosync.nevis.columbia.edu ) * Start FSVirtualMachines:0 ( hypatia-corosync.nevis.columbia.edu ) * Start FSMail:0 ( hypatia-corosync.nevis.columbia.edu ) * Start FSWork:0 ( hypatia-corosync.nevis.columbia.edu ) * Start AdminLvm:1 ( orestes-corosync.nevis.columbia.edu ) * Start FSUsrNevis:1 ( orestes-corosync.nevis.columbia.edu ) * Start FSVarNevis:1 ( orestes-corosync.nevis.columbia.edu ) * Start FSVirtualMachines:1 ( orestes-corosync.nevis.columbia.edu ) * Start FSMail:1 ( orestes-corosync.nevis.columbia.edu ) * Start FSWork:1 ( orestes-corosync.nevis.columbia.edu ) * Start KVM-guest ( hypatia-corosync.nevis.columbia.edu ) * Start Proxy ( orestes-corosync.nevis.columbia.edu ) Executing Cluster Transition: * Pseudo action: AdminClone_pre_notify_start_0 * Resource action: StonithHypatia start on orestes-corosync.nevis.columbia.edu * Resource action: StonithOrestes start on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkEtcLibvirt:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkTftp:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Xinetd:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkTftp:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: Xinetd:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMail:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailForward:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportWWW:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMail:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailForward:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:1 monitor on 
orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: ExportWWW:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: AdminLvm:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 monitor on hypatia-corosync.nevis.columbia.edu * Resource action: AdminLvm:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 monitor on orestes-corosync.nevis.columbia.edu * Resource action: KVM-guest monitor on orestes-corosync.nevis.columbia.edu * Resource action: KVM-guest monitor on hypatia-corosync.nevis.columbia.edu * Resource action: Proxy monitor on orestes-corosync.nevis.columbia.edu * Resource action: Proxy monitor on hypatia-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_start_0 * Pseudo action: AdminClone_start_0 * Resource action: AdminDrbd:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_running_0 * Pseudo action: AdminClone_post_notify_running_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_running_0 * Pseudo action: AdminClone_pre_notify_promote_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-pre_notify_promote_0 * Pseudo action: AdminClone_promote_0 * Resource action: AdminDrbd:0 promote on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 promote on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_promoted_0 * Pseudo action: AdminClone_post_notify_promoted_0 * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu * Pseudo action: AdminClone_confirmed-post_notify_promoted_0 * Pseudo action: FilesystemClone_start_0 * Resource action: AdminDrbd:0 monitor=59000 on hypatia-corosync.nevis.columbia.edu * Resource action: AdminDrbd:1 monitor=59000 on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_start_0 * Resource action: AdminLvm:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo 
action: FilesystemGroup:1_start_0 * Resource action: AdminLvm:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 start on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:0_running_0 * Resource action: AdminLvm:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSVarNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSMail:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Resource action: FSWork:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: FilesystemGroup:1_running_0 * Resource action: AdminLvm:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Resource action: FSUsrNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSVarNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSVirtualMachines:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSMail:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Resource action: FSWork:1 monitor=20000 on orestes-corosync.nevis.columbia.edu * Pseudo action: FilesystemClone_running_0 * Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu * Pseudo action: DhcpGroup_start_0 * Resource action: SymlinkDhcpdConf start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkSysconfigDhcpd start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdLeases start on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsClone_start_0 * Pseudo action: IPClone_start_0 * Pseudo action: LibvirtdClone_start_0 * Pseudo action: TftpClone_start_0 * Pseudo action: ExportsClone_start_0 * Resource action: CronAmbientTemperature monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdConf monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkSysconfigDhcpd monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkDhcpdLeases monitor=60000 on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:0_start_0 * Resource action: SymlinkUsrShareCups:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Cups:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:1_start_0 * Resource action: SymlinkUsrShareCups:1 start on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Cups:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: IPGroup:0_start_0 * Resource action: ClusterIP:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: IPGroup:1_start_0 * Resource action: ClusterIP:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:1 start on 
orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:0_start_0 * Resource action: SymlinkEtcLibvirt:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:1_start_0 * Resource action: SymlinkEtcLibvirt:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:0_start_0 * Resource action: SymlinkTftp:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: Xinetd:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:1_start_0 * Resource action: SymlinkTftp:1 start on orestes-corosync.nevis.columbia.edu * Resource action: Xinetd:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: ExportsGroup:0_start_0 * Resource action: ExportMail:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailForward:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:0 start on hypatia-corosync.nevis.columbia.edu * Resource action: ExportWWW:0 start on hypatia-corosync.nevis.columbia.edu * Pseudo action: ExportsGroup:1_start_0 * Resource action: ExportMail:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailInbox:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailFolders:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailForward:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportMailProcmailrc:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevis:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportUsrNevisOffsite:1 start on orestes-corosync.nevis.columbia.edu * Resource action: ExportWWW:1 start on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:0_running_0 * Resource action: SymlinkUsrShareCups:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: Cups:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: CupsGroup:1_running_0 * Resource action: SymlinkUsrShareCups:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: SymlinkCupsdConf:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: Cups:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Pseudo action: CupsClone_running_0 * Pseudo action: IPGroup:0_running_0 * Resource action: ClusterIP:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:0 monitor=31000 on hypatia-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:0 monitor=32000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: IPGroup:1_running_0 * Resource action: ClusterIP:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPLocal:1 monitor=31000 on orestes-corosync.nevis.columbia.edu * Resource action: ClusterIPSandbox:1 monitor=32000 on orestes-corosync.nevis.columbia.edu * Pseudo action: 
IPClone_running_0 * Pseudo action: LibvirtdGroup:0_running_0 * Resource action: SymlinkEtcLibvirt:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Resource action: Libvirtd:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: LibvirtdGroup:1_running_0 * Resource action: SymlinkEtcLibvirt:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Resource action: Libvirtd:1 monitor=30000 on orestes-corosync.nevis.columbia.edu * Pseudo action: LibvirtdClone_running_0 * Pseudo action: TftpGroup:0_running_0 * Resource action: SymlinkTftp:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu * Pseudo action: TftpGroup:1_running_0 * Resource action: SymlinkTftp:1 monitor=60000 on orestes-corosync.nevis.columbia.edu * Pseudo action: TftpClone_running_0 * Pseudo action: ExportsGroup:0_running_0 * Pseudo action: ExportsGroup:1_running_0 * Pseudo action: ExportsClone_running_0 * Resource action: KVM-guest start on hypatia-corosync.nevis.columbia.edu * Resource action: Proxy start on orestes-corosync.nevis.columbia.edu Revised Cluster Status: * Node List: * Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Full List of Resources: * Clone Set: AdminClone [AdminDrbd] (promotable): - * Masters: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] + * Promoted: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * CronAmbientTemperature (ocf:heartbeat:symlink): Started hypatia-corosync.nevis.columbia.edu * StonithHypatia (stonith:fence_nut): Started orestes-corosync.nevis.columbia.edu * StonithOrestes (stonith:fence_nut): Started hypatia-corosync.nevis.columbia.edu * Resource Group: DhcpGroup: * SymlinkDhcpdConf (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu * SymlinkSysconfigDhcpd (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu * SymlinkDhcpdLeases (ocf:heartbeat:symlink): Started orestes-corosync.nevis.columbia.edu * Dhcpd (lsb:dhcpd): Stopped (disabled) * DhcpIP (ocf:heartbeat:IPaddr2): Stopped * Clone Set: CupsClone [CupsGroup]: * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: IPClone [IPGroup] (unique): * Resource Group: IPGroup:0: * ClusterIP:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu * ClusterIPLocal:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu * ClusterIPSandbox:0 (ocf:heartbeat:IPaddr2): Started hypatia-corosync.nevis.columbia.edu * Resource Group: IPGroup:1: * ClusterIP:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu * ClusterIPLocal:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu * ClusterIPSandbox:1 (ocf:heartbeat:IPaddr2): Started orestes-corosync.nevis.columbia.edu * Clone Set: LibvirtdClone [LibvirtdGroup]: * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: TftpClone [TftpGroup]: * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: ExportsClone [ExportsGroup]: * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * Clone Set: FilesystemClone [FilesystemGroup]: * Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ] * KVM-guest (ocf:heartbeat:VirtualDomain): Started hypatia-corosync.nevis.columbia.edu * Proxy (ocf:heartbeat:VirtualDomain): Started orestes-corosync.nevis.columbia.edu diff --git 
a/cts/scheduler/summary/master-promotion-constraint.summary b/cts/scheduler/summary/master-promotion-constraint.summary index 39df0a9f6a..22bc250311 100644 --- a/cts/scheduler/summary/master-promotion-constraint.summary +++ b/cts/scheduler/summary/master-promotion-constraint.summary @@ -1,36 +1,36 @@ 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: * fencing-sbd (stonith:external/sbd): Started hex-13 * Resource Group: g0 (disabled): * d0 (ocf:pacemaker:Dummy): Stopped (disabled) * d1 (ocf:pacemaker:Dummy): Stopped (disabled) * Clone Set: ms0 [s0] (promotable): - * Masters: [ hex-14 ] - * Slaves: [ hex-13 ] + * Promoted: [ hex-14 ] + * Unpromoted: [ hex-13 ] Transition Summary: - * Demote s0:0 ( Master -> Slave hex-14 ) + * Demote s0:0 ( Promoted -> Unpromoted hex-14 ) Executing Cluster Transition: * Resource action: s0:1 cancel=20000 on hex-14 * Pseudo action: ms0_demote_0 * Resource action: s0:1 demote on hex-14 * Pseudo action: ms0_demoted_0 * Resource action: s0:1 monitor=21000 on hex-14 Revised Cluster Status: * Node List: * Online: [ hex-13 hex-14 ] * Full List of Resources: * fencing-sbd (stonith:external/sbd): Started hex-13 * Resource Group: g0 (disabled): * d0 (ocf:pacemaker:Dummy): Stopped (disabled) * d1 (ocf:pacemaker:Dummy): Stopped (disabled) * Clone Set: ms0 [s0] (promotable): - * Slaves: [ hex-13 hex-14 ] + * Unpromoted: [ hex-13 hex-14 ] diff --git a/cts/scheduler/summary/master-pseudo.summary b/cts/scheduler/summary/master-pseudo.summary index 080e4635f1..b28ab7168d 100644 --- a/cts/scheduler/summary/master-pseudo.summary +++ b/cts/scheduler/summary/master-pseudo.summary @@ -1,60 +1,60 @@ Current cluster status: * Node List: * Node raki.linbit: standby * Online: [ sambuca.linbit ] * Full List of Resources: * ip_float_right (ocf:heartbeat:IPaddr2): Stopped * Clone Set: ms_drbd_float [drbd_float] (promotable): - * Slaves: [ sambuca.linbit ] + * Unpromoted: [ sambuca.linbit ] * Resource Group: nfsexport: * ip_nfs (ocf:heartbeat:IPaddr2): Stopped * fs_float (ocf:heartbeat:Filesystem): Stopped Transition Summary: * Start ip_float_right ( sambuca.linbit ) - * Restart drbd_float:0 ( Slave -> Master sambuca.linbit ) due to required ip_float_right start + * Restart drbd_float:0 ( Unpromoted -> Promoted sambuca.linbit ) due to required ip_float_right start * Start ip_nfs ( sambuca.linbit ) Executing Cluster Transition: * Resource action: ip_float_right start on sambuca.linbit * Pseudo action: ms_drbd_float_pre_notify_stop_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_float_stop_0 * Resource action: drbd_float:0 stop on sambuca.linbit * Pseudo action: ms_drbd_float_stopped_0 * Pseudo action: ms_drbd_float_post_notify_stopped_0 * Pseudo action: ms_drbd_float_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_float_pre_notify_start_0 * Pseudo action: ms_drbd_float_confirmed-pre_notify_start_0 * Pseudo action: ms_drbd_float_start_0 * Resource action: drbd_float:0 start on sambuca.linbit * Pseudo action: ms_drbd_float_running_0 * Pseudo action: ms_drbd_float_post_notify_running_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_running_0 * Pseudo action: ms_drbd_float_pre_notify_promote_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: 
ms_drbd_float_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_float_promote_0 * Resource action: drbd_float:0 promote on sambuca.linbit * Pseudo action: ms_drbd_float_promoted_0 * Pseudo action: ms_drbd_float_post_notify_promoted_0 * Resource action: drbd_float:0 notify on sambuca.linbit * Pseudo action: ms_drbd_float_confirmed-post_notify_promoted_0 * Pseudo action: nfsexport_start_0 * Resource action: ip_nfs start on sambuca.linbit Revised Cluster Status: * Node List: * Node raki.linbit: standby * Online: [ sambuca.linbit ] * Full List of Resources: * ip_float_right (ocf:heartbeat:IPaddr2): Started sambuca.linbit * Clone Set: ms_drbd_float [drbd_float] (promotable): - * Masters: [ sambuca.linbit ] + * Promoted: [ sambuca.linbit ] * Resource Group: nfsexport: * ip_nfs (ocf:heartbeat:IPaddr2): Started sambuca.linbit * fs_float (ocf:heartbeat:Filesystem): Stopped diff --git a/cts/scheduler/summary/master-reattach.summary b/cts/scheduler/summary/master-reattach.summary index 47cbcd5035..cf089d9324 100644 --- a/cts/scheduler/summary/master-reattach.summary +++ b/cts/scheduler/summary/master-reattach.summary @@ -1,34 +1,34 @@ Current cluster status: * Node List: * Online: [ dktest1 dktest2 ] * Full List of Resources: * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged): - * drbd1 (ocf:heartbeat:drbd): Master dktest1 (unmanaged) - * drbd1 (ocf:heartbeat:drbd): Slave dktest2 (unmanaged) + * drbd1 (ocf:heartbeat:drbd): Promoted dktest1 (unmanaged) + * drbd1 (ocf:heartbeat:drbd): Unpromoted dktest2 (unmanaged) * Resource Group: apache (unmanaged): * apache-vip (ocf:heartbeat:IPaddr2): Started dktest1 (unmanaged) * mount (ocf:heartbeat:Filesystem): Started dktest1 (unmanaged) * webserver (ocf:heartbeat:apache): Started dktest1 (unmanaged) Transition Summary: Executing Cluster Transition: * Resource action: drbd1:0 monitor=10000 on dktest1 * Resource action: drbd1:0 monitor=11000 on dktest2 * Resource action: apache-vip monitor=60000 on dktest1 * Resource action: mount monitor=10000 on dktest1 * Resource action: webserver monitor=30000 on dktest1 Revised Cluster Status: * Node List: * Online: [ dktest1 dktest2 ] * Full List of Resources: * Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged): - * drbd1 (ocf:heartbeat:drbd): Master dktest1 (unmanaged) - * drbd1 (ocf:heartbeat:drbd): Slave dktest2 (unmanaged) + * drbd1 (ocf:heartbeat:drbd): Promoted dktest1 (unmanaged) + * drbd1 (ocf:heartbeat:drbd): Unpromoted dktest2 (unmanaged) * Resource Group: apache (unmanaged): * apache-vip (ocf:heartbeat:IPaddr2): Started dktest1 (unmanaged) * mount (ocf:heartbeat:Filesystem): Started dktest1 (unmanaged) * webserver (ocf:heartbeat:apache): Started dktest1 (unmanaged) diff --git a/cts/scheduler/summary/master-role.summary b/cts/scheduler/summary/master-role.summary index 6f24fc41f0..588f5230bb 100644 --- a/cts/scheduler/summary/master-role.summary +++ b/cts/scheduler/summary/master-role.summary @@ -1,24 +1,24 @@ Current cluster status: * Node List: * Online: [ sles11-a sles11-b ] * Full List of Resources: * Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable): - * Masters: [ sles11-a sles11-b ] + * Promoted: [ sles11-a sles11-b ] Transition Summary: - * Demote res_Stateful_1:1 ( Master -> Slave sles11-a ) + * Demote res_Stateful_1:1 ( Promoted -> Unpromoted sles11-a ) Executing Cluster Transition: * Pseudo action: ms_res_Stateful_1_demote_0 * Resource action: res_Stateful_1:0 demote on sles11-a * Pseudo action: ms_res_Stateful_1_demoted_0 Revised Cluster Status: * Node List: * Online: [ sles11-a 
sles11-b ] * Full List of Resources: * Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable): - * Masters: [ sles11-b ] - * Slaves: [ sles11-a ] + * Promoted: [ sles11-b ] + * Unpromoted: [ sles11-a ] diff --git a/cts/scheduler/summary/master-score-startup.summary b/cts/scheduler/summary/master-score-startup.summary index b4baf724d6..9f527815d8 100644 --- a/cts/scheduler/summary/master-score-startup.summary +++ b/cts/scheduler/summary/master-score-startup.summary @@ -1,54 +1,54 @@ Current cluster status: * Node List: * Online: [ srv1 srv2 ] * Full List of Resources: * Clone Set: pgsql-ha [pgsqld] (promotable): * Stopped: [ srv1 srv2 ] * pgsql-master-ip (ocf:heartbeat:IPaddr2): Stopped Transition Summary: - * Promote pgsqld:0 ( Stopped -> Master srv1 ) + * Promote pgsqld:0 ( Stopped -> Promoted srv1 ) * Start pgsqld:1 ( srv2 ) * Start pgsql-master-ip ( srv1 ) Executing Cluster Transition: * Resource action: pgsqld:0 monitor on srv1 * Resource action: pgsqld:1 monitor on srv2 * Pseudo action: pgsql-ha_pre_notify_start_0 * Resource action: pgsql-master-ip monitor on srv2 * Resource action: pgsql-master-ip monitor on srv1 * Pseudo action: pgsql-ha_confirmed-pre_notify_start_0 * Pseudo action: pgsql-ha_start_0 * Resource action: pgsqld:0 start on srv1 * Resource action: pgsqld:1 start on srv2 * Pseudo action: pgsql-ha_running_0 * Pseudo action: pgsql-ha_post_notify_running_0 * Resource action: pgsqld:0 notify on srv1 * Resource action: pgsqld:1 notify on srv2 * Pseudo action: pgsql-ha_confirmed-post_notify_running_0 * Pseudo action: pgsql-ha_pre_notify_promote_0 * Resource action: pgsqld:0 notify on srv1 * Resource action: pgsqld:1 notify on srv2 * Pseudo action: pgsql-ha_confirmed-pre_notify_promote_0 * Pseudo action: pgsql-ha_promote_0 * Resource action: pgsqld:0 promote on srv1 * Pseudo action: pgsql-ha_promoted_0 * Pseudo action: pgsql-ha_post_notify_promoted_0 * Resource action: pgsqld:0 notify on srv1 * Resource action: pgsqld:1 notify on srv2 * Pseudo action: pgsql-ha_confirmed-post_notify_promoted_0 * Resource action: pgsql-master-ip start on srv1 * Resource action: pgsqld:0 monitor=15000 on srv1 * Resource action: pgsqld:1 monitor=16000 on srv2 * Resource action: pgsql-master-ip monitor=10000 on srv1 Revised Cluster Status: * Node List: * Online: [ srv1 srv2 ] * Full List of Resources: * Clone Set: pgsql-ha [pgsqld] (promotable): - * Masters: [ srv1 ] - * Slaves: [ srv2 ] + * Promoted: [ srv1 ] + * Unpromoted: [ srv2 ] * pgsql-master-ip (ocf:heartbeat:IPaddr2): Started srv1 diff --git a/cts/scheduler/summary/master-stop.summary b/cts/scheduler/summary/master-stop.summary index ce76024399..efc7492830 100644 --- a/cts/scheduler/summary/master-stop.summary +++ b/cts/scheduler/summary/master-stop.summary @@ -1,24 +1,24 @@ Current cluster status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * Clone Set: m [dummy] (promotable): - * Slaves: [ node1 node2 node3 ] + * Unpromoted: [ node1 node2 node3 ] Transition Summary: - * Stop dummy:2 ( Slave node3 ) due to node availability + * Stop dummy:2 ( Unpromoted node3 ) due to node availability Executing Cluster Transition: * Pseudo action: m_stop_0 * Resource action: dummy:2 stop on node3 * Pseudo action: m_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * Clone Set: m [dummy] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] * Stopped: [ node3 ] diff --git a/cts/scheduler/summary/master-unmanaged-monitor.summary 
b/cts/scheduler/summary/master-unmanaged-monitor.summary index 36dac6d428..2b96429fad 100644 --- a/cts/scheduler/summary/master-unmanaged-monitor.summary +++ b/cts/scheduler/summary/master-unmanaged-monitor.summary @@ -1,69 +1,69 @@ Current cluster status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild] (unmanaged): * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Resource Group: group-1 (unmanaged): * r192.168.122.112 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * r192.168.122.113 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * r192.168.122.114 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged) * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged) * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-3 (unmanaged) * migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged) * Clone Set: Connectivity [ping-1] (unmanaged): * ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-3 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-4 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged) * Clone Set: master-1 [stateful-1] (promotable) (unmanaged): - * stateful-1 (ocf:pacemaker:Stateful): Slave pcmk-2 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): Master pcmk-3 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): Slave pcmk-4 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-2 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-3 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-4 (unmanaged) * Stopped: [ pcmk-1 ] Transition Summary: Executing Cluster Transition: * Resource action: lsb-dummy monitor=5000 on pcmk-3 * Resource action: migrator monitor=10000 on pcmk-4 * Resource action: ping-1:0 monitor=60000 on pcmk-2 * Resource action: ping-1:0 monitor=60000 on pcmk-3 * Resource action: ping-1:0 monitor=60000 on pcmk-4 * Resource action: ping-1:0 monitor=60000 on pcmk-1 * Resource action: stateful-1:0 monitor=15000 on pcmk-2 * Resource action: stateful-1:0 monitor on pcmk-1 * Resource action: stateful-1:0 monitor=16000 on pcmk-3 * Resource action: stateful-1:0 monitor=15000 on pcmk-4 Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild] (unmanaged): * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Resource Group: group-1 (unmanaged): * r192.168.122.112 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * r192.168.122.113 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * r192.168.122.114 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged) * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged) * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-3 (unmanaged) * migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged) * Clone Set: Connectivity [ping-1] (unmanaged): * ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-3 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-4 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-1 
(unmanaged) * Clone Set: master-1 [stateful-1] (promotable) (unmanaged): - * stateful-1 (ocf:pacemaker:Stateful): Slave pcmk-2 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): Master pcmk-3 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): Slave pcmk-4 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-2 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-3 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-4 (unmanaged) * Stopped: [ pcmk-1 ] diff --git a/cts/scheduler/summary/master_monitor_restart.summary b/cts/scheduler/summary/master_monitor_restart.summary index 2c60d48070..be181bd6ac 100644 --- a/cts/scheduler/summary/master_monitor_restart.summary +++ b/cts/scheduler/summary/master_monitor_restart.summary @@ -1,24 +1,24 @@ Current cluster status: * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node1 ] + * Promoted: [ node1 ] * Stopped: [ node2 ] Transition Summary: Executing Cluster Transition: * Resource action: MS_RSC_NATIVE:0 monitor=5000 on node1 Revised Cluster Status: * Node List: * Node node2: standby * Online: [ node1 ] * Full List of Resources: * Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable): - * Masters: [ node1 ] + * Promoted: [ node1 ] * Stopped: [ node2 ] diff --git a/cts/scheduler/summary/migrate-fencing.summary b/cts/scheduler/summary/migrate-fencing.summary index b41405ee77..fd4fffa1d3 100644 --- a/cts/scheduler/summary/migrate-fencing.summary +++ b/cts/scheduler/summary/migrate-fencing.summary @@ -1,108 +1,108 @@ Current cluster status: * Node List: * Node pcmk-4: UNCLEAN (online) * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-4 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-4 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-4 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-4 * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-4 ] - * Slaves: [ pcmk-1 pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-4 ] + * Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ] Transition Summary: * Fence (reboot) pcmk-4 'termination was requested' * Stop FencingChild:0 ( pcmk-4 ) due to node availability * Move r192.168.101.181 ( pcmk-4 -> pcmk-1 ) * Move r192.168.101.182 ( pcmk-4 -> pcmk-1 ) * Move r192.168.101.183 ( pcmk-4 -> pcmk-1 ) * Move rsc_pcmk-4 ( pcmk-4 -> pcmk-2 ) * Move lsb-dummy ( pcmk-4 -> pcmk-1 ) * Migrate migrator ( pcmk-1 -> pcmk-3 ) * Stop ping-1:0 ( pcmk-4 ) due to node availability - * Stop stateful-1:0 ( Master pcmk-4 ) due to node availability - * Promote stateful-1:1 ( Slave -> Master pcmk-1 ) + * Stop stateful-1:0 ( Promoted pcmk-4 ) due to node availability + * Promote stateful-1:1 ( Unpromoted -> Promoted pcmk-1 ) Executing Cluster Transition: * Pseudo action: Fencing_stop_0 * Resource action: stateful-1:3 monitor=15000 on pcmk-3 * Resource action: stateful-1:2 monitor=15000 on pcmk-2 * Fencing pcmk-4 (reboot) * Pseudo action: FencingChild:0_stop_0 * Pseudo action: 
Fencing_stopped_0 * Pseudo action: rsc_pcmk-4_stop_0 * Pseudo action: lsb-dummy_stop_0 * Resource action: migrator migrate_to on pcmk-1 * Pseudo action: Connectivity_stop_0 * Pseudo action: group-1_stop_0 * Pseudo action: r192.168.101.183_stop_0 * Resource action: rsc_pcmk-4 start on pcmk-2 * Resource action: migrator migrate_from on pcmk-3 * Resource action: migrator stop on pcmk-1 * Pseudo action: ping-1:0_stop_0 * Pseudo action: Connectivity_stopped_0 * Pseudo action: r192.168.101.182_stop_0 * Resource action: rsc_pcmk-4 monitor=5000 on pcmk-2 * Pseudo action: migrator_start_0 * Pseudo action: r192.168.101.181_stop_0 * Resource action: migrator monitor=10000 on pcmk-3 * Pseudo action: group-1_stopped_0 * Pseudo action: master-1_demote_0 * Pseudo action: stateful-1:0_demote_0 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Pseudo action: stateful-1:0_stop_0 * Pseudo action: master-1_stopped_0 * Pseudo action: master-1_promote_0 * Resource action: stateful-1:1 promote on pcmk-1 * Pseudo action: master-1_promoted_0 * Pseudo action: group-1_start_0 * Resource action: r192.168.101.181 start on pcmk-1 * Resource action: r192.168.101.182 start on pcmk-1 * Resource action: r192.168.101.183 start on pcmk-1 * Resource action: stateful-1:1 monitor=16000 on pcmk-1 * Pseudo action: group-1_running_0 * Resource action: r192.168.101.181 monitor=5000 on pcmk-1 * Resource action: r192.168.101.182 monitor=5000 on pcmk-1 * Resource action: r192.168.101.183 monitor=5000 on pcmk-1 * Resource action: lsb-dummy start on pcmk-1 * Resource action: lsb-dummy monitor=5000 on pcmk-1 Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * OFFLINE: [ pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 * migrator (ocf:pacemaker:Dummy): Started pcmk-3 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-1 ] - * Slaves: [ pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-1 ] + * Unpromoted: [ pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] diff --git a/cts/scheduler/summary/migrate-partial-4.summary b/cts/scheduler/summary/migrate-partial-4.summary index 68621e636f..abb31f1e6f 100644 --- a/cts/scheduler/summary/migrate-partial-4.summary +++ b/cts/scheduler/summary/migrate-partial-4.summary @@ -1,126 +1,126 @@ Current cluster status: * Node List: * Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Full List of Resources: * drbd-local (ocf:vds-ok:Ticketer): Started lustre01-left * drbd-stacked (ocf:vds-ok:Ticketer): Stopped * drbd-testfs-local (ocf:vds-ok:Ticketer): Stopped * drbd-testfs-stacked (ocf:vds-ok:Ticketer): Stopped * ip-testfs-mdt0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0001-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0002-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0003-left (ocf:heartbeat:IPaddr2): Stopped * lustre 
(ocf:vds-ok:Ticketer): Started lustre03-left * mgs (ocf:vds-ok:lustre-server): Stopped * testfs (ocf:vds-ok:Ticketer): Started lustre02-left * testfs-mdt0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0001 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0002 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0003 (ocf:vds-ok:lustre-server): Stopped * Resource Group: booth: * ip-booth (ocf:heartbeat:IPaddr2): Started lustre02-left * boothd (ocf:pacemaker:booth-site): Started lustre02-left * Clone Set: ms-drbd-mgs [drbd-mgs] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] Transition Summary: * Start drbd-stacked ( lustre02-left ) * Start drbd-testfs-local ( lustre03-left ) * Migrate lustre ( lustre03-left -> lustre04-left ) * Move testfs ( lustre02-left -> lustre03-left ) * Start drbd-mgs:0 ( lustre01-left ) * Start drbd-mgs:1 ( lustre02-left ) Executing Cluster Transition: * Resource action: drbd-stacked start on lustre02-left * Resource action: drbd-testfs-local start on lustre03-left * Resource action: lustre migrate_to on lustre03-left * Resource action: testfs stop on lustre02-left * Resource action: testfs stop on lustre01-left * Pseudo action: ms-drbd-mgs_pre_notify_start_0 * Resource action: lustre migrate_from on lustre04-left * Resource action: lustre stop on lustre03-left * Resource action: testfs start on lustre03-left * Pseudo action: ms-drbd-mgs_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd-mgs_start_0 * Pseudo action: lustre_start_0 * Resource action: drbd-mgs:0 start on lustre01-left * Resource action: drbd-mgs:1 start on lustre02-left * Pseudo action: ms-drbd-mgs_running_0 * Pseudo action: ms-drbd-mgs_post_notify_running_0 * Resource action: drbd-mgs:0 notify on lustre01-left * Resource action: drbd-mgs:1 notify on lustre02-left * Pseudo action: ms-drbd-mgs_confirmed-post_notify_running_0 * Resource action: drbd-mgs:0 monitor=30000 on lustre01-left * Resource action: drbd-mgs:1 monitor=30000 on lustre02-left Revised Cluster 
Status: * Node List: * Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Full List of Resources: * drbd-local (ocf:vds-ok:Ticketer): Started lustre01-left * drbd-stacked (ocf:vds-ok:Ticketer): Started lustre02-left * drbd-testfs-local (ocf:vds-ok:Ticketer): Started lustre03-left * drbd-testfs-stacked (ocf:vds-ok:Ticketer): Stopped * ip-testfs-mdt0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0000-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0001-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0002-left (ocf:heartbeat:IPaddr2): Stopped * ip-testfs-ost0003-left (ocf:heartbeat:IPaddr2): Stopped * lustre (ocf:vds-ok:Ticketer): Started lustre04-left * mgs (ocf:vds-ok:lustre-server): Stopped * testfs (ocf:vds-ok:Ticketer): Started lustre03-left * testfs-mdt0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0000 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0001 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0002 (ocf:vds-ok:lustre-server): Stopped * testfs-ost0003 (ocf:vds-ok:lustre-server): Stopped * Resource Group: booth: * ip-booth (ocf:heartbeat:IPaddr2): Started lustre02-left * boothd (ocf:pacemaker:booth-site): Started lustre02-left * Clone Set: ms-drbd-mgs [drbd-mgs] (promotable): - * Slaves: [ lustre01-left lustre02-left ] + * Unpromoted: [ lustre01-left lustre02-left ] * Clone Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] * Clone Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] (promotable): * Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ] diff --git a/cts/scheduler/summary/migrate-shutdown.summary b/cts/scheduler/summary/migrate-shutdown.summary index 451c217c55..551a41a175 100644 --- a/cts/scheduler/summary/migrate-shutdown.summary +++ b/cts/scheduler/summary/migrate-shutdown.summary @@ -1,92 +1,92 @@ Current cluster status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started pcmk-1 * Resource Group: group-1: * r192.168.122.105 (ocf:heartbeat:IPaddr): Started pcmk-2 * r192.168.122.106 (ocf:heartbeat:IPaddr): Started pcmk-2 * r192.168.122.107 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * 
rsc_pcmk-3 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 * migrator (ocf:pacemaker:Dummy): Started pcmk-1 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-4 ] * Stopped: [ pcmk-3 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-2 ] - * Slaves: [ pcmk-1 pcmk-4 ] + * Promoted: [ pcmk-2 ] + * Unpromoted: [ pcmk-1 pcmk-4 ] * Stopped: [ pcmk-3 ] Transition Summary: * Stop Fencing ( pcmk-1 ) due to node availability * Stop r192.168.122.105 ( pcmk-2 ) due to node availability * Stop r192.168.122.106 ( pcmk-2 ) due to node availability * Stop r192.168.122.107 ( pcmk-2 ) due to node availability * Stop rsc_pcmk-1 ( pcmk-1 ) due to node availability * Stop rsc_pcmk-2 ( pcmk-2 ) due to node availability * Stop rsc_pcmk-4 ( pcmk-4 ) due to node availability * Stop lsb-dummy ( pcmk-2 ) due to node availability * Stop migrator ( pcmk-1 ) due to node availability * Stop ping-1:0 ( pcmk-1 ) due to node availability * Stop ping-1:1 ( pcmk-2 ) due to node availability * Stop ping-1:2 ( pcmk-4 ) due to node availability - * Stop stateful-1:0 ( Slave pcmk-1 ) due to node availability - * Stop stateful-1:1 ( Master pcmk-2 ) due to node availability - * Stop stateful-1:2 ( Slave pcmk-4 ) due to node availability + * Stop stateful-1:0 ( Unpromoted pcmk-1 ) due to node availability + * Stop stateful-1:1 ( Promoted pcmk-2 ) due to node availability + * Stop stateful-1:2 ( Unpromoted pcmk-4 ) due to node availability Executing Cluster Transition: * Resource action: Fencing stop on pcmk-1 * Resource action: rsc_pcmk-1 stop on pcmk-1 * Resource action: rsc_pcmk-2 stop on pcmk-2 * Resource action: rsc_pcmk-4 stop on pcmk-4 * Resource action: lsb-dummy stop on pcmk-2 * Resource action: migrator stop on pcmk-1 * Resource action: migrator stop on pcmk-3 * Pseudo action: Connectivity_stop_0 * Cluster action: do_shutdown on pcmk-3 * Pseudo action: group-1_stop_0 * Resource action: r192.168.122.107 stop on pcmk-2 * Resource action: ping-1:0 stop on pcmk-1 * Resource action: ping-1:1 stop on pcmk-2 * Resource action: ping-1:3 stop on pcmk-4 * Pseudo action: Connectivity_stopped_0 * Resource action: r192.168.122.106 stop on pcmk-2 * Resource action: r192.168.122.105 stop on pcmk-2 * Pseudo action: group-1_stopped_0 * Pseudo action: master-1_demote_0 * Resource action: stateful-1:0 demote on pcmk-2 * Pseudo action: master-1_demoted_0 * Pseudo action: master-1_stop_0 * Resource action: stateful-1:2 stop on pcmk-1 * Resource action: stateful-1:0 stop on pcmk-2 * Resource action: stateful-1:3 stop on pcmk-4 * Pseudo action: master-1_stopped_0 * Cluster action: do_shutdown on pcmk-4 * Cluster action: do_shutdown on pcmk-2 * Cluster action: do_shutdown on pcmk-1 Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped * Resource Group: group-1: * r192.168.122.105 (ocf:heartbeat:IPaddr): Stopped * r192.168.122.106 (ocf:heartbeat:IPaddr): Stopped * r192.168.122.107 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Stopped * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Stopped * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Stopped * migrator (ocf:pacemaker:Dummy): Stopped * Clone Set: Connectivity [ping-1]: * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] 
(promotable): * Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ] diff --git a/cts/scheduler/summary/nested-remote-recovery.summary b/cts/scheduler/summary/nested-remote-recovery.summary index 325311f2fe..3114f2790c 100644 --- a/cts/scheduler/summary/nested-remote-recovery.summary +++ b/cts/scheduler/summary/nested-remote-recovery.summary @@ -1,131 +1,131 @@ Using the original execution date of: 2018-09-11 21:23:25Z Current cluster status: * Node List: * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * database-0 (ocf:pacemaker:remote): Started controller-0 * database-1 (ocf:pacemaker:remote): Started controller-1 * database-2 (ocf:pacemaker:remote): Started controller-2 * messaging-0 (ocf:pacemaker:remote): Started controller-2 * messaging-1 (ocf:pacemaker:remote): Started controller-1 * messaging-2 (ocf:pacemaker:remote): Started controller-1 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): FAILED Master database-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master database-2 + * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Master controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.18 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.12 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.18 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.14 (ocf:heartbeat:IPaddr2): Started controller-1 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0 * stonith-fence_ipmilan-5254005f9a33 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-52540098c9ff (stonith:fence_ipmilan): Started controller-1 * 
stonith-fence_ipmilan-5254000203a2 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254003296a5 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-52540066e27e (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-52540065418e (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400aab9d9 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400a16c0d (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1 Transition Summary: * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean' * Recover galera-bundle-docker-0 ( database-0 ) * Recover galera-bundle-0 ( controller-0 ) - * Recover galera:0 ( Master galera-bundle-0 ) + * Recover galera:0 ( Promoted galera-bundle-0 ) Executing Cluster Transition: * Resource action: galera-bundle-0 stop on controller-0 * Pseudo action: galera-bundle_demote_0 * Pseudo action: galera-bundle-master_demote_0 * Pseudo action: galera_demote_0 * Pseudo action: galera-bundle-master_demoted_0 * Pseudo action: galera-bundle_demoted_0 * Pseudo action: galera-bundle_stop_0 * Resource action: galera-bundle-docker-0 stop on database-0 * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0 * Pseudo action: galera-bundle-master_stop_0 * Pseudo action: galera_stop_0 * Pseudo action: galera-bundle-master_stopped_0 * Pseudo action: galera-bundle_stopped_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: galera-bundle-master_start_0 * Resource action: galera-bundle-docker-0 start on database-0 * Resource action: galera-bundle-docker-0 monitor=60000 on database-0 * Resource action: galera-bundle-0 start on controller-0 * Resource action: galera-bundle-0 monitor=30000 on controller-0 * Resource action: galera start on galera-bundle-0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: galera-bundle_promote_0 * Pseudo action: galera-bundle-master_promote_0 * Resource action: galera promote on galera-bundle-0 * Pseudo action: galera-bundle-master_promoted_0 * Pseudo action: galera-bundle_promoted_0 * Resource action: galera monitor=10000 on galera-bundle-0 Using the original execution date of: 2018-09-11 21:23:25Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * database-0 (ocf:pacemaker:remote): Started controller-0 * database-1 (ocf:pacemaker:remote): Started controller-1 * database-2 (ocf:pacemaker:remote): Started controller-2 * messaging-0 (ocf:pacemaker:remote): Started controller-2 * messaging-1 (ocf:pacemaker:remote): Started controller-1 * messaging-2 (ocf:pacemaker:remote): Started controller-1 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master database-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master database-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master database-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0 + * galera-bundle-1 
(ocf:heartbeat:galera): Promoted database-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started messaging-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Master controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Promoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * ip-192.168.24.12 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.18 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.12 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.18 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.14 (ocf:heartbeat:IPaddr2): Started controller-1 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0 * stonith-fence_ipmilan-5254005f9a33 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-52540098c9ff (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254000203a2 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254003296a5 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-52540066e27e (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-52540065418e (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400aab9d9 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400a16c0d (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1 diff --git a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary index 06c2ef7dad..532f731235 100644 --- a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary +++ b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary @@ -1,103 +1,103 @@ Using the original execution date of: 2020-05-14 10:49:31Z Current cluster status: * Node List: * Online: [ controller-0 controller-1 controller-2 ] * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 ovn-dbs-bundle-0@controller-0 ovn-dbs-bundle-1@controller-1 ovn-dbs-bundle-2@controller-2 rabbitmq-bundle-0@controller-0 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master 
controller-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: - * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Slave controller-0 - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Slave controller-1 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Slave controller-2 + * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-1 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-2 * stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1 * Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0 Transition Summary: * Stop ovn-dbs-bundle-podman-0 ( controller-0 ) due to node availability * Stop ovn-dbs-bundle-0 ( controller-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start - * Stop ovndb_servers:0 ( Slave ovn-dbs-bundle-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start - * Promote ovndb_servers:1 ( Slave -> Master ovn-dbs-bundle-1 ) + * Stop ovndb_servers:0 ( Unpromoted ovn-dbs-bundle-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start + * Promote ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) Executing Cluster Transition: * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0 * Pseudo action: ovn-dbs-bundle_stop_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0 * Pseudo action: ovn-dbs-bundle-master_stop_0 * Resource action: ovndb_servers stop on ovn-dbs-bundle-0 * Pseudo action: ovn-dbs-bundle-master_stopped_0 * Resource action: ovn-dbs-bundle-0 stop on controller-0 * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0 * Resource action: ovn-dbs-bundle-podman-0 stop on controller-0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: 
ovn-dbs-bundle-master_confirmed-post_notify_stopped_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0 * Pseudo action: ovn-dbs-bundle_stopped_0 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: ovn-dbs-bundle-master_start_0 * Pseudo action: ovn-dbs-bundle-master_running_0 * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0 * Pseudo action: ovn-dbs-bundle_running_0 * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle_promote_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: ovn-dbs-bundle-master_promote_0 * Resource action: ovndb_servers promote on ovn-dbs-bundle-1 * Pseudo action: ovn-dbs-bundle-master_promoted_0 * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0 * Resource action: ovndb_servers notify on ovn-dbs-bundle-1 * Resource action: ovndb_servers notify on ovn-dbs-bundle-2 * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: ovn-dbs-bundle_promoted_0 * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1 Using the original execution date of: 2020-05-14 10:49:31Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-1 controller-2 ] * GuestOnline: [ galera-bundle-0@controller-0 galera-bundle-1@controller-1 galera-bundle-2@controller-2 ovn-dbs-bundle-1@controller-1 ovn-dbs-bundle-2@controller-2 rabbitmq-bundle-0@controller-0 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-0@controller-0 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master controller-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-2 * Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]: * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped - * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Master controller-1 - * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Slave controller-2 + * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-1 + * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-2 * 
stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1 * Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-0 diff --git a/cts/scheduler/summary/no_quorum_demote.summary b/cts/scheduler/summary/no_quorum_demote.summary index 6671bc06b5..d2cde3eb11 100644 --- a/cts/scheduler/summary/no_quorum_demote.summary +++ b/cts/scheduler/summary/no_quorum_demote.summary @@ -1,40 +1,40 @@ Using the original execution date of: 2020-06-17 17:26:35Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 ] * OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Masters: [ rhel7-1 ] - * Slaves: [ rhel7-2 ] + * Promoted: [ rhel7-1 ] + * Unpromoted: [ rhel7-2 ] * Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] * rsc2 (ocf:pacemaker:Dummy): Started rhel7-2 Transition Summary: * Stop Fencing ( rhel7-1 ) due to no quorum - * Demote rsc1:0 ( Master -> Slave rhel7-1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted rhel7-1 ) * Stop rsc2 ( rhel7-2 ) due to no quorum Executing Cluster Transition: * Resource action: Fencing stop on rhel7-1 * Resource action: rsc1 cancel=10000 on rhel7-1 * Pseudo action: rsc1-clone_demote_0 * Resource action: rsc2 stop on rhel7-2 * Resource action: rsc1 demote on rhel7-1 * Pseudo action: rsc1-clone_demoted_0 * Resource action: rsc1 monitor=11000 on rhel7-1 Using the original execution date of: 2020-06-17 17:26:35Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 ] * OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Stopped * Clone Set: rsc1-clone [rsc1] (promotable): - * Slaves: [ rhel7-1 rhel7-2 ] + * Unpromoted: [ rhel7-1 rhel7-2 ] * Stopped: [ rhel7-3 rhel7-4 rhel7-5 ] * rsc2 (ocf:pacemaker:Dummy): Stopped diff --git a/cts/scheduler/summary/notifs-for-unrunnable.summary b/cts/scheduler/summary/notifs-for-unrunnable.summary index 941de01014..ecd65be550 100644 --- a/cts/scheduler/summary/notifs-for-unrunnable.summary +++ b/cts/scheduler/summary/notifs-for-unrunnable.summary @@ -1,99 +1,99 @@ Using the original execution date of: 2018-02-13 23:40:47Z Current cluster status: * Node List: * Online: [ controller-1 controller-2 ] * OFFLINE: [ controller-0 ] * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb:pcmklatest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: redis-bundle 
[192.168.24.1:8787/rhosp12/openstack-redis:pcmklatest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Master controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Promoted controller-2 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.15 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2 * stonith-fence_ipmilan-525400fec0c8 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254002ff217 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254008f971a (stonith:fence_ipmilan): Started controller-1 Transition Summary: * Start rabbitmq-bundle-0 ( controller-1 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked) * Start rabbitmq:0 ( rabbitmq-bundle-0 ) due to unrunnable rabbitmq-bundle-docker-0 start (blocked) * Start galera-bundle-0 ( controller-2 ) due to unrunnable galera-bundle-docker-0 start (blocked) * Start galera:0 ( galera-bundle-0 ) due to unrunnable galera-bundle-docker-0 start (blocked) * Start redis-bundle-0 ( controller-1 ) due to unrunnable redis-bundle-docker-0 start (blocked) * Start redis:0 ( redis-bundle-0 ) due to unrunnable redis-bundle-docker-0 start (blocked) Executing Cluster Transition: * Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Pseudo action: redis-bundle_start_0 * Pseudo action: galera-bundle_start_0 * Pseudo action: rabbitmq-bundle_start_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0 * Pseudo action: rabbitmq-bundle-clone_start_0 * Pseudo action: galera-bundle-master_start_0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Pseudo action: redis-bundle-master_start_0 * Pseudo action: rabbitmq-bundle-clone_running_0 * Pseudo action: galera-bundle-master_running_0 * Pseudo action: redis-bundle-master_running_0 * Pseudo action: galera-bundle_running_0 * Pseudo action: rabbitmq-bundle-clone_post_notify_running_0 * Pseudo action: redis-bundle-master_post_notify_running_0 * Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: rabbitmq-bundle_running_0 Using the original execution date of: 2018-02-13 23:40:47Z Revised Cluster Status: * Node List: * Online: [ controller-1 controller-2 ] * OFFLINE: [ controller-0 ] * GuestOnline: [ galera-bundle-1@controller-1 galera-bundle-2@controller-2 rabbitmq-bundle-1@controller-1 rabbitmq-bundle-2@controller-2 redis-bundle-1@controller-1 redis-bundle-2@controller-2 ] * Full List of Resources: * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp12/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 
(ocf:heartbeat:rabbitmq-cluster): Stopped * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp12/openstack-mariadb:pcmklatest]: * galera-bundle-0 (ocf:heartbeat:galera): Stopped - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-2 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp12/openstack-redis:pcmklatest]: * redis-bundle-0 (ocf:heartbeat:redis): Stopped - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Master controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Promoted controller-2 * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-10.0.0.109 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.15 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.11 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp12/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Stopped * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-2 * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-2 * stonith-fence_ipmilan-525400fec0c8 (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-5254002ff217 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-5254008f971a (stonith:fence_ipmilan): Started controller-1 diff --git a/cts/scheduler/summary/notify-behind-stopping-remote.summary b/cts/scheduler/summary/notify-behind-stopping-remote.summary index 3b31c62fb6..cfc7f60544 100644 --- a/cts/scheduler/summary/notify-behind-stopping-remote.summary +++ b/cts/scheduler/summary/notify-behind-stopping-remote.summary @@ -1,64 +1,64 @@ Using the original execution date of: 2018-11-22 20:36:07Z Current cluster status: * Node List: * Online: [ ra1 ra2 ra3 ] * GuestOnline: [ redis-bundle-0@ra1 redis-bundle-1@ra2 redis-bundle-2@ra3 ] * Full List of Resources: * Container bundle set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]: - * redis-bundle-0 (ocf:heartbeat:redis): Slave ra1 + * redis-bundle-0 (ocf:heartbeat:redis): Unpromoted ra1 * redis-bundle-1 (ocf:heartbeat:redis): Stopped ra2 - * redis-bundle-2 (ocf:heartbeat:redis): Slave ra3 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted ra3 Transition Summary: - * Promote redis:0 ( Slave -> Master redis-bundle-0 ) + * Promote redis:0 ( Unpromoted -> Promoted redis-bundle-0 ) * Stop redis-bundle-docker-1 ( ra2 ) due to node availability * Stop redis-bundle-1 ( ra2 ) due to unrunnable redis-bundle-docker-1 start * Start redis:1 ( redis-bundle-1 ) due to unrunnable redis-bundle-docker-1 start (blocked) Executing Cluster Transition: * Resource action: redis cancel=45000 on redis-bundle-0 * Resource action: redis cancel=60000 on redis-bundle-0 * Pseudo action: redis-bundle-master_pre_notify_start_0 * Resource action: redis-bundle-0 monitor=30000 on ra1 * Resource action: 
redis-bundle-0 cancel=60000 on ra1 * Resource action: redis-bundle-1 stop on ra2 * Resource action: redis-bundle-1 cancel=60000 on ra2 * Resource action: redis-bundle-2 monitor=30000 on ra3 * Resource action: redis-bundle-2 cancel=60000 on ra3 * Pseudo action: redis-bundle_stop_0 * Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0 * Resource action: redis-bundle-docker-1 stop on ra2 * Pseudo action: redis-bundle_stopped_0 * Pseudo action: redis-bundle_start_0 * Pseudo action: redis-bundle-master_start_0 * Pseudo action: redis-bundle-master_running_0 * Pseudo action: redis-bundle-master_post_notify_running_0 * Pseudo action: redis-bundle-master_confirmed-post_notify_running_0 * Pseudo action: redis-bundle_running_0 * Pseudo action: redis-bundle-master_pre_notify_promote_0 * Pseudo action: redis-bundle_promote_0 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0 * Pseudo action: redis-bundle-master_promote_0 * Resource action: redis promote on redis-bundle-0 * Pseudo action: redis-bundle-master_promoted_0 * Pseudo action: redis-bundle-master_post_notify_promoted_0 * Resource action: redis notify on redis-bundle-0 * Resource action: redis notify on redis-bundle-2 * Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0 * Pseudo action: redis-bundle_promoted_0 * Resource action: redis monitor=20000 on redis-bundle-0 Using the original execution date of: 2018-11-22 20:36:07Z Revised Cluster Status: * Node List: * Online: [ ra1 ra2 ra3 ] * GuestOnline: [ redis-bundle-0@ra1 redis-bundle-2@ra3 ] * Full List of Resources: * Container bundle set: redis-bundle [docker.io/tripleoqueens/centos-binary-redis:current-tripleo-rdo]: - * redis-bundle-0 (ocf:heartbeat:redis): Master ra1 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted ra1 * redis-bundle-1 (ocf:heartbeat:redis): Stopped - * redis-bundle-2 (ocf:heartbeat:redis): Slave ra3 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted ra3 diff --git a/cts/scheduler/summary/novell-239079.summary b/cts/scheduler/summary/novell-239079.summary index 989fa52601..0afbba5797 100644 --- a/cts/scheduler/summary/novell-239079.summary +++ b/cts/scheduler/summary/novell-239079.summary @@ -1,33 +1,33 @@ Current cluster status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: ms-drbd0 [drbd0] (promotable): * Stopped: [ xen-1 xen-2 ] Transition Summary: * Start drbd0:0 ( xen-1 ) * Start drbd0:1 ( xen-2 ) Executing Cluster Transition: * Pseudo action: ms-drbd0_pre_notify_start_0 * Pseudo action: ms-drbd0_confirmed-pre_notify_start_0 * Pseudo action: ms-drbd0_start_0 * Resource action: drbd0:0 start on xen-1 * Resource action: drbd0:1 start on xen-2 * Pseudo action: ms-drbd0_running_0 * Pseudo action: ms-drbd0_post_notify_running_0 * Resource action: drbd0:0 notify on xen-1 * Resource action: drbd0:1 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-post_notify_running_0 Revised Cluster Status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Stopped * Clone Set: ms-drbd0 [drbd0] (promotable): - * Slaves: [ xen-1 xen-2 ] + * Unpromoted: [ xen-1 xen-2 ] diff --git a/cts/scheduler/summary/novell-239082.summary b/cts/scheduler/summary/novell-239082.summary index 257badd4f7..431b6ddc63 100644 --- a/cts/scheduler/summary/novell-239082.summary +++ b/cts/scheduler/summary/novell-239082.summary @@ -1,59 +1,59 @@ 
Current cluster status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Started xen-1 * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ xen-1 ] - * Slaves: [ xen-2 ] + * Promoted: [ xen-1 ] + * Unpromoted: [ xen-2 ] Transition Summary: * Move fs_1 ( xen-1 -> xen-2 ) - * Promote drbd0:0 ( Slave -> Master xen-2 ) - * Stop drbd0:1 ( Master xen-1 ) due to node availability + * Promote drbd0:0 ( Unpromoted -> Promoted xen-2 ) + * Stop drbd0:1 ( Promoted xen-1 ) due to node availability Executing Cluster Transition: * Resource action: fs_1 stop on xen-1 * Pseudo action: ms-drbd0_pre_notify_demote_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-pre_notify_demote_0 * Pseudo action: ms-drbd0_demote_0 * Resource action: drbd0:1 demote on xen-1 * Pseudo action: ms-drbd0_demoted_0 * Pseudo action: ms-drbd0_post_notify_demoted_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-post_notify_demoted_0 * Pseudo action: ms-drbd0_pre_notify_stop_0 * Resource action: drbd0:0 notify on xen-2 * Resource action: drbd0:1 notify on xen-1 * Pseudo action: ms-drbd0_confirmed-pre_notify_stop_0 * Pseudo action: ms-drbd0_stop_0 * Resource action: drbd0:1 stop on xen-1 * Pseudo action: ms-drbd0_stopped_0 * Cluster action: do_shutdown on xen-1 * Pseudo action: ms-drbd0_post_notify_stopped_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-post_notify_stopped_0 * Pseudo action: ms-drbd0_pre_notify_promote_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-pre_notify_promote_0 * Pseudo action: ms-drbd0_promote_0 * Resource action: drbd0:0 promote on xen-2 * Pseudo action: ms-drbd0_promoted_0 * Pseudo action: ms-drbd0_post_notify_promoted_0 * Resource action: drbd0:0 notify on xen-2 * Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0 * Resource action: fs_1 start on xen-2 Revised Cluster Status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Started xen-2 * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ xen-2 ] + * Promoted: [ xen-2 ] * Stopped: [ xen-1 ] diff --git a/cts/scheduler/summary/novell-239087.summary b/cts/scheduler/summary/novell-239087.summary index 0266865ae9..0c158d3873 100644 --- a/cts/scheduler/summary/novell-239087.summary +++ b/cts/scheduler/summary/novell-239087.summary @@ -1,23 +1,23 @@ Current cluster status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Started xen-1 * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ xen-1 ] - * Slaves: [ xen-2 ] + * Promoted: [ xen-1 ] + * Unpromoted: [ xen-2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ xen-1 xen-2 ] * Full List of Resources: * fs_1 (ocf:heartbeat:Filesystem): Started xen-1 * Clone Set: ms-drbd0 [drbd0] (promotable): - * Masters: [ xen-1 ] - * Slaves: [ xen-2 ] + * Promoted: [ xen-1 ] + * Unpromoted: [ xen-2 ] diff --git a/cts/scheduler/summary/ocf_degraded_master-remap-ocf_ok.summary b/cts/scheduler/summary/ocf_degraded_master-remap-ocf_ok.summary index c922ff65fb..f297042593 100644 --- a/cts/scheduler/summary/ocf_degraded_master-remap-ocf_ok.summary +++ b/cts/scheduler/summary/ocf_degraded_master-remap-ocf_ok.summary @@ -1,25 +1,25 @@ Using the original execution date of: 2020-09-30 14:23:26Z 
Current cluster status: * Node List: * Online: [ rhel8-1 rhel8-2 ] * Full List of Resources: * xvmfence (stonith:fence_xvm): Started rhel8-1 * Clone Set: state-clone [state] (promotable): - * Masters: [ rhel8-1 ] - * Slaves: [ rhel8-2 ] + * Promoted: [ rhel8-1 ] + * Unpromoted: [ rhel8-2 ] Transition Summary: Executing Cluster Transition: Using the original execution date of: 2020-09-30 14:23:26Z Revised Cluster Status: * Node List: * Online: [ rhel8-1 rhel8-2 ] * Full List of Resources: * xvmfence (stonith:fence_xvm): Started rhel8-1 * Clone Set: state-clone [state] (promotable): - * Masters: [ rhel8-1 ] - * Slaves: [ rhel8-2 ] + * Promoted: [ rhel8-1 ] + * Unpromoted: [ rhel8-2 ] diff --git a/cts/scheduler/summary/on_fail_demote1.summary b/cts/scheduler/summary/on_fail_demote1.summary index 78fbb1aea3..ee23f4dc59 100644 --- a/cts/scheduler/summary/on_fail_demote1.summary +++ b/cts/scheduler/summary/on_fail_demote1.summary @@ -1,88 +1,88 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * RemoteOnline: [ remote-rhel7-2 ] * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 * Clone Set: rsc1-clone [rsc1] (promotable): - * rsc1 (ocf:pacemaker:Stateful): FAILED Master rhel7-4 - * Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 + * Unpromoted: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * rsc2 (ocf:pacemaker:Stateful): FAILED Master remote-rhel7-2 - * Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] + * rsc2 (ocf:pacemaker:Stateful): FAILED Promoted remote-rhel7-2 + * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * lxc-ms (ocf:pacemaker:Stateful): FAILED Master lxc2 - * Slaves: [ lxc1 ] + * lxc-ms (ocf:pacemaker:Stateful): FAILED Promoted lxc2 + * Unpromoted: [ lxc1 ] * Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Master rhel7-5 - * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Slave rhel7-1 - * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Slave rhel7-4 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5 + * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1 + * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-4 Transition Summary: - * Re-promote rsc1:0 ( Master rhel7-4 ) - * Re-promote rsc2:4 ( Master remote-rhel7-2 ) - * Re-promote lxc-ms:0 ( Master lxc2 ) - * Re-promote bundled:0 ( Master stateful-bundle-0 ) + * Re-promote rsc1:0 ( Promoted rhel7-4 ) + * Re-promote rsc2:4 ( Promoted remote-rhel7-2 ) + * Re-promote lxc-ms:0 ( Promoted lxc2 ) + * Re-promote bundled:0 ( Promoted stateful-bundle-0 ) Executing Cluster Transition: * Pseudo action: rsc1-clone_demote_0 * Pseudo action: rsc2-master_demote_0 * Pseudo action: lxc-ms-master_demote_0 * Pseudo action: stateful-bundle_demote_0 * Resource action: rsc1 demote on 
rhel7-4 * Pseudo action: rsc1-clone_demoted_0 * Pseudo action: rsc1-clone_promote_0 * Resource action: rsc2 demote on remote-rhel7-2 * Pseudo action: rsc2-master_demoted_0 * Pseudo action: rsc2-master_promote_0 * Resource action: lxc-ms demote on lxc2 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_promote_0 * Pseudo action: stateful-bundle-master_demote_0 * Resource action: rsc1 promote on rhel7-4 * Pseudo action: rsc1-clone_promoted_0 * Resource action: rsc2 promote on remote-rhel7-2 * Pseudo action: rsc2-master_promoted_0 * Resource action: lxc-ms promote on lxc2 * Pseudo action: lxc-ms-master_promoted_0 * Resource action: bundled demote on stateful-bundle-0 * Pseudo action: stateful-bundle-master_demoted_0 * Pseudo action: stateful-bundle_demoted_0 * Pseudo action: stateful-bundle_promote_0 * Pseudo action: stateful-bundle-master_promote_0 * Resource action: bundled promote on stateful-bundle-0 * Pseudo action: stateful-bundle-master_promoted_0 * Pseudo action: stateful-bundle_promoted_0 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * RemoteOnline: [ remote-rhel7-2 ] * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-4 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 * Clone Set: rsc1-clone [rsc1] (promotable): - * Masters: [ rhel7-4 ] - * Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ remote-rhel7-2 ] - * Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] + * Promoted: [ remote-rhel7-2 ] + * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Masters: [ lxc2 ] - * Slaves: [ lxc1 ] + * Promoted: [ lxc2 ] + * Unpromoted: [ lxc1 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Master rhel7-5 - * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Slave rhel7-1 - * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Slave rhel7-4 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5 + * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1 + * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-4 diff --git a/cts/scheduler/summary/on_fail_demote2.summary b/cts/scheduler/summary/on_fail_demote2.summary index 76a8ffb901..0ec0ea35fd 100644 --- a/cts/scheduler/summary/on_fail_demote2.summary +++ b/cts/scheduler/summary/on_fail_demote2.summary @@ -1,43 +1,43 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * rsc1 (ocf:pacemaker:Stateful): FAILED Master rhel7-4 - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ rhel7-4 ] 
- * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave rhel7-4 ) - * Promote rsc1:1 ( Slave -> Master rhel7-3 ) + * Demote rsc1:0 ( Promoted -> Unpromoted rhel7-4 ) + * Promote rsc1:1 ( Unpromoted -> Promoted rhel7-3 ) Executing Cluster Transition: * Resource action: rsc1 cancel=10000 on rhel7-4 * Resource action: rsc1 cancel=11000 on rhel7-3 * Pseudo action: rsc1-clone_demote_0 * Resource action: rsc1 demote on rhel7-4 * Pseudo action: rsc1-clone_demoted_0 * Pseudo action: rsc1-clone_promote_0 * Resource action: rsc1 monitor=11000 on rhel7-4 * Resource action: rsc1 promote on rhel7-3 * Pseudo action: rsc1-clone_promoted_0 * Resource action: rsc1 monitor=10000 on rhel7-3 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ rhel7-4 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] diff --git a/cts/scheduler/summary/on_fail_demote3.summary b/cts/scheduler/summary/on_fail_demote3.summary index 01697fcdfe..793804af2f 100644 --- a/cts/scheduler/summary/on_fail_demote3.summary +++ b/cts/scheduler/summary/on_fail_demote3.summary @@ -1,36 +1,36 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * rsc1 (ocf:pacemaker:Stateful): FAILED Master rhel7-4 - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): FAILED Promoted rhel7-4 + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ rhel7-4 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave rhel7-4 ) + * Demote rsc1:0 ( Promoted -> Unpromoted rhel7-4 ) Executing Cluster Transition: * Resource action: rsc1 cancel=10000 on rhel7-4 * Pseudo action: rsc1-clone_demote_0 * Resource action: rsc1 demote on rhel7-4 * Pseudo action: rsc1-clone_demoted_0 * Resource action: rsc1 monitor=11000 on rhel7-4 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-1 * Clone Set: rsc1-clone [rsc1] (promotable): - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ rhel7-4 ] - * Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] + * Promoted: [ rhel7-4 ] + * Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ] diff --git a/cts/scheduler/summary/on_fail_demote4.summary b/cts/scheduler/summary/on_fail_demote4.summary index c0c74a33cb..781f5488bb 100644 --- a/cts/scheduler/summary/on_fail_demote4.summary +++ 
b/cts/scheduler/summary/on_fail_demote4.summary @@ -1,189 +1,189 @@ Using the original execution date of: 2020-06-16 19:23:21Z Current cluster status: * Node List: * RemoteNode remote-rhel7-2: UNCLEAN (offline) * Node rhel7-4: UNCLEAN (offline) * Online: [ rhel7-1 rhel7-3 rhel7-5 ] * GuestOnline: [ lxc1@rhel7-3 stateful-bundle-1@rhel7-1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) * Clone Set: rsc1-clone [rsc1] (promotable): - * rsc1 (ocf:pacemaker:Stateful): Master rhel7-4 (UNCLEAN) - * rsc1 (ocf:pacemaker:Stateful): Slave remote-rhel7-2 (UNCLEAN) - * Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] + * rsc1 (ocf:pacemaker:Stateful): Promoted rhel7-4 (UNCLEAN) + * rsc1 (ocf:pacemaker:Stateful): Unpromoted remote-rhel7-2 (UNCLEAN) + * Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] * Clone Set: rsc2-master [rsc2] (promotable): - * rsc2 (ocf:pacemaker:Stateful): Slave rhel7-4 (UNCLEAN) - * rsc2 (ocf:pacemaker:Stateful): Master remote-rhel7-2 (UNCLEAN) - * Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] + * rsc2 (ocf:pacemaker:Stateful): Unpromoted rhel7-4 (UNCLEAN) + * rsc2 (ocf:pacemaker:Stateful): Promoted remote-rhel7-2 (UNCLEAN) + * Unpromoted: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] * remote-rhel7-2 (ocf:pacemaker:remote): FAILED rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): FAILED rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Slaves: [ lxc1 ] + * Unpromoted: [ lxc1 ] * Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Master rhel7-5 - * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Slave rhel7-1 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): FAILED Promoted rhel7-5 + * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1 * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN) Transition Summary: * Fence (reboot) stateful-bundle-2 (resource: stateful-bundle-docker-2) 'guest is unclean' * Fence (reboot) stateful-bundle-0 (resource: stateful-bundle-docker-0) 'guest is unclean' * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' * Fence (reboot) remote-rhel7-2 'remote connection is unrecoverable' * Fence (reboot) rhel7-4 'peer is no longer part of the cluster' * Move Fencing ( rhel7-4 -> rhel7-5 ) - * Stop rsc1:0 ( Master rhel7-4 ) due to node availability - * Promote rsc1:1 ( Slave -> Master rhel7-3 ) - * Stop rsc1:4 ( Slave remote-rhel7-2 ) due to node availability - * Recover rsc1:5 ( Slave lxc2 ) - * Stop rsc2:0 ( Slave rhel7-4 ) due to node availability - * Promote rsc2:1 ( Slave -> Master rhel7-3 ) - * Stop rsc2:4 ( Master remote-rhel7-2 ) due to node availability - * Recover rsc2:5 ( Slave lxc2 ) + * Stop rsc1:0 ( Promoted rhel7-4 ) due to node availability + * Promote rsc1:1 ( Unpromoted -> Promoted rhel7-3 ) + * Stop rsc1:4 ( Unpromoted remote-rhel7-2 ) due to node availability + * Recover rsc1:5 ( Unpromoted lxc2 ) + * Stop rsc2:0 ( Unpromoted rhel7-4 ) due to node availability + * Promote rsc2:1 ( Unpromoted -> Promoted rhel7-3 ) + * Stop rsc2:4 ( Promoted remote-rhel7-2 ) due to node availability + * Recover rsc2:5 ( Unpromoted lxc2 ) * Recover remote-rhel7-2 ( rhel7-1 ) * Recover container2 ( rhel7-3 ) - * Recover lxc-ms:0 ( Master lxc2 ) + * Recover lxc-ms:0 ( Promoted lxc2 ) * Recover stateful-bundle-docker-0 ( rhel7-5 ) * Restart 
stateful-bundle-0 ( rhel7-5 ) due to required stateful-bundle-docker-0 start - * Recover bundled:0 ( Master stateful-bundle-0 ) + * Recover bundled:0 ( Promoted stateful-bundle-0 ) * Move stateful-bundle-ip-192.168.122.133 ( rhel7-4 -> rhel7-3 ) * Recover stateful-bundle-docker-2 ( rhel7-4 -> rhel7-3 ) * Move stateful-bundle-2 ( rhel7-4 -> rhel7-3 ) - * Recover bundled:2 ( Slave stateful-bundle-2 ) + * Recover bundled:2 ( Unpromoted stateful-bundle-2 ) * Restart lxc2 ( rhel7-3 ) due to required container2 start Executing Cluster Transition: * Pseudo action: Fencing_stop_0 * Resource action: rsc1 cancel=11000 on rhel7-3 * Pseudo action: rsc1-clone_demote_0 * Resource action: rsc2 cancel=11000 on rhel7-3 * Pseudo action: rsc2-master_demote_0 * Pseudo action: lxc-ms-master_demote_0 * Resource action: stateful-bundle-0 stop on rhel7-5 * Pseudo action: stateful-bundle-2_stop_0 * Resource action: lxc2 stop on rhel7-3 * Pseudo action: stateful-bundle_demote_0 * Fencing remote-rhel7-2 (reboot) * Fencing rhel7-4 (reboot) * Pseudo action: rsc1_demote_0 * Pseudo action: rsc1-clone_demoted_0 * Pseudo action: rsc2_demote_0 * Pseudo action: rsc2-master_demoted_0 * Resource action: container2 stop on rhel7-3 * Pseudo action: stateful-bundle-master_demote_0 * Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2 * Pseudo action: stonith-lxc2-reboot on lxc2 * Resource action: Fencing start on rhel7-5 * Pseudo action: rsc1-clone_stop_0 * Pseudo action: rsc2-master_stop_0 * Pseudo action: lxc-ms_demote_0 * Pseudo action: lxc-ms-master_demoted_0 * Pseudo action: lxc-ms-master_stop_0 * Pseudo action: bundled_demote_0 * Pseudo action: stateful-bundle-master_demoted_0 * Pseudo action: stateful-bundle_demoted_0 * Pseudo action: stateful-bundle_stop_0 * Resource action: Fencing monitor=120000 on rhel7-5 * Pseudo action: rsc1_stop_0 * Pseudo action: rsc1_stop_0 * Pseudo action: rsc1_stop_0 * Pseudo action: rsc1-clone_stopped_0 * Pseudo action: rsc1-clone_start_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc2-master_stopped_0 * Pseudo action: rsc2-master_start_0 * Resource action: remote-rhel7-2 stop on rhel7-1 * Pseudo action: lxc-ms_stop_0 * Pseudo action: lxc-ms-master_stopped_0 * Pseudo action: lxc-ms-master_start_0 * Resource action: stateful-bundle-docker-0 stop on rhel7-5 * Pseudo action: stateful-bundle-docker-2_stop_0 * Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0 * Resource action: remote-rhel7-2 start on rhel7-1 * Resource action: remote-rhel7-2 monitor=60000 on rhel7-1 * Resource action: container2 start on rhel7-3 * Resource action: container2 monitor=20000 on rhel7-3 * Pseudo action: stateful-bundle-master_stop_0 * Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0 * Resource action: lxc2 start on rhel7-3 * Resource action: lxc2 monitor=30000 on rhel7-3 * Resource action: rsc1 start on lxc2 * Pseudo action: rsc1-clone_running_0 * Resource action: rsc2 start on lxc2 * Pseudo action: rsc2-master_running_0 * Resource action: lxc-ms start on lxc2 * Pseudo action: lxc-ms-master_running_0 * Pseudo action: bundled_stop_0 * Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3 * Resource action: rsc1 monitor=11000 on lxc2 * Pseudo action: rsc1-clone_promote_0 * Resource action: rsc2 monitor=11000 on lxc2 * Pseudo action: rsc2-master_promote_0 * Pseudo action: lxc-ms-master_promote_0 * Pseudo action: bundled_stop_0 * Pseudo action: stateful-bundle-master_stopped_0 * Resource action: 
stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3 * Pseudo action: stateful-bundle_stopped_0 * Pseudo action: stateful-bundle_start_0 * Resource action: rsc1 promote on rhel7-3 * Pseudo action: rsc1-clone_promoted_0 * Resource action: rsc2 promote on rhel7-3 * Pseudo action: rsc2-master_promoted_0 * Resource action: lxc-ms promote on lxc2 * Pseudo action: lxc-ms-master_promoted_0 * Pseudo action: stateful-bundle-master_start_0 * Resource action: stateful-bundle-docker-0 start on rhel7-5 * Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5 * Resource action: stateful-bundle-0 start on rhel7-5 * Resource action: stateful-bundle-0 monitor=30000 on rhel7-5 * Resource action: stateful-bundle-docker-2 start on rhel7-3 * Resource action: stateful-bundle-2 start on rhel7-3 * Resource action: rsc1 monitor=10000 on rhel7-3 * Resource action: rsc2 monitor=10000 on rhel7-3 * Resource action: lxc-ms monitor=10000 on lxc2 * Resource action: bundled start on stateful-bundle-0 * Resource action: bundled start on stateful-bundle-2 * Pseudo action: stateful-bundle-master_running_0 * Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3 * Resource action: stateful-bundle-2 monitor=30000 on rhel7-3 * Pseudo action: stateful-bundle_running_0 * Resource action: bundled monitor=11000 on stateful-bundle-2 * Pseudo action: stateful-bundle_promote_0 * Pseudo action: stateful-bundle-master_promote_0 * Resource action: bundled promote on stateful-bundle-0 * Pseudo action: stateful-bundle-master_promoted_0 * Pseudo action: stateful-bundle_promoted_0 * Resource action: bundled monitor=10000 on stateful-bundle-0 Using the original execution date of: 2020-06-16 19:23:21Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-3 rhel7-5 ] * OFFLINE: [ rhel7-4 ] * RemoteOnline: [ remote-rhel7-2 ] * GuestOnline: [ lxc1@rhel7-3 lxc2@rhel7-3 stateful-bundle-0@rhel7-5 stateful-bundle-1@rhel7-1 stateful-bundle-2@rhel7-3 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-5 * Clone Set: rsc1-clone [rsc1] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ] * Stopped: [ remote-rhel7-2 rhel7-4 ] * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ rhel7-3 ] - * Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] + * Promoted: [ rhel7-3 ] + * Unpromoted: [ lxc1 lxc2 rhel7-1 rhel7-5 ] * Stopped: [ remote-rhel7-2 rhel7-4 ] * remote-rhel7-2 (ocf:pacemaker:remote): Started rhel7-1 * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-3 * Clone Set: lxc-ms-master [lxc-ms] (promotable): - * Masters: [ lxc2 ] - * Slaves: [ lxc1 ] + * Promoted: [ lxc2 ] + * Unpromoted: [ lxc1 ] * Container bundle set: stateful-bundle [pcmktest:http]: - * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Master rhel7-5 - * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Slave rhel7-1 - * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Slave rhel7-3 + * stateful-bundle-0 (192.168.122.131) (ocf:pacemaker:Stateful): Promoted rhel7-5 + * stateful-bundle-1 (192.168.122.132) (ocf:pacemaker:Stateful): Unpromoted rhel7-1 + * stateful-bundle-2 (192.168.122.133) (ocf:pacemaker:Stateful): Unpromoted rhel7-3 diff --git a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary index 1e4220b2ed..58c572d199 100644 --- 
a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary +++ b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary @@ -1,736 +1,736 @@ Current cluster status: * Node List: * Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * RemoteOnline: [ mrg-07 mrg-08 mrg-09 ] * Full List of Resources: * fence1 (stonith:fence_xvm): Started rdo7-node2 * fence2 (stonith:fence_xvm): Started rdo7-node1 * fence3 (stonith:fence_xvm): Started rdo7-node3 * Clone Set: lb-haproxy-clone [lb-haproxy]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-glance (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-cinder (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-swift (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-neutron (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-nova (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-horizon (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-heat (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-ceilometer (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-qpid (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1 * Clone Set: galera-master [galera] (promotable): - * Masters: [ rdo7-node1 rdo7-node2 rdo7-node3 ] + * Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: rabbitmq-server-clone [rabbitmq-server]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: memcached-clone [memcached]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: mongodb-clone [mongodb]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: keystone-clone [keystone]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: glance-fs-clone [glance-fs]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: glance-registry-clone [glance-registry]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: glance-api-clone [glance-api]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: cinder-api-clone [cinder-api]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: cinder-scheduler-clone [cinder-scheduler]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * cinder-volume (systemd:openstack-cinder-volume): Stopped * Clone Set: swift-fs-clone [swift-fs]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: swift-account-clone [swift-account]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: swift-container-clone [swift-container]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: swift-object-clone [swift-object]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: swift-proxy-clone [swift-proxy]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped * Clone Set: neutron-server-clone [neutron-server]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-scale-clone [neutron-scale] (unique): * neutron-scale:0 
(ocf:neutron:NeutronScale): Stopped * neutron-scale:1 (ocf:neutron:NeutronScale): Stopped * neutron-scale:2 (ocf:neutron:NeutronScale): Stopped * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-consoleauth-clone [nova-consoleauth]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-novncproxy-clone [nova-novncproxy]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-api-clone [nova-api]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-scheduler-clone [nova-scheduler]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-conductor-clone [nova-conductor]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ rdo7-node1 ] - * Slaves: [ rdo7-node2 rdo7-node3 ] + * Promoted: [ rdo7-node1 ] + * Unpromoted: [ rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1 * Clone Set: ceilometer-central-clone [ceilometer-central]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-collector-clone [ceilometer-collector]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-api-clone [ceilometer-api]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-delay-clone [ceilometer-delay]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-notification-clone [ceilometer-notification]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: heat-api-clone [heat-api]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: heat-api-cfn-clone [heat-api-cfn]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: heat-engine-clone [heat-engine]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: horizon-clone [horizon]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: libvirtd-compute-clone 
[libvirtd-compute]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-compute-clone [ceilometer-compute]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-compute-clone [nova-compute]: * Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ] * fence-nova (stonith:fence_compute): Stopped * fence-compute (stonith:fence_apc_snmp): Started rdo7-node3 * mrg-07 (ocf:pacemaker:remote): Started rdo7-node1 * mrg-08 (ocf:pacemaker:remote): Started rdo7-node2 * mrg-09 (ocf:pacemaker:remote): Started rdo7-node3 Transition Summary: * Start keystone:0 ( rdo7-node2 ) * Start keystone:1 ( rdo7-node3 ) * Start keystone:2 ( rdo7-node1 ) * Start glance-registry:0 ( rdo7-node2 ) * Start glance-registry:1 ( rdo7-node3 ) * Start glance-registry:2 ( rdo7-node1 ) * Start glance-api:0 ( rdo7-node2 ) * Start glance-api:1 ( rdo7-node3 ) * Start glance-api:2 ( rdo7-node1 ) * Start cinder-api:0 ( rdo7-node2 ) * Start cinder-api:1 ( rdo7-node3 ) * Start cinder-api:2 ( rdo7-node1 ) * Start cinder-scheduler:0 ( rdo7-node2 ) * Start cinder-scheduler:1 ( rdo7-node3 ) * Start cinder-scheduler:2 ( rdo7-node1 ) * Start cinder-volume ( rdo7-node2 ) * Start swift-account:0 ( rdo7-node3 ) * Start swift-account:1 ( rdo7-node1 ) * Start swift-account:2 ( rdo7-node2 ) * Start swift-container:0 ( rdo7-node3 ) * Start swift-container:1 ( rdo7-node1 ) * Start swift-container:2 ( rdo7-node2 ) * Start swift-object:0 ( rdo7-node3 ) * Start swift-object:1 ( rdo7-node1 ) * Start swift-object:2 ( rdo7-node2 ) * Start swift-proxy:0 ( rdo7-node3 ) * Start swift-proxy:1 ( rdo7-node1 ) * Start swift-proxy:2 ( rdo7-node2 ) * Start swift-object-expirer ( rdo7-node3 ) * Start neutron-server:0 ( rdo7-node1 ) * Start neutron-server:1 ( rdo7-node2 ) * Start neutron-server:2 ( rdo7-node3 ) * Start neutron-scale:0 ( rdo7-node1 ) * Start neutron-scale:1 ( rdo7-node2 ) * Start neutron-scale:2 ( rdo7-node3 ) * Start neutron-ovs-cleanup:0 ( rdo7-node1 ) * Start neutron-ovs-cleanup:1 ( rdo7-node2 ) * Start neutron-ovs-cleanup:2 ( rdo7-node3 ) * Start neutron-netns-cleanup:0 ( rdo7-node1 ) * Start neutron-netns-cleanup:1 ( rdo7-node2 ) * Start neutron-netns-cleanup:2 ( rdo7-node3 ) * Start neutron-openvswitch-agent:0 ( rdo7-node1 ) * Start neutron-openvswitch-agent:1 ( rdo7-node2 ) * Start neutron-openvswitch-agent:2 ( rdo7-node3 ) * Start neutron-dhcp-agent:0 ( rdo7-node1 ) * Start neutron-dhcp-agent:1 ( rdo7-node2 ) * Start neutron-dhcp-agent:2 ( rdo7-node3 ) * Start neutron-l3-agent:0 ( rdo7-node1 ) * Start neutron-l3-agent:1 ( rdo7-node2 ) * Start neutron-l3-agent:2 ( rdo7-node3 ) * Start neutron-metadata-agent:0 ( rdo7-node1 ) * Start neutron-metadata-agent:1 ( rdo7-node2 ) * Start neutron-metadata-agent:2 ( rdo7-node3 ) * Start nova-consoleauth:0 ( rdo7-node1 ) * Start nova-consoleauth:1 ( rdo7-node2 ) * Start nova-consoleauth:2 ( rdo7-node3 ) * Start nova-novncproxy:0 ( rdo7-node1 ) * Start nova-novncproxy:1 ( rdo7-node2 ) * Start nova-novncproxy:2 ( rdo7-node3 ) * Start nova-api:0 ( rdo7-node1 ) * Start nova-api:1 ( rdo7-node2 ) * Start nova-api:2 ( rdo7-node3 ) * Start nova-scheduler:0 ( rdo7-node1 ) * Start nova-scheduler:1 ( rdo7-node2 ) * Start nova-scheduler:2 ( rdo7-node3 ) * Start nova-conductor:0 ( rdo7-node1 ) * Start nova-conductor:1 ( rdo7-node2 ) * Start nova-conductor:2 ( rdo7-node3 ) * Start ceilometer-central:0 ( rdo7-node2 ) * Start ceilometer-central:1 ( rdo7-node3 ) * Start ceilometer-central:2 ( rdo7-node1 ) * Start ceilometer-collector:0 ( 
rdo7-node2 ) * Start ceilometer-collector:1 ( rdo7-node3 ) * Start ceilometer-collector:2 ( rdo7-node1 ) * Start ceilometer-api:0 ( rdo7-node2 ) * Start ceilometer-api:1 ( rdo7-node3 ) * Start ceilometer-api:2 ( rdo7-node1 ) * Start ceilometer-delay:0 ( rdo7-node2 ) * Start ceilometer-delay:1 ( rdo7-node3 ) * Start ceilometer-delay:2 ( rdo7-node1 ) * Start ceilometer-alarm-evaluator:0 ( rdo7-node2 ) * Start ceilometer-alarm-evaluator:1 ( rdo7-node3 ) * Start ceilometer-alarm-evaluator:2 ( rdo7-node1 ) * Start ceilometer-alarm-notifier:0 ( rdo7-node2 ) * Start ceilometer-alarm-notifier:1 ( rdo7-node3 ) * Start ceilometer-alarm-notifier:2 ( rdo7-node1 ) * Start ceilometer-notification:0 ( rdo7-node2 ) * Start ceilometer-notification:1 ( rdo7-node3 ) * Start ceilometer-notification:2 ( rdo7-node1 ) * Start heat-api:0 ( rdo7-node2 ) * Start heat-api:1 ( rdo7-node3 ) * Start heat-api:2 ( rdo7-node1 ) * Start heat-api-cfn:0 ( rdo7-node2 ) * Start heat-api-cfn:1 ( rdo7-node3 ) * Start heat-api-cfn:2 ( rdo7-node1 ) * Start heat-api-cloudwatch:0 ( rdo7-node2 ) * Start heat-api-cloudwatch:1 ( rdo7-node3 ) * Start heat-api-cloudwatch:2 ( rdo7-node1 ) * Start heat-engine:0 ( rdo7-node2 ) * Start heat-engine:1 ( rdo7-node3 ) * Start heat-engine:2 ( rdo7-node1 ) * Start neutron-openvswitch-agent-compute:0 ( mrg-07 ) * Start neutron-openvswitch-agent-compute:1 ( mrg-08 ) * Start neutron-openvswitch-agent-compute:2 ( mrg-09 ) * Start libvirtd-compute:0 ( mrg-07 ) * Start libvirtd-compute:1 ( mrg-08 ) * Start libvirtd-compute:2 ( mrg-09 ) * Start ceilometer-compute:0 ( mrg-07 ) * Start ceilometer-compute:1 ( mrg-08 ) * Start ceilometer-compute:2 ( mrg-09 ) * Start nova-compute:0 ( mrg-07 ) * Start nova-compute:1 ( mrg-08 ) * Start nova-compute:2 ( mrg-09 ) * Start fence-nova ( rdo7-node2 ) Executing Cluster Transition: * Resource action: galera monitor=10000 on rdo7-node2 * Pseudo action: keystone-clone_start_0 * Pseudo action: nova-compute-clone_pre_notify_start_0 * Resource action: keystone start on rdo7-node2 * Resource action: keystone start on rdo7-node3 * Resource action: keystone start on rdo7-node1 * Pseudo action: keystone-clone_running_0 * Pseudo action: glance-registry-clone_start_0 * Pseudo action: cinder-api-clone_start_0 * Pseudo action: swift-account-clone_start_0 * Pseudo action: neutron-server-clone_start_0 * Pseudo action: nova-consoleauth-clone_start_0 * Pseudo action: ceilometer-central-clone_start_0 * Pseudo action: nova-compute-clone_confirmed-pre_notify_start_0 * Resource action: keystone monitor=60000 on rdo7-node2 * Resource action: keystone monitor=60000 on rdo7-node3 * Resource action: keystone monitor=60000 on rdo7-node1 * Resource action: glance-registry start on rdo7-node2 * Resource action: glance-registry start on rdo7-node3 * Resource action: glance-registry start on rdo7-node1 * Pseudo action: glance-registry-clone_running_0 * Pseudo action: glance-api-clone_start_0 * Resource action: cinder-api start on rdo7-node2 * Resource action: cinder-api start on rdo7-node3 * Resource action: cinder-api start on rdo7-node1 * Pseudo action: cinder-api-clone_running_0 * Pseudo action: cinder-scheduler-clone_start_0 * Resource action: swift-account start on rdo7-node3 * Resource action: swift-account start on rdo7-node1 * Resource action: swift-account start on rdo7-node2 * Pseudo action: swift-account-clone_running_0 * Pseudo action: swift-container-clone_start_0 * Pseudo action: swift-proxy-clone_start_0 * Resource action: neutron-server start on rdo7-node1 * Resource action: 
neutron-server start on rdo7-node2 * Resource action: neutron-server start on rdo7-node3 * Pseudo action: neutron-server-clone_running_0 * Pseudo action: neutron-scale-clone_start_0 * Resource action: nova-consoleauth start on rdo7-node1 * Resource action: nova-consoleauth start on rdo7-node2 * Resource action: nova-consoleauth start on rdo7-node3 * Pseudo action: nova-consoleauth-clone_running_0 * Pseudo action: nova-novncproxy-clone_start_0 * Resource action: ceilometer-central start on rdo7-node2 * Resource action: ceilometer-central start on rdo7-node3 * Resource action: ceilometer-central start on rdo7-node1 * Pseudo action: ceilometer-central-clone_running_0 * Pseudo action: ceilometer-collector-clone_start_0 * Pseudo action: clone-one-or-more:order-neutron-server-clone-neutron-openvswitch-agent-compute-clone-mandatory * Resource action: glance-registry monitor=60000 on rdo7-node2 * Resource action: glance-registry monitor=60000 on rdo7-node3 * Resource action: glance-registry monitor=60000 on rdo7-node1 * Resource action: glance-api start on rdo7-node2 * Resource action: glance-api start on rdo7-node3 * Resource action: glance-api start on rdo7-node1 * Pseudo action: glance-api-clone_running_0 * Resource action: cinder-api monitor=60000 on rdo7-node2 * Resource action: cinder-api monitor=60000 on rdo7-node3 * Resource action: cinder-api monitor=60000 on rdo7-node1 * Resource action: cinder-scheduler start on rdo7-node2 * Resource action: cinder-scheduler start on rdo7-node3 * Resource action: cinder-scheduler start on rdo7-node1 * Pseudo action: cinder-scheduler-clone_running_0 * Resource action: cinder-volume start on rdo7-node2 * Resource action: swift-account monitor=60000 on rdo7-node3 * Resource action: swift-account monitor=60000 on rdo7-node1 * Resource action: swift-account monitor=60000 on rdo7-node2 * Resource action: swift-container start on rdo7-node3 * Resource action: swift-container start on rdo7-node1 * Resource action: swift-container start on rdo7-node2 * Pseudo action: swift-container-clone_running_0 * Pseudo action: swift-object-clone_start_0 * Resource action: swift-proxy start on rdo7-node3 * Resource action: swift-proxy start on rdo7-node1 * Resource action: swift-proxy start on rdo7-node2 * Pseudo action: swift-proxy-clone_running_0 * Resource action: swift-object-expirer start on rdo7-node3 * Resource action: neutron-server monitor=60000 on rdo7-node1 * Resource action: neutron-server monitor=60000 on rdo7-node2 * Resource action: neutron-server monitor=60000 on rdo7-node3 * Resource action: neutron-scale:0 start on rdo7-node1 * Resource action: neutron-scale:1 start on rdo7-node2 * Resource action: neutron-scale:2 start on rdo7-node3 * Pseudo action: neutron-scale-clone_running_0 * Pseudo action: neutron-ovs-cleanup-clone_start_0 * Resource action: nova-consoleauth monitor=60000 on rdo7-node1 * Resource action: nova-consoleauth monitor=60000 on rdo7-node2 * Resource action: nova-consoleauth monitor=60000 on rdo7-node3 * Resource action: nova-novncproxy start on rdo7-node1 * Resource action: nova-novncproxy start on rdo7-node2 * Resource action: nova-novncproxy start on rdo7-node3 * Pseudo action: nova-novncproxy-clone_running_0 * Pseudo action: nova-api-clone_start_0 * Resource action: ceilometer-central monitor=60000 on rdo7-node2 * Resource action: ceilometer-central monitor=60000 on rdo7-node3 * Resource action: ceilometer-central monitor=60000 on rdo7-node1 * Resource action: ceilometer-collector start on rdo7-node2 * Resource action: 
ceilometer-collector start on rdo7-node3 * Resource action: ceilometer-collector start on rdo7-node1 * Pseudo action: ceilometer-collector-clone_running_0 * Pseudo action: ceilometer-api-clone_start_0 * Pseudo action: neutron-openvswitch-agent-compute-clone_start_0 * Resource action: glance-api monitor=60000 on rdo7-node2 * Resource action: glance-api monitor=60000 on rdo7-node3 * Resource action: glance-api monitor=60000 on rdo7-node1 * Resource action: cinder-scheduler monitor=60000 on rdo7-node2 * Resource action: cinder-scheduler monitor=60000 on rdo7-node3 * Resource action: cinder-scheduler monitor=60000 on rdo7-node1 * Resource action: cinder-volume monitor=60000 on rdo7-node2 * Resource action: swift-container monitor=60000 on rdo7-node3 * Resource action: swift-container monitor=60000 on rdo7-node1 * Resource action: swift-container monitor=60000 on rdo7-node2 * Resource action: swift-object start on rdo7-node3 * Resource action: swift-object start on rdo7-node1 * Resource action: swift-object start on rdo7-node2 * Pseudo action: swift-object-clone_running_0 * Resource action: swift-proxy monitor=60000 on rdo7-node3 * Resource action: swift-proxy monitor=60000 on rdo7-node1 * Resource action: swift-proxy monitor=60000 on rdo7-node2 * Resource action: swift-object-expirer monitor=60000 on rdo7-node3 * Resource action: neutron-scale:0 monitor=10000 on rdo7-node1 * Resource action: neutron-scale:1 monitor=10000 on rdo7-node2 * Resource action: neutron-scale:2 monitor=10000 on rdo7-node3 * Resource action: neutron-ovs-cleanup start on rdo7-node1 * Resource action: neutron-ovs-cleanup start on rdo7-node2 * Resource action: neutron-ovs-cleanup start on rdo7-node3 * Pseudo action: neutron-ovs-cleanup-clone_running_0 * Pseudo action: neutron-netns-cleanup-clone_start_0 * Resource action: nova-novncproxy monitor=60000 on rdo7-node1 * Resource action: nova-novncproxy monitor=60000 on rdo7-node2 * Resource action: nova-novncproxy monitor=60000 on rdo7-node3 * Resource action: nova-api start on rdo7-node1 * Resource action: nova-api start on rdo7-node2 * Resource action: nova-api start on rdo7-node3 * Pseudo action: nova-api-clone_running_0 * Pseudo action: nova-scheduler-clone_start_0 * Resource action: ceilometer-collector monitor=60000 on rdo7-node2 * Resource action: ceilometer-collector monitor=60000 on rdo7-node3 * Resource action: ceilometer-collector monitor=60000 on rdo7-node1 * Resource action: ceilometer-api start on rdo7-node2 * Resource action: ceilometer-api start on rdo7-node3 * Resource action: ceilometer-api start on rdo7-node1 * Pseudo action: ceilometer-api-clone_running_0 * Pseudo action: ceilometer-delay-clone_start_0 * Resource action: neutron-openvswitch-agent-compute start on mrg-07 * Resource action: neutron-openvswitch-agent-compute start on mrg-08 * Resource action: neutron-openvswitch-agent-compute start on mrg-09 * Pseudo action: neutron-openvswitch-agent-compute-clone_running_0 * Pseudo action: libvirtd-compute-clone_start_0 * Resource action: swift-object monitor=60000 on rdo7-node3 * Resource action: swift-object monitor=60000 on rdo7-node1 * Resource action: swift-object monitor=60000 on rdo7-node2 * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node1 * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node2 * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node3 * Resource action: neutron-netns-cleanup start on rdo7-node1 * Resource action: neutron-netns-cleanup start on rdo7-node2 * Resource action: neutron-netns-cleanup 
start on rdo7-node3 * Pseudo action: neutron-netns-cleanup-clone_running_0 * Pseudo action: neutron-openvswitch-agent-clone_start_0 * Resource action: nova-api monitor=60000 on rdo7-node1 * Resource action: nova-api monitor=60000 on rdo7-node2 * Resource action: nova-api monitor=60000 on rdo7-node3 * Resource action: nova-scheduler start on rdo7-node1 * Resource action: nova-scheduler start on rdo7-node2 * Resource action: nova-scheduler start on rdo7-node3 * Pseudo action: nova-scheduler-clone_running_0 * Pseudo action: nova-conductor-clone_start_0 * Resource action: ceilometer-api monitor=60000 on rdo7-node2 * Resource action: ceilometer-api monitor=60000 on rdo7-node3 * Resource action: ceilometer-api monitor=60000 on rdo7-node1 * Resource action: ceilometer-delay start on rdo7-node2 * Resource action: ceilometer-delay start on rdo7-node3 * Resource action: ceilometer-delay start on rdo7-node1 * Pseudo action: ceilometer-delay-clone_running_0 * Pseudo action: ceilometer-alarm-evaluator-clone_start_0 * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-07 * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-08 * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-09 * Resource action: libvirtd-compute start on mrg-07 * Resource action: libvirtd-compute start on mrg-08 * Resource action: libvirtd-compute start on mrg-09 * Pseudo action: libvirtd-compute-clone_running_0 * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node1 * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node2 * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node3 * Resource action: neutron-openvswitch-agent start on rdo7-node1 * Resource action: neutron-openvswitch-agent start on rdo7-node2 * Resource action: neutron-openvswitch-agent start on rdo7-node3 * Pseudo action: neutron-openvswitch-agent-clone_running_0 * Pseudo action: neutron-dhcp-agent-clone_start_0 * Resource action: nova-scheduler monitor=60000 on rdo7-node1 * Resource action: nova-scheduler monitor=60000 on rdo7-node2 * Resource action: nova-scheduler monitor=60000 on rdo7-node3 * Resource action: nova-conductor start on rdo7-node1 * Resource action: nova-conductor start on rdo7-node2 * Resource action: nova-conductor start on rdo7-node3 * Pseudo action: nova-conductor-clone_running_0 * Resource action: ceilometer-delay monitor=10000 on rdo7-node2 * Resource action: ceilometer-delay monitor=10000 on rdo7-node3 * Resource action: ceilometer-delay monitor=10000 on rdo7-node1 * Resource action: ceilometer-alarm-evaluator start on rdo7-node2 * Resource action: ceilometer-alarm-evaluator start on rdo7-node3 * Resource action: ceilometer-alarm-evaluator start on rdo7-node1 * Pseudo action: ceilometer-alarm-evaluator-clone_running_0 * Pseudo action: ceilometer-alarm-notifier-clone_start_0 * Resource action: libvirtd-compute monitor=60000 on mrg-07 * Resource action: libvirtd-compute monitor=60000 on mrg-08 * Resource action: libvirtd-compute monitor=60000 on mrg-09 * Resource action: fence-nova start on rdo7-node2 * Pseudo action: clone-one-or-more:order-nova-conductor-clone-nova-compute-clone-mandatory * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node1 * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node2 * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node3 * Resource action: neutron-dhcp-agent start on rdo7-node1 * Resource action: neutron-dhcp-agent start on rdo7-node2 * Resource action: 
neutron-dhcp-agent start on rdo7-node3 * Pseudo action: neutron-dhcp-agent-clone_running_0 * Pseudo action: neutron-l3-agent-clone_start_0 * Resource action: nova-conductor monitor=60000 on rdo7-node1 * Resource action: nova-conductor monitor=60000 on rdo7-node2 * Resource action: nova-conductor monitor=60000 on rdo7-node3 * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node2 * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node3 * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node1 * Resource action: ceilometer-alarm-notifier start on rdo7-node2 * Resource action: ceilometer-alarm-notifier start on rdo7-node3 * Resource action: ceilometer-alarm-notifier start on rdo7-node1 * Pseudo action: ceilometer-alarm-notifier-clone_running_0 * Pseudo action: ceilometer-notification-clone_start_0 * Resource action: fence-nova monitor=60000 on rdo7-node2 * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node1 * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node2 * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node3 * Resource action: neutron-l3-agent start on rdo7-node1 * Resource action: neutron-l3-agent start on rdo7-node2 * Resource action: neutron-l3-agent start on rdo7-node3 * Pseudo action: neutron-l3-agent-clone_running_0 * Pseudo action: neutron-metadata-agent-clone_start_0 * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node2 * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node3 * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node1 * Resource action: ceilometer-notification start on rdo7-node2 * Resource action: ceilometer-notification start on rdo7-node3 * Resource action: ceilometer-notification start on rdo7-node1 * Pseudo action: ceilometer-notification-clone_running_0 * Pseudo action: heat-api-clone_start_0 * Pseudo action: clone-one-or-more:order-ceilometer-notification-clone-ceilometer-compute-clone-mandatory * Resource action: neutron-l3-agent monitor=60000 on rdo7-node1 * Resource action: neutron-l3-agent monitor=60000 on rdo7-node2 * Resource action: neutron-l3-agent monitor=60000 on rdo7-node3 * Resource action: neutron-metadata-agent start on rdo7-node1 * Resource action: neutron-metadata-agent start on rdo7-node2 * Resource action: neutron-metadata-agent start on rdo7-node3 * Pseudo action: neutron-metadata-agent-clone_running_0 * Resource action: ceilometer-notification monitor=60000 on rdo7-node2 * Resource action: ceilometer-notification monitor=60000 on rdo7-node3 * Resource action: ceilometer-notification monitor=60000 on rdo7-node1 * Resource action: heat-api start on rdo7-node2 * Resource action: heat-api start on rdo7-node3 * Resource action: heat-api start on rdo7-node1 * Pseudo action: heat-api-clone_running_0 * Pseudo action: heat-api-cfn-clone_start_0 * Pseudo action: ceilometer-compute-clone_start_0 * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node1 * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node2 * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node3 * Resource action: heat-api monitor=60000 on rdo7-node2 * Resource action: heat-api monitor=60000 on rdo7-node3 * Resource action: heat-api monitor=60000 on rdo7-node1 * Resource action: heat-api-cfn start on rdo7-node2 * Resource action: heat-api-cfn start on rdo7-node3 * Resource action: heat-api-cfn start on rdo7-node1 * Pseudo action: heat-api-cfn-clone_running_0 * Pseudo action: heat-api-cloudwatch-clone_start_0 * 
Resource action: ceilometer-compute start on mrg-07 * Resource action: ceilometer-compute start on mrg-08 * Resource action: ceilometer-compute start on mrg-09 * Pseudo action: ceilometer-compute-clone_running_0 * Pseudo action: nova-compute-clone_start_0 * Resource action: heat-api-cfn monitor=60000 on rdo7-node2 * Resource action: heat-api-cfn monitor=60000 on rdo7-node3 * Resource action: heat-api-cfn monitor=60000 on rdo7-node1 * Resource action: heat-api-cloudwatch start on rdo7-node2 * Resource action: heat-api-cloudwatch start on rdo7-node3 * Resource action: heat-api-cloudwatch start on rdo7-node1 * Pseudo action: heat-api-cloudwatch-clone_running_0 * Pseudo action: heat-engine-clone_start_0 * Resource action: ceilometer-compute monitor=60000 on mrg-07 * Resource action: ceilometer-compute monitor=60000 on mrg-08 * Resource action: ceilometer-compute monitor=60000 on mrg-09 * Resource action: nova-compute start on mrg-07 * Resource action: nova-compute start on mrg-08 * Resource action: nova-compute start on mrg-09 * Pseudo action: nova-compute-clone_running_0 * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node2 * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node3 * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node1 * Resource action: heat-engine start on rdo7-node2 * Resource action: heat-engine start on rdo7-node3 * Resource action: heat-engine start on rdo7-node1 * Pseudo action: heat-engine-clone_running_0 * Pseudo action: nova-compute-clone_post_notify_running_0 * Resource action: heat-engine monitor=60000 on rdo7-node2 * Resource action: heat-engine monitor=60000 on rdo7-node3 * Resource action: heat-engine monitor=60000 on rdo7-node1 * Resource action: nova-compute notify on mrg-07 * Resource action: nova-compute notify on mrg-08 * Resource action: nova-compute notify on mrg-09 * Pseudo action: nova-compute-clone_confirmed-post_notify_running_0 * Resource action: nova-compute monitor=10000 on mrg-07 * Resource action: nova-compute monitor=10000 on mrg-08 * Resource action: nova-compute monitor=10000 on mrg-09 Revised Cluster Status: * Node List: * Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * RemoteOnline: [ mrg-07 mrg-08 mrg-09 ] * Full List of Resources: * fence1 (stonith:fence_xvm): Started rdo7-node2 * fence2 (stonith:fence_xvm): Started rdo7-node1 * fence3 (stonith:fence_xvm): Started rdo7-node3 * Clone Set: lb-haproxy-clone [lb-haproxy]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-glance (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-cinder (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-swift (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-neutron (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-nova (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-horizon (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-heat (ocf:heartbeat:IPaddr2): Started rdo7-node1 * vip-ceilometer (ocf:heartbeat:IPaddr2): Started rdo7-node2 * vip-qpid (ocf:heartbeat:IPaddr2): Started rdo7-node3 * vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1 * Clone Set: galera-master [galera] (promotable): - * Masters: [ rdo7-node1 rdo7-node2 rdo7-node3 ] + * Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: rabbitmq-server-clone [rabbitmq-server]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] 
* Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: memcached-clone [memcached]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: mongodb-clone [mongodb]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: keystone-clone [keystone]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: glance-fs-clone [glance-fs]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: glance-registry-clone [glance-registry]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: glance-api-clone [glance-api]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: cinder-api-clone [cinder-api]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: cinder-scheduler-clone [cinder-scheduler]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * cinder-volume (systemd:openstack-cinder-volume): Started rdo7-node2 * Clone Set: swift-fs-clone [swift-fs]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: swift-account-clone [swift-account]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: swift-container-clone [swift-container]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: swift-object-clone [swift-object]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: swift-proxy-clone [swift-proxy]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * swift-object-expirer (systemd:openstack-swift-object-expirer): Started rdo7-node3 * Clone Set: neutron-server-clone [neutron-server]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-scale-clone [neutron-scale] (unique): * neutron-scale:0 (ocf:neutron:NeutronScale): Started rdo7-node1 * neutron-scale:1 (ocf:neutron:NeutronScale): Started rdo7-node2 * neutron-scale:2 (ocf:neutron:NeutronScale): Started rdo7-node3 * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: nova-consoleauth-clone [nova-consoleauth]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: nova-novncproxy-clone [nova-novncproxy]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: nova-api-clone [nova-api]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 
mrg-09 ] * Clone Set: nova-scheduler-clone [nova-scheduler]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: nova-conductor-clone [nova-conductor]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ rdo7-node1 ] - * Slaves: [ rdo7-node2 rdo7-node3 ] + * Promoted: [ rdo7-node1 ] + * Unpromoted: [ rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1 * Clone Set: ceilometer-central-clone [ceilometer-central]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-collector-clone [ceilometer-collector]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-api-clone [ceilometer-api]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-delay-clone [ceilometer-delay]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: ceilometer-notification-clone [ceilometer-notification]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: heat-api-clone [heat-api]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: heat-api-cfn-clone [heat-api-cfn]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: heat-engine-clone [heat-engine]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: horizon-clone [horizon]: * Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Stopped: [ mrg-07 mrg-08 mrg-09 ] * Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]: * Started: [ mrg-07 mrg-08 mrg-09 ] * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: libvirtd-compute-clone [libvirtd-compute]: * Started: [ mrg-07 mrg-08 mrg-09 ] * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: ceilometer-compute-clone [ceilometer-compute]: * Started: [ mrg-07 mrg-08 mrg-09 ] * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * Clone Set: nova-compute-clone [nova-compute]: * Started: [ mrg-07 mrg-08 mrg-09 ] * Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ] * fence-nova (stonith:fence_compute): Started rdo7-node2 * fence-compute (stonith:fence_apc_snmp): Started rdo7-node3 * mrg-07 (ocf:pacemaker:remote): Started rdo7-node1 * mrg-08 (ocf:pacemaker:remote): Started rdo7-node2 * mrg-09 (ocf:pacemaker:remote): Started rdo7-node3 diff --git a/cts/scheduler/summary/order-expired-failure.summary b/cts/scheduler/summary/order-expired-failure.summary index 9c79d7123c..ca2e1d3062 100644 --- a/cts/scheduler/summary/order-expired-failure.summary +++ b/cts/scheduler/summary/order-expired-failure.summary @@ -1,112 +1,112 @@ Using the original execution date of: 2018-04-09 07:55:35Z Current cluster status: * Node List: * RemoteNode overcloud-novacompute-1: UNCLEAN 
(offline) * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ overcloud-novacompute-0 ] * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ] * Full List of Resources: * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0 * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master controller-2 - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-0 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-1 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1 * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1 * stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN) * Started: [ overcloud-novacompute-0 ] * Stopped: [ controller-0 controller-1 controller-2 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0 * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0 Transition Summary: * Fence 
(reboot) overcloud-novacompute-1 'remote connection is unrecoverable' * Stop overcloud-novacompute-1 ( controller-1 ) due to node availability * Start ip-10.0.0.110 ( controller-1 ) * Recover stonith-fence_compute-fence-nova ( controller-2 ) * Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability Executing Cluster Transition: * Resource action: overcloud-novacompute-1 stop on controller-1 * Resource action: stonith-fence_compute-fence-nova stop on controller-2 * Fencing overcloud-novacompute-1 (reboot) * Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1 * Resource action: ip-10.0.0.110 start on controller-1 * Resource action: stonith-fence_compute-fence-nova start on controller-2 * Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2 * Pseudo action: compute-unfence-trigger-clone_stop_0 * Resource action: ip-10.0.0.110 monitor=10000 on controller-1 * Pseudo action: compute-unfence-trigger_stop_0 * Pseudo action: compute-unfence-trigger-clone_stopped_0 Using the original execution date of: 2018-04-09 07:55:35Z Revised Cluster Status: * Node List: * RemoteNode overcloud-novacompute-1: UNCLEAN (offline) * Online: [ controller-0 controller-1 controller-2 ] * RemoteOnline: [ overcloud-novacompute-0 ] * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ] * Full List of Resources: * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0 * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master controller-2 - * galera-bundle-1 (ocf:heartbeat:galera): Master controller-0 - * galera-bundle-2 (ocf:heartbeat:galera): Master controller-1 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1 * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2 - * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0 - * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1 * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1 * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started 
controller-2 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1 * stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2 * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]: * Started: [ overcloud-novacompute-0 ] * Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ] * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0 * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1 * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2 * stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0 diff --git a/cts/scheduler/summary/order_constraint_stops_master.summary b/cts/scheduler/summary/order_constraint_stops_master.summary index 332a04cad2..d0a3fc2f54 100644 --- a/cts/scheduler/summary/order_constraint_stops_master.summary +++ b/cts/scheduler/summary/order_constraint_stops_master.summary @@ -1,44 +1,44 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): - * Masters: [ fc16-builder ] + * Promoted: [ fc16-builder ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder2 (disabled) Transition Summary: - * Stop NATIVE_RSC_A:0 ( Master fc16-builder ) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_A:0 ( Promoted fc16-builder ) due to required NATIVE_RSC_B start * Stop NATIVE_RSC_B ( fc16-builder2 ) due to node availability Executing Cluster Transition: * Pseudo action: MASTER_RSC_A_pre_notify_demote_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_demote_0 * Pseudo action: MASTER_RSC_A_demote_0 * Resource action: NATIVE_RSC_A:0 demote on fc16-builder * Pseudo action: MASTER_RSC_A_demoted_0 * Pseudo action: MASTER_RSC_A_post_notify_demoted_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-post_notify_demoted_0 * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Resource action: NATIVE_RSC_A:0 delete on fc16-builder2 * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder2 Revised Cluster Status: * Node List: * Online: [ fc16-builder fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): * Stopped: [ fc16-builder fc16-builder2 ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped (disabled) diff --git a/cts/scheduler/summary/order_constraint_stops_slave.summary b/cts/scheduler/summary/order_constraint_stops_slave.summary index b054cf4250..000500512d 100644 --- 
a/cts/scheduler/summary/order_constraint_stops_slave.summary +++ b/cts/scheduler/summary/order_constraint_stops_slave.summary @@ -1,36 +1,36 @@ 1 of 2 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): - * Slaves: [ fc16-builder ] + * Unpromoted: [ fc16-builder ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Started fc16-builder (disabled) Transition Summary: - * Stop NATIVE_RSC_A:0 ( Slave fc16-builder ) due to required NATIVE_RSC_B start + * Stop NATIVE_RSC_A:0 ( Unpromoted fc16-builder ) due to required NATIVE_RSC_B start * Stop NATIVE_RSC_B ( fc16-builder ) due to node availability Executing Cluster Transition: * Pseudo action: MASTER_RSC_A_pre_notify_stop_0 * Resource action: NATIVE_RSC_A:0 notify on fc16-builder * Pseudo action: MASTER_RSC_A_confirmed-pre_notify_stop_0 * Pseudo action: MASTER_RSC_A_stop_0 * Resource action: NATIVE_RSC_A:0 stop on fc16-builder * Pseudo action: MASTER_RSC_A_stopped_0 * Pseudo action: MASTER_RSC_A_post_notify_stopped_0 * Pseudo action: MASTER_RSC_A_confirmed-post_notify_stopped_0 * Resource action: NATIVE_RSC_B stop on fc16-builder Revised Cluster Status: * Node List: * Online: [ fc16-builder ] * OFFLINE: [ fc16-builder2 ] * Full List of Resources: * Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable): * Stopped: [ fc16-builder fc16-builder2 ] * NATIVE_RSC_B (ocf:pacemaker:Dummy): Stopped (disabled) diff --git a/cts/scheduler/summary/probe-2.summary b/cts/scheduler/summary/probe-2.summary index 67cde5a651..f2c60821ab 100644 --- a/cts/scheduler/summary/probe-2.summary +++ b/cts/scheduler/summary/probe-2.summary @@ -1,163 +1,163 @@ Current cluster status: * Node List: * Node wc02: standby (with active resources) * Online: [ wc01 ] * Full List of Resources: * Resource Group: group_www_data: * fs_www_data (ocf:heartbeat:Filesystem): Started wc01 * nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 * intip_nfs (ocf:heartbeat:IPaddr2): Started wc01 * Clone Set: ms_drbd_mysql [drbd_mysql] (promotable): - * Masters: [ wc02 ] - * Slaves: [ wc01 ] + * Promoted: [ wc02 ] + * Unpromoted: [ wc01 ] * Resource Group: group_mysql: * fs_mysql (ocf:heartbeat:Filesystem): Started wc02 * intip_sql (ocf:heartbeat:IPaddr2): Started wc02 * mysql-server (ocf:heartbeat:mysql): Started wc02 * Clone Set: ms_drbd_www [drbd_www] (promotable): - * Masters: [ wc01 ] - * Slaves: [ wc02 ] + * Promoted: [ wc01 ] + * Unpromoted: [ wc02 ] * Clone Set: clone_nfs-common [group_nfs-common]: * Started: [ wc01 wc02 ] * Clone Set: clone_mysql-proxy [group_mysql-proxy]: * Started: [ wc01 wc02 ] * Clone Set: clone_webservice [group_webservice]: * Started: [ wc01 wc02 ] * Resource Group: group_ftpd: * extip_ftp (ocf:heartbeat:IPaddr2): Started wc01 * pure-ftpd (ocf:heartbeat:Pure-FTPd): Started wc01 * Clone Set: DoFencing [stonith_rackpdu] (unique): * stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 * stonith_rackpdu:1 (stonith:external/rackpdu): Started wc02 Transition Summary: - * Promote drbd_mysql:0 ( Slave -> Master wc01 ) - * Stop drbd_mysql:1 ( Master wc02 ) due to node availability + * Promote drbd_mysql:0 ( Unpromoted -> Promoted wc01 ) + * Stop drbd_mysql:1 ( Promoted wc02 ) due to node availability * Move fs_mysql ( wc02 -> wc01 ) * Move intip_sql ( wc02 -> wc01 ) * Move mysql-server ( wc02 -> wc01 ) - * Stop drbd_www:1 ( Slave wc02 ) due to node availability + * Stop drbd_www:1 ( 
Unpromoted wc02 ) due to node availability * Stop nfs-common:1 ( wc02 ) due to node availability * Stop mysql-proxy:1 ( wc02 ) due to node availability * Stop fs_www:1 ( wc02 ) due to node availability * Stop apache2:1 ( wc02 ) due to node availability * Restart stonith_rackpdu:0 ( wc01 ) * Stop stonith_rackpdu:1 ( wc02 ) due to node availability Executing Cluster Transition: * Resource action: drbd_mysql:0 cancel=10000 on wc01 * Pseudo action: ms_drbd_mysql_pre_notify_demote_0 * Pseudo action: group_mysql_stop_0 * Resource action: mysql-server stop on wc02 * Pseudo action: ms_drbd_www_pre_notify_stop_0 * Pseudo action: clone_mysql-proxy_stop_0 * Pseudo action: clone_webservice_stop_0 * Pseudo action: DoFencing_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_demote_0 * Resource action: intip_sql stop on wc02 * Resource action: drbd_www:0 notify on wc01 * Resource action: drbd_www:1 notify on wc02 * Pseudo action: ms_drbd_www_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_www_stop_0 * Pseudo action: group_mysql-proxy:1_stop_0 * Resource action: mysql-proxy:1 stop on wc02 * Pseudo action: group_webservice:1_stop_0 * Resource action: apache2:1 stop on wc02 * Resource action: stonith_rackpdu:0 stop on wc01 * Resource action: stonith_rackpdu:1 stop on wc02 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Resource action: fs_mysql stop on wc02 * Resource action: drbd_www:1 stop on wc02 * Pseudo action: ms_drbd_www_stopped_0 * Pseudo action: group_mysql-proxy:1_stopped_0 * Pseudo action: clone_mysql-proxy_stopped_0 * Resource action: fs_www:1 stop on wc02 * Resource action: stonith_rackpdu:0 start on wc01 * Pseudo action: DoFencing_running_0 * Pseudo action: group_mysql_stopped_0 * Pseudo action: ms_drbd_www_post_notify_stopped_0 * Pseudo action: group_webservice:1_stopped_0 * Pseudo action: clone_webservice_stopped_0 * Resource action: stonith_rackpdu:0 monitor=5000 on wc01 * Pseudo action: ms_drbd_mysql_demote_0 * Resource action: drbd_www:0 notify on wc01 * Pseudo action: ms_drbd_www_confirmed-post_notify_stopped_0 * Pseudo action: clone_nfs-common_stop_0 * Resource action: drbd_mysql:1 demote on wc02 * Pseudo action: ms_drbd_mysql_demoted_0 * Pseudo action: group_nfs-common:1_stop_0 * Resource action: nfs-common:1 stop on wc02 * Pseudo action: ms_drbd_mysql_post_notify_demoted_0 * Pseudo action: group_nfs-common:1_stopped_0 * Pseudo action: clone_nfs-common_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_demoted_0 * Pseudo action: ms_drbd_mysql_pre_notify_stop_0 * Resource action: drbd_mysql:0 notify on wc01 * Resource action: drbd_mysql:1 notify on wc02 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_stop_0 * Pseudo action: ms_drbd_mysql_stop_0 * Resource action: drbd_mysql:1 stop on wc02 * Pseudo action: ms_drbd_mysql_stopped_0 * Pseudo action: ms_drbd_mysql_post_notify_stopped_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_stopped_0 * Pseudo action: ms_drbd_mysql_pre_notify_promote_0 * Resource action: drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-pre_notify_promote_0 * Pseudo action: ms_drbd_mysql_promote_0 * Resource action: drbd_mysql:0 promote on wc01 * Pseudo action: ms_drbd_mysql_promoted_0 * Pseudo action: ms_drbd_mysql_post_notify_promoted_0 * Resource action: 
drbd_mysql:0 notify on wc01 * Pseudo action: ms_drbd_mysql_confirmed-post_notify_promoted_0 * Pseudo action: group_mysql_start_0 * Resource action: fs_mysql start on wc01 * Resource action: intip_sql start on wc01 * Resource action: mysql-server start on wc01 * Resource action: drbd_mysql:0 monitor=5000 on wc01 * Pseudo action: group_mysql_running_0 * Resource action: fs_mysql monitor=30000 on wc01 * Resource action: intip_sql monitor=30000 on wc01 * Resource action: mysql-server monitor=30000 on wc01 Revised Cluster Status: * Node List: * Node wc02: standby * Online: [ wc01 ] * Full List of Resources: * Resource Group: group_www_data: * fs_www_data (ocf:heartbeat:Filesystem): Started wc01 * nfs-kernel-server (lsb:nfs-kernel-server): Started wc01 * intip_nfs (ocf:heartbeat:IPaddr2): Started wc01 * Clone Set: ms_drbd_mysql [drbd_mysql] (promotable): - * Masters: [ wc01 ] + * Promoted: [ wc01 ] * Stopped: [ wc02 ] * Resource Group: group_mysql: * fs_mysql (ocf:heartbeat:Filesystem): Started wc01 * intip_sql (ocf:heartbeat:IPaddr2): Started wc01 * mysql-server (ocf:heartbeat:mysql): Started wc01 * Clone Set: ms_drbd_www [drbd_www] (promotable): - * Masters: [ wc01 ] + * Promoted: [ wc01 ] * Stopped: [ wc02 ] * Clone Set: clone_nfs-common [group_nfs-common]: * Started: [ wc01 ] * Stopped: [ wc02 ] * Clone Set: clone_mysql-proxy [group_mysql-proxy]: * Started: [ wc01 ] * Stopped: [ wc02 ] * Clone Set: clone_webservice [group_webservice]: * Started: [ wc01 ] * Stopped: [ wc02 ] * Resource Group: group_ftpd: * extip_ftp (ocf:heartbeat:IPaddr2): Started wc01 * pure-ftpd (ocf:heartbeat:Pure-FTPd): Started wc01 * Clone Set: DoFencing [stonith_rackpdu] (unique): * stonith_rackpdu:0 (stonith:external/rackpdu): Started wc01 * stonith_rackpdu:1 (stonith:external/rackpdu): Stopped diff --git a/cts/scheduler/summary/probe-3.summary b/cts/scheduler/summary/probe-3.summary index 47024e3afd..929fb4d7c8 100644 --- a/cts/scheduler/summary/probe-3.summary +++ b/cts/scheduler/summary/probe-3.summary @@ -1,57 +1,57 @@ Current cluster status: * Node List: * Node pcmk-4: pending * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 * migrator (ocf:pacemaker:Dummy): Started pcmk-3 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-1 ] - * Slaves: [ pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-1 ] + * Unpromoted: [ pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Node pcmk-4: pending * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 
(ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 * migrator (ocf:pacemaker:Dummy): Started pcmk-3 * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-1 ] - * Slaves: [ pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-1 ] + * Unpromoted: [ pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] diff --git a/cts/scheduler/summary/probe-4.summary b/cts/scheduler/summary/probe-4.summary index 4fa7bfc5b6..99005e966b 100644 --- a/cts/scheduler/summary/probe-4.summary +++ b/cts/scheduler/summary/probe-4.summary @@ -1,58 +1,58 @@ Current cluster status: * Node List: * Node pcmk-4: pending * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 * migrator (ocf:pacemaker:Dummy): Stopped * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-1 ] - * Slaves: [ pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-1 ] + * Unpromoted: [ pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] Transition Summary: * Start migrator ( pcmk-3 ) blocked Executing Cluster Transition: Revised Cluster Status: * Node List: * Node pcmk-4: pending * Online: [ pcmk-1 pcmk-2 pcmk-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.101.181 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.182 (ocf:heartbeat:IPaddr): Started pcmk-1 * r192.168.101.183 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-1 * migrator (ocf:pacemaker:Dummy): Stopped * Clone Set: Connectivity [ping-1]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ pcmk-1 ] - * Slaves: [ pcmk-2 pcmk-3 ] + * Promoted: [ pcmk-1 ] + * Unpromoted: [ pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] * Clone Set: Fencing [FencingChild]: * Started: [ pcmk-1 pcmk-2 pcmk-3 ] * Stopped: [ pcmk-4 ] diff --git a/cts/scheduler/summary/rec-node-13.summary b/cts/scheduler/summary/rec-node-13.summary index 578c337a84..68210542c3 100644 --- a/cts/scheduler/summary/rec-node-13.summary +++ b/cts/scheduler/summary/rec-node-13.summary @@ -1,80 +1,80 @@ Current cluster status: * Node List: * Node c001n04: UNCLEAN (online) * Online: [ c001n02 c001n06 c001n07 ] * OFFLINE: [ c001n03 c001n05 ] * Full List of Resources: * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n06 c001n07 ] * Stopped: [ c001n03 c001n04 c001n05 ] * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * 
ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02 * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): FAILED c001n04 * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 Transition Summary: * Fence (reboot) c001n04 'ocf_msdummy:6 failed there' - * Stop ocf_msdummy:6 ( Slave c001n04 ) due to node availability + * Stop ocf_msdummy:6 ( Unpromoted c001n04 ) due to node availability Executing Cluster Transition: * Fencing c001n04 (reboot) * Pseudo action: master_rsc_1_stop_0 * Pseudo action: ocf_msdummy:6_stop_0 * Pseudo action: master_rsc_1_stopped_0 Revised Cluster Status: * Node List: * Online: [ c001n02 c001n06 c001n07 ] * OFFLINE: [ c001n03 c001n04 c001n05 ] * Full List of Resources: * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n06 c001n07 ] * Stopped: [ c001n03 c001n04 c001n05 ] * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n06 * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 
(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02 * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 + * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 diff --git a/cts/scheduler/summary/remote-connection-unrecoverable.summary b/cts/scheduler/summary/remote-connection-unrecoverable.summary index 6f248b894f..bd1adfcfa4 100644 --- a/cts/scheduler/summary/remote-connection-unrecoverable.summary +++ b/cts/scheduler/summary/remote-connection-unrecoverable.summary @@ -1,54 +1,54 @@ Current cluster status: * Node List: * Node node1: UNCLEAN (offline) * Online: [ node2 ] * RemoteOnline: [ remote1 ] * Full List of Resources: * remote1 (ocf:pacemaker:remote): Started node1 (UNCLEAN) * killer (stonith:fence_xvm): Started node2 * rsc1 (ocf:pacemaker:Dummy): Started remote1 * Clone Set: rsc2-master [rsc2] (promotable): - * rsc2 (ocf:pacemaker:Stateful): Master node1 (UNCLEAN) - * Masters: [ node2 ] + * rsc2 (ocf:pacemaker:Stateful): Promoted node1 (UNCLEAN) + * Promoted: [ node2 ] * Stopped: [ remote1 ] Transition Summary: * Fence (reboot) remote1 'resources are active and the connection is unrecoverable' * Fence (reboot) node1 'peer is no longer part of the cluster' * Stop remote1 ( node1 ) due to node availability * Restart killer ( node2 ) due to resource definition change * Move rsc1 ( remote1 -> node2 ) - * Stop rsc2:0 ( Master node1 ) due to node availability + * Stop rsc2:0 ( Promoted node1 ) due to node availability Executing Cluster Transition: * Pseudo action: remote1_stop_0 * Resource action: killer stop on node2 * Resource action: rsc1 monitor on node2 * Fencing node1 (reboot) * Fencing remote1 (reboot) * Resource action: killer start on node2 * Resource action: killer monitor=60000 on node2 * Pseudo action: rsc1_stop_0 * Pseudo action: rsc2-master_demote_0 * Resource action: rsc1 start on node2 * Pseudo action: rsc2_demote_0 * Pseudo action: rsc2-master_demoted_0 * Pseudo action: rsc2-master_stop_0 * Resource action: rsc1 monitor=10000 on node2 * Pseudo action: rsc2_stop_0 * Pseudo action: rsc2-master_stopped_0 Revised Cluster Status: * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] * RemoteOFFLINE: [ remote1 ] * Full List of Resources: * remote1 (ocf:pacemaker:remote): Stopped * killer 
(stonith:fence_xvm): Started node2 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Clone Set: rsc2-master [rsc2] (promotable): - * Masters: [ node2 ] + * Promoted: [ node2 ] * Stopped: [ node1 remote1 ] diff --git a/cts/scheduler/summary/remote-fence-unclean-3.summary b/cts/scheduler/summary/remote-fence-unclean-3.summary index 6c2606f19b..296ae937b7 100644 --- a/cts/scheduler/summary/remote-fence-unclean-3.summary +++ b/cts/scheduler/summary/remote-fence-unclean-3.summary @@ -1,103 +1,103 @@ Current cluster status: * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * RemoteOFFLINE: [ overcloud-novacompute-0 ] * GuestOnline: [ galera-bundle-0@overcloud-controller-0 galera-bundle-1@overcloud-controller-1 galera-bundle-2@overcloud-controller-2 rabbitmq-bundle-0@overcloud-controller-0 rabbitmq-bundle-1@overcloud-controller-1 rabbitmq-bundle-2@overcloud-controller-2 redis-bundle-0@overcloud-controller-0 redis-bundle-1@overcloud-controller-1 redis-bundle-2@overcloud-controller-2 ] * Full List of Resources: * fence1 (stonith:fence_xvm): Stopped * overcloud-novacompute-0 (ocf:pacemaker:remote): FAILED overcloud-controller-0 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master overcloud-controller-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master overcloud-controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): Master overcloud-controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted overcloud-controller-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted overcloud-controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted overcloud-controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master overcloud-controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Slave overcloud-controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave overcloud-controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted overcloud-controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted overcloud-controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted overcloud-controller-2 * ip-192.168.24.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0 * ip-10.0.0.7 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1 * ip-172.16.2.4 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2 * ip-172.16.2.8 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0 * ip-172.16.1.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1 * ip-172.16.3.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-controller-2 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * 
openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0 * Container bundle: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]: * openstack-cinder-backup-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-1 Transition Summary: * Fence (reboot) overcloud-novacompute-0 'the connection is unrecoverable' * Start fence1 ( overcloud-controller-0 ) * Stop overcloud-novacompute-0 ( overcloud-controller-0 ) due to node availability Executing Cluster Transition: * Resource action: fence1 monitor on overcloud-controller-2 * Resource action: fence1 monitor on overcloud-controller-1 * Resource action: fence1 monitor on overcloud-controller-0 * Resource action: overcloud-novacompute-0 stop on overcloud-controller-0 * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-2 * Resource action: rabbitmq-bundle-0 monitor on overcloud-controller-1 * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-2 * Resource action: rabbitmq-bundle-1 monitor on overcloud-controller-0 * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-1 * Resource action: rabbitmq-bundle-2 monitor on overcloud-controller-0 * Resource action: galera-bundle-0 monitor on overcloud-controller-2 * Resource action: galera-bundle-0 monitor on overcloud-controller-1 * Resource action: galera-bundle-1 monitor on overcloud-controller-2 * Resource action: galera-bundle-1 monitor on overcloud-controller-0 * Resource action: galera-bundle-2 monitor on overcloud-controller-1 * Resource action: galera-bundle-2 monitor on overcloud-controller-0 * Resource action: redis-bundle-0 monitor on overcloud-controller-2 * Resource action: redis-bundle-0 monitor on overcloud-controller-1 * Resource action: redis-bundle-1 monitor on overcloud-controller-2 * Resource action: redis-bundle-1 monitor on overcloud-controller-0 * Resource action: redis-bundle-2 monitor on overcloud-controller-1 * Resource action: redis-bundle-2 monitor on overcloud-controller-0 * Fencing overcloud-novacompute-0 (reboot) * Resource action: fence1 start on overcloud-controller-0 * Resource action: fence1 monitor=60000 on overcloud-controller-0 Revised Cluster Status: * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * RemoteOFFLINE: [ overcloud-novacompute-0 ] * GuestOnline: [ galera-bundle-0@overcloud-controller-0 galera-bundle-1@overcloud-controller-1 galera-bundle-2@overcloud-controller-2 rabbitmq-bundle-0@overcloud-controller-0 rabbitmq-bundle-1@overcloud-controller-1 rabbitmq-bundle-2@overcloud-controller-2 redis-bundle-0@overcloud-controller-0 redis-bundle-1@overcloud-controller-1 redis-bundle-2@overcloud-controller-2 ] * Full List of Resources: * fence1 (stonith:fence_xvm): Started overcloud-controller-0 * overcloud-novacompute-0 (ocf:pacemaker:remote): Stopped * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]: * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-0 * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-1 * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started overcloud-controller-2 * Container bundle set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]: - * galera-bundle-0 (ocf:heartbeat:galera): Master overcloud-controller-0 - * galera-bundle-1 (ocf:heartbeat:galera): Master overcloud-controller-1 - * galera-bundle-2 (ocf:heartbeat:galera): 
Master overcloud-controller-2 + * galera-bundle-0 (ocf:heartbeat:galera): Promoted overcloud-controller-0 + * galera-bundle-1 (ocf:heartbeat:galera): Promoted overcloud-controller-1 + * galera-bundle-2 (ocf:heartbeat:galera): Promoted overcloud-controller-2 * Container bundle set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]: - * redis-bundle-0 (ocf:heartbeat:redis): Master overcloud-controller-0 - * redis-bundle-1 (ocf:heartbeat:redis): Slave overcloud-controller-1 - * redis-bundle-2 (ocf:heartbeat:redis): Slave overcloud-controller-2 + * redis-bundle-0 (ocf:heartbeat:redis): Promoted overcloud-controller-0 + * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted overcloud-controller-1 + * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted overcloud-controller-2 * ip-192.168.24.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0 * ip-10.0.0.7 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1 * ip-172.16.2.4 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2 * ip-172.16.2.8 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0 * ip-172.16.1.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1 * ip-172.16.3.9 (ocf:heartbeat:IPaddr2): Started overcloud-controller-2 * Container bundle set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]: * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0 * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started overcloud-controller-1 * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started overcloud-controller-2 * Container bundle: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]: * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-0 * Container bundle: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]: * openstack-cinder-backup-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-1 diff --git a/cts/scheduler/summary/remote-orphaned.summary b/cts/scheduler/summary/remote-orphaned.summary index 01f9605bf7..4b5ed6f587 100644 --- a/cts/scheduler/summary/remote-orphaned.summary +++ b/cts/scheduler/summary/remote-orphaned.summary @@ -1,69 +1,69 @@ Current cluster status: * Node List: * Online: [ 18node1 18node3 ] * OFFLINE: [ 18node2 ] * RemoteOnline: [ remote1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started 18node3 * FencingPass (stonith:fence_dummy): Started 18node1 * FencingFail (stonith:fence_dummy): Started 18node3 * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1 * rsc_18node2 (ocf:heartbeat:IPaddr2): Started remote1 * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3 * migrator (ocf:pacemaker:Dummy): Started 18node1 * Clone Set: Connectivity [ping-1]: * Started: [ 18node1 18node3 remote1 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ 18node1 ] - * Slaves: [ 18node3 ] + * Promoted: [ 18node1 ] + * Unpromoted: [ 18node3 ] * Stopped: [ 18node2 ] * Resource Group: group-1: * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1 * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1 * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 * remote1 (ocf:pacemaker:remote): ORPHANED Started 18node1 Transition Summary: * Move rsc_18node2 ( remote1 -> 18node1 ) * Stop ping-1:2 ( remote1 ) due to node availability * Stop remote1 ( 18node1 ) due to node availability Executing Cluster Transition: * 
Resource action: rsc_18node2 stop on remote1 * Pseudo action: Connectivity_stop_0 * Resource action: rsc_18node2 start on 18node1 * Resource action: ping-1 stop on remote1 * Pseudo action: Connectivity_stopped_0 * Resource action: remote1 stop on 18node1 * Resource action: remote1 delete on 18node3 * Resource action: remote1 delete on 18node1 * Resource action: rsc_18node2 monitor=5000 on 18node1 Revised Cluster Status: * Node List: * Online: [ 18node1 18node3 ] * OFFLINE: [ 18node2 ] * RemoteOFFLINE: [ remote1 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started 18node3 * FencingPass (stonith:fence_dummy): Started 18node1 * FencingFail (stonith:fence_dummy): Started 18node3 * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1 * rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node1 * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3 * migrator (ocf:pacemaker:Dummy): Started 18node1 * Clone Set: Connectivity [ping-1]: * Started: [ 18node1 18node3 ] * Stopped: [ 18node2 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ 18node1 ] - * Slaves: [ 18node3 ] + * Promoted: [ 18node1 ] + * Unpromoted: [ 18node3 ] * Stopped: [ 18node2 ] * Resource Group: group-1: * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1 * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1 * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1 diff --git a/cts/scheduler/summary/remote-reconnect-delay.summary b/cts/scheduler/summary/remote-reconnect-delay.summary index bf8c9184f3..f195919986 100644 --- a/cts/scheduler/summary/remote-reconnect-delay.summary +++ b/cts/scheduler/summary/remote-reconnect-delay.summary @@ -1,67 +1,67 @@ Using the original execution date of: 2017-08-21 17:12:54Z Current cluster status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] * RemoteOFFLINE: [ remote-rhel7-3 ] * Full List of Resources: * Fencing (stonith:fence_xvm): Started rhel7-2 * FencingFail (stonith:fence_dummy): Started rhel7-4 * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1 * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2 * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-5 * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4 * rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5 * migrator (ocf:pacemaker:Dummy): Started rhel7-5 * Clone Set: Connectivity [ping-1]: * Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] * Stopped: [ remote-rhel7-3 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ rhel7-2 ] - * Slaves: [ rhel7-1 rhel7-4 rhel7-5 ] + * Promoted: [ rhel7-2 ] + * Unpromoted: [ rhel7-1 rhel7-4 rhel7-5 ] * Stopped: [ remote-rhel7-3 ] * Resource Group: group-1: * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-2 * petulant (service:DummySD): Started rhel7-2 * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-2 * remote-rhel7-3 (ocf:pacemaker:remote): FAILED * remote-rsc (ocf:heartbeat:Dummy): Started rhel7-1 Transition Summary: * Restart Fencing ( rhel7-2 ) due to resource definition change Executing Cluster Transition: * Resource action: Fencing stop on rhel7-2 * Resource action: Fencing start on rhel7-2 * Resource action: Fencing monitor=120000 on rhel7-2 Using the original execution date of: 2017-08-21 17:12:54Z Revised Cluster Status: * Node List: * Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] * RemoteOFFLINE: [ remote-rhel7-3 ] * Full List of Resources: * Fencing 
(stonith:fence_xvm): Started rhel7-2 * FencingFail (stonith:fence_dummy): Started rhel7-4 * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1 * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2 * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-5 * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4 * rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5 * migrator (ocf:pacemaker:Dummy): Started rhel7-5 * Clone Set: Connectivity [ping-1]: * Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ] * Stopped: [ remote-rhel7-3 ] * Clone Set: master-1 [stateful-1] (promotable): - * Masters: [ rhel7-2 ] - * Slaves: [ rhel7-1 rhel7-4 rhel7-5 ] + * Promoted: [ rhel7-2 ] + * Unpromoted: [ rhel7-1 rhel7-4 rhel7-5 ] * Stopped: [ remote-rhel7-3 ] * Resource Group: group-1: * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-2 * petulant (service:DummySD): Started rhel7-2 * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-2 * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-2 * remote-rhel7-3 (ocf:pacemaker:remote): FAILED * remote-rsc (ocf:heartbeat:Dummy): Started rhel7-1 diff --git a/cts/scheduler/summary/remote-recover-all.summary b/cts/scheduler/summary/remote-recover-all.summary index 4768139eca..176c1de8b3 100644 --- a/cts/scheduler/summary/remote-recover-all.summary +++ b/cts/scheduler/summary/remote-recover-all.summary @@ -1,146 +1,146 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: * Node List: * Node controller-1: UNCLEAN (offline) * Online: [ controller-0 controller-2 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 galera-2 ] + * Promoted: [ galera-0 galera-1 galera-2 ] * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * redis (ocf:heartbeat:redis): Slave controller-1 (UNCLEAN) - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * 
stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) galera-2 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop messaging-1 ( controller-1 ) due to node availability * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 ( controller-1 ) due to node availability * Stop rabbitmq:2 ( messaging-1 ) due to node availability - * Stop galera:1 ( Master galera-2 ) due to node availability - * Stop redis:0 ( Slave controller-1 ) due to node availability + * Stop galera:1 ( Promoted galera-2 ) due to node availability + * Stop redis:0 ( Unpromoted controller-1 ) due to node availability * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing Cluster Transition: * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: galera-master_demote_0 * Pseudo action: redis-master_pre_notify_stop_0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Pseudo action: galera_demote_0 * Pseudo action: galera-master_demoted_0 * Pseudo action: galera-master_stop_0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Fencing messaging-1 (reboot) * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Pseudo action: galera_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: galera-master_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * 
Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-2 ] * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] * RemoteOFFLINE: [ galera-2 messaging-1 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Stopped * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-2 * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Stopped * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 ] + * Promoted: [ galera-0 galera-1 ] * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Clone Set: haproxy-clone [haproxy]: * Started: [ controller-0 controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/remote-recover-connection.summary b/cts/scheduler/summary/remote-recover-connection.summary index 0662a0bbfe..fd6900dd96 100644 --- a/cts/scheduler/summary/remote-recover-connection.summary +++ b/cts/scheduler/summary/remote-recover-connection.summary @@ -1,132 +1,132 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: * Node List: * Node controller-1: UNCLEAN (offline) * Online: [ controller-0 controller-2 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 galera-2 ] + * Promoted: [ galera-0 galera-1 galera-2 ] * Stopped: [ controller-0 controller-1 controller-2 messaging-0 
messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * redis (ocf:heartbeat:redis): Slave controller-1 (UNCLEAN) - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Move messaging-1 ( controller-1 -> controller-2 ) * Move galera-0 ( controller-1 -> controller-2 ) * Move galera-2 ( controller-1 -> controller-2 ) - * Stop redis:0 ( Slave controller-1 ) due to node availability + * Stop redis:0 ( Unpromoted controller-1 ) due to node availability * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing Cluster Transition: * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis-master_pre_notify_stop_0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * 
Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-2 ] * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-2 * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-2 * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-2 * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 galera-2 ] + * Promoted: [ galera-0 galera-1 galera-2 ] * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Clone Set: haproxy-clone [haproxy]: * Started: [ controller-0 controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/remote-recover-no-resources.summary b/cts/scheduler/summary/remote-recover-no-resources.summary index 6067d16334..332d1c4123 100644 --- a/cts/scheduler/summary/remote-recover-no-resources.summary +++ b/cts/scheduler/summary/remote-recover-no-resources.summary @@ -1,137 +1,137 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: * Node List: * Node controller-1: UNCLEAN (offline) * Online: [ controller-0 controller-2 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-1 
(UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 ] + * Promoted: [ galera-0 galera-1 ] * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * redis (ocf:heartbeat:redis): Slave controller-1 (UNCLEAN) - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop messaging-1 ( controller-1 ) due to node availability * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 ( controller-1 ) due to node availability * Stop rabbitmq:2 ( messaging-1 ) due to node availability - * Stop redis:0 ( Slave controller-1 ) due to node availability + * Stop redis:0 ( Unpromoted controller-1 ) due to node availability * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing Cluster Transition: * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis-master_pre_notify_stop_0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing messaging-1 (reboot) * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: 
haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-2 ] * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] * RemoteOFFLINE: [ galera-2 messaging-1 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Stopped * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-2 * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Stopped * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 ] + * Promoted: [ galera-0 galera-1 ] * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Clone Set: haproxy-clone [haproxy]: * Started: [ controller-0 controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/remote-recover-unknown.summary b/cts/scheduler/summary/remote-recover-unknown.summary index f10395f6f8..ac5143a16e 100644 --- 
a/cts/scheduler/summary/remote-recover-unknown.summary +++ b/cts/scheduler/summary/remote-recover-unknown.summary @@ -1,139 +1,139 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: * Node List: * Node controller-1: UNCLEAN (offline) * Online: [ controller-0 controller-2 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 ] + * Promoted: [ galera-0 galera-1 ] * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * redis (ocf:heartbeat:redis): Slave controller-1 (UNCLEAN) - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) galera-2 'resources are in an unknown state and the connection is unrecoverable' * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable' * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Stop messaging-1 ( controller-1 ) due to node availability * Move galera-0 ( controller-1 -> controller-2 ) * Stop galera-2 ( controller-1 ) due to node availability * Stop rabbitmq:2 ( messaging-1 ) due to node availability - * Stop redis:0 ( Slave controller-1 ) due to node availability + * Stop redis:0 ( Unpromoted controller-1 ) due to node availability * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing Cluster Transition: * Pseudo action: messaging-1_stop_0 * Pseudo action: 
galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis-master_pre_notify_stop_0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Fencing galera-2 (reboot) * Fencing messaging-1 (reboot) * Resource action: galera-0 start on controller-2 * Pseudo action: rabbitmq_post_notify_stop_0 * Pseudo action: rabbitmq-clone_stop_0 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: rabbitmq notify on messaging-2 * Resource action: rabbitmq notify on messaging-0 * Pseudo action: rabbitmq_notified_0 * Pseudo action: rabbitmq_stop_0 * Pseudo action: rabbitmq-clone_stopped_0 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-2 ] * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ] * RemoteOFFLINE: [ galera-2 messaging-1 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Stopped * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-2 * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Stopped * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 ] + * Promoted: [ galera-0 galera-1 ] * Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.15 
(ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Clone Set: haproxy-clone [haproxy]: * Started: [ controller-0 controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/remote-recovery.summary b/cts/scheduler/summary/remote-recovery.summary index 0662a0bbfe..fd6900dd96 100644 --- a/cts/scheduler/summary/remote-recovery.summary +++ b/cts/scheduler/summary/remote-recovery.summary @@ -1,132 +1,132 @@ Using the original execution date of: 2017-05-03 13:33:24Z Current cluster status: * Node List: * Node controller-1: UNCLEAN (offline) * Online: [ controller-0 controller-2 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-1 (UNCLEAN) * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 galera-2 ] + * Promoted: [ galera-0 galera-1 galera-2 ] * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * redis (ocf:heartbeat:redis): Slave controller-1 (UNCLEAN) - * Masters: [ controller-0 ] - * Slaves: [ controller-2 ] + * redis (ocf:heartbeat:redis): Unpromoted controller-1 (UNCLEAN) + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-1 (UNCLEAN) * Clone Set: haproxy-clone [haproxy]: * haproxy (systemd:haproxy): Started controller-1 (UNCLEAN) * Started: [ controller-0 controller-2 ] * Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-1 (UNCLEAN) Transition Summary: * Fence (reboot) controller-1 'peer is no longer part of the cluster' * Move messaging-1 ( controller-1 -> controller-2 ) * Move galera-0 ( controller-1 -> controller-2 ) * Move galera-2 ( controller-1 -> controller-2 ) - * Stop redis:0 ( Slave 
controller-1 ) due to node availability + * Stop redis:0 ( Unpromoted controller-1 ) due to node availability * Move ip-172.17.1.14 ( controller-1 -> controller-2 ) * Move ip-172.17.1.17 ( controller-1 -> controller-2 ) * Move ip-172.17.4.11 ( controller-1 -> controller-2 ) * Stop haproxy:0 ( controller-1 ) due to node availability * Move stonith-fence_ipmilan-5254005bdbb5 ( controller-1 -> controller-2 ) Executing Cluster Transition: * Pseudo action: messaging-1_stop_0 * Pseudo action: galera-0_stop_0 * Pseudo action: galera-2_stop_0 * Pseudo action: redis-master_pre_notify_stop_0 * Pseudo action: stonith-fence_ipmilan-5254005bdbb5_stop_0 * Fencing controller-1 (reboot) * Resource action: messaging-1 start on controller-2 * Resource action: galera-0 start on controller-2 * Resource action: galera-2 start on controller-2 * Resource action: rabbitmq monitor=10000 on messaging-1 * Resource action: galera monitor=10000 on galera-2 * Resource action: galera monitor=10000 on galera-0 * Pseudo action: redis_post_notify_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-pre_notify_stop_0 * Pseudo action: redis-master_stop_0 * Pseudo action: haproxy-clone_stop_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2 * Resource action: messaging-1 monitor=20000 on controller-2 * Resource action: galera-0 monitor=20000 on controller-2 * Resource action: galera-2 monitor=20000 on controller-2 * Pseudo action: redis_stop_0 * Pseudo action: redis-master_stopped_0 * Pseudo action: haproxy_stop_0 * Pseudo action: haproxy-clone_stopped_0 * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2 * Pseudo action: redis-master_post_notify_stopped_0 * Pseudo action: ip-172.17.1.14_stop_0 * Pseudo action: ip-172.17.1.17_stop_0 * Pseudo action: ip-172.17.4.11_stop_0 * Resource action: redis notify on controller-0 * Resource action: redis notify on controller-2 * Pseudo action: redis-master_confirmed-post_notify_stopped_0 * Resource action: ip-172.17.1.14 start on controller-2 * Resource action: ip-172.17.1.17 start on controller-2 * Resource action: ip-172.17.4.11 start on controller-2 * Pseudo action: redis_notified_0 * Resource action: ip-172.17.1.14 monitor=10000 on controller-2 * Resource action: ip-172.17.1.17 monitor=10000 on controller-2 * Resource action: ip-172.17.4.11 monitor=10000 on controller-2 Using the original execution date of: 2017-05-03 13:33:24Z Revised Cluster Status: * Node List: * Online: [ controller-0 controller-2 ] * OFFLINE: [ controller-1 ] * RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * Full List of Resources: * messaging-0 (ocf:pacemaker:remote): Started controller-0 * messaging-1 (ocf:pacemaker:remote): Started controller-2 * messaging-2 (ocf:pacemaker:remote): Started controller-0 * galera-0 (ocf:pacemaker:remote): Started controller-2 * galera-1 (ocf:pacemaker:remote): Started controller-0 * galera-2 (ocf:pacemaker:remote): Started controller-2 * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ messaging-0 messaging-1 messaging-2 ] * Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ galera-0 galera-1 galera-2 ] + * Promoted: [ galera-0 galera-1 galera-2 ] * Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ 
controller-0 ] - * Slaves: [ controller-2 ] + * Promoted: [ controller-0 ] + * Unpromoted: [ controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * ip-192.168.24.6 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-10.0.0.102 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2 * ip-172.17.3.15 (ocf:heartbeat:IPaddr2): Started controller-0 * ip-172.17.4.11 (ocf:heartbeat:IPaddr2): Started controller-2 * Clone Set: haproxy-clone [haproxy]: * Started: [ controller-0 controller-2 ] * Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Started controller-0 * stonith-fence_ipmilan-525400bbf613 (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-525400b4f6bd (stonith:fence_ipmilan): Started controller-0 * stonith-fence_ipmilan-5254005bdbb5 (stonith:fence_ipmilan): Started controller-2 diff --git a/cts/scheduler/summary/rsc-sets-master.summary b/cts/scheduler/summary/rsc-sets-master.summary index c63c76cd90..a45e4b16e8 100644 --- a/cts/scheduler/summary/rsc-sets-master.summary +++ b/cts/scheduler/summary/rsc-sets-master.summary @@ -1,49 +1,49 @@ Current cluster status: * Node List: * Node node1: standby (with active resources) * Online: [ node2 ] * Full List of Resources: * Clone Set: ms-rsc [rsc] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] * rsc1 (ocf:pacemaker:Dummy): Started node1 * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 Transition Summary: - * Stop rsc:0 ( Master node1 ) due to node availability - * Promote rsc:1 ( Slave -> Master node2 ) + * Stop rsc:0 ( Promoted node1 ) due to node availability + * Promote rsc:1 ( Unpromoted -> Promoted node2 ) * Move rsc1 ( node1 -> node2 ) * Move rsc2 ( node1 -> node2 ) * Move rsc3 ( node1 -> node2 ) Executing Cluster Transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 stop on node1 * Resource action: rsc3 stop on node1 * Pseudo action: ms-rsc_demote_0 * Resource action: rsc:0 demote on node1 * Pseudo action: ms-rsc_demoted_0 * Pseudo action: ms-rsc_stop_0 * Resource action: rsc:0 stop on node1 * Pseudo action: ms-rsc_stopped_0 * Pseudo action: ms-rsc_promote_0 * Resource action: rsc:1 promote on node2 * Pseudo action: ms-rsc_promoted_0 * Resource action: rsc1 start on node2 * Resource action: rsc2 start on node2 * Resource action: rsc3 start on node2 Revised Cluster Status: * Node List: * Node node1: standby * Online: [ node2 ] * Full List of Resources: * Clone Set: ms-rsc [rsc] (promotable): - * Masters: [ node2 ] + * Promoted: [ node2 ] * Stopped: [ node1 ] * rsc1 (ocf:pacemaker:Dummy): Started node2 * rsc2 (ocf:pacemaker:Dummy): Started node2 * rsc3 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/stonith-0.summary b/cts/scheduler/summary/stonith-0.summary index d8cf2e55e9..5b829bf06d 100644 --- a/cts/scheduler/summary/stonith-0.summary +++ b/cts/scheduler/summary/stonith-0.summary @@ -1,111 +1,111 @@ Current cluster status: * Node List: * Node c001n03: UNCLEAN (online) * Node c001n05: UNCLEAN (online) * Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started [ c001n03 
c001n05 ] * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n03 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): FAILED [ c001n03 c001n05 ] * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04 * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n05 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n03 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] * Stopped: [ c001n03 c001n05 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 - * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 - * ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04 + * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04 + * ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 Transition Summary: * Fence (reboot) c001n05 'ocf_192.168.100.183 failed there' * Fence (reboot) c001n03 'ocf_192.168.100.183 failed there' * Move ocf_192.168.100.181 ( c001n03 -> c001n02 ) * Move heartbeat_192.168.100.182 ( c001n03 -> c001n02 ) * Recover ocf_192.168.100.183 ( c001n03 -> c001n02 ) * Move rsc_c001n05 ( c001n05 -> c001n07 ) * Move rsc_c001n07 ( c001n03 -> c001n07 ) Executing Cluster Transition: * Resource action: child_DoFencing:4 monitor=20000 on c001n08 * Fencing c001n05 (reboot) * Fencing c001n03 (reboot) * Pseudo action: group-1_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: ocf_192.168.100.183_stop_0 * Pseudo action: rsc_c001n05_stop_0 * Pseudo action: rsc_c001n07_stop_0 * Pseudo action: 
heartbeat_192.168.100.182_stop_0 * Resource action: rsc_c001n05 start on c001n07 * Resource action: rsc_c001n07 start on c001n07 * Pseudo action: ocf_192.168.100.181_stop_0 * Pseudo action: ocf_192.168.100.181_stop_0 * Resource action: rsc_c001n05 monitor=5000 on c001n07 * Resource action: rsc_c001n07 monitor=5000 on c001n07 * Pseudo action: group-1_stopped_0 * Pseudo action: group-1_start_0 * Resource action: ocf_192.168.100.181 start on c001n02 * Resource action: heartbeat_192.168.100.182 start on c001n02 * Resource action: ocf_192.168.100.183 start on c001n02 * Pseudo action: group-1_running_0 * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02 * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02 * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02 Revised Cluster Status: * Node List: * Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] * OFFLINE: [ c001n03 c001n05 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Stopped * Resource Group: group-1: * ocf_192.168.100.181 (ocf:heartbeat:IPaddr): Started c001n02 * heartbeat_192.168.100.182 (ocf:heartbeat:IPaddr): Started c001n02 * ocf_192.168.100.183 (ocf:heartbeat:IPaddr): Started c001n02 * lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 * rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n04 (ocf:heartbeat:IPaddr): Started c001n04 * rsc_c001n05 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n06 (ocf:heartbeat:IPaddr): Started c001n06 * rsc_c001n07 (ocf:heartbeat:IPaddr): Started c001n07 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 * Clone Set: DoFencing [child_DoFencing]: * Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ] * Stopped: [ c001n03 c001n05 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Master c001n02 - * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n02 - * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n07 - * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 - * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n08 + * ocf_msdummy:0 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Promoted c001n02 + * ocf_msdummy:1 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n02 + * ocf_msdummy:2 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:3 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n07 + * ocf_msdummy:4 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 + * ocf_msdummy:5 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n08 * ocf_msdummy:6 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:7 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:8 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped * ocf_msdummy:9 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Stopped - * ocf_msdummy:10 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 - * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n04 - * ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 - * ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Slave c001n06 + * ocf_msdummy:10 
(ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04 + * ocf_msdummy:11 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n04 + * ocf_msdummy:12 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 + * ocf_msdummy:13 (ocf:heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy): Unpromoted c001n06 diff --git a/cts/scheduler/summary/stonith-1.summary b/cts/scheduler/summary/stonith-1.summary index 4e2bf97398..31d2f57ce8 100644 --- a/cts/scheduler/summary/stonith-1.summary +++ b/cts/scheduler/summary/stonith-1.summary @@ -1,113 +1,113 @@ Current cluster status: * Node List: * Node sles-3: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-4 ] * Full List of Resources: * Resource Group: group-1: * r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.183 (ocf:heartbeat:IPaddr): Stopped * lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 * migrator (ocf:heartbeat:Dummy): Started sles-3 (UNCLEAN) * rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1 * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 (UNCLEAN) * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 * Clone Set: DoFencing [child_DoFencing]: * child_DoFencing (stonith:external/vmware): Started sles-3 (UNCLEAN) * Started: [ sles-1 sles-2 ] * Stopped: [ sles-4 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): * ocf_msdummy:0 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:1 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Slave sles-3 (UNCLEAN) + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN) * ocf_msdummy:3 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:4 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Slave sles-3 (UNCLEAN) + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-3 (UNCLEAN) * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped Transition Summary: * Fence (reboot) sles-3 'peer is no longer part of the cluster' * Start r192.168.100.183 ( sles-1 ) * Move migrator ( sles-3 -> sles-4 ) * Move rsc_sles-3 ( sles-3 -> sles-4 ) * Move child_DoFencing:2 ( sles-3 -> sles-4 ) * Start ocf_msdummy:0 ( sles-4 ) * Start ocf_msdummy:1 ( sles-1 ) - * Move ocf_msdummy:2 ( sles-3 -> sles-2 Slave ) + * Move ocf_msdummy:2 ( sles-3 -> sles-2 Unpromoted ) * Start ocf_msdummy:3 ( sles-4 ) * Start ocf_msdummy:4 ( sles-1 ) - * Move ocf_msdummy:5 ( sles-3 -> sles-2 Slave ) + * Move ocf_msdummy:5 ( sles-3 -> sles-2 Unpromoted ) Executing Cluster Transition: * Pseudo action: group-1_start_0 * Resource action: r192.168.100.182 monitor=5000 on sles-1 * Resource action: lsb_dummy monitor=5000 on sles-2 * Resource action: rsc_sles-2 monitor=5000 on sles-2 * Resource action: rsc_sles-4 monitor=5000 on sles-4 * Pseudo action: DoFencing_stop_0 * Fencing sles-3 (reboot) * Resource action: r192.168.100.183 start on sles-1 * Pseudo action: migrator_stop_0 * Pseudo action: rsc_sles-3_stop_0 * Pseudo action: child_DoFencing:2_stop_0 * Pseudo action: DoFencing_stopped_0 * Pseudo action: DoFencing_start_0 * Pseudo action: master_rsc_1_stop_0 * Pseudo action: group-1_running_0 * Resource action: r192.168.100.183 monitor=5000 on sles-1 * Resource action: migrator start on sles-4 * Resource action: rsc_sles-3 start on sles-4 * Resource action: child_DoFencing:2 start on sles-4 * Pseudo action: DoFencing_running_0 * Pseudo action: 
ocf_msdummy:2_stop_0 * Pseudo action: ocf_msdummy:5_stop_0 * Pseudo action: master_rsc_1_stopped_0 * Pseudo action: master_rsc_1_start_0 * Resource action: migrator monitor=10000 on sles-4 * Resource action: rsc_sles-3 monitor=5000 on sles-4 * Resource action: child_DoFencing:2 monitor=60000 on sles-4 * Resource action: ocf_msdummy:0 start on sles-4 * Resource action: ocf_msdummy:1 start on sles-1 * Resource action: ocf_msdummy:2 start on sles-2 * Resource action: ocf_msdummy:3 start on sles-4 * Resource action: ocf_msdummy:4 start on sles-1 * Resource action: ocf_msdummy:5 start on sles-2 * Pseudo action: master_rsc_1_running_0 * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 * Resource action: ocf_msdummy:3 monitor=5000 on sles-4 * Resource action: ocf_msdummy:4 monitor=5000 on sles-1 * Resource action: ocf_msdummy:5 monitor=5000 on sles-2 Revised Cluster Status: * Node List: * Online: [ sles-1 sles-2 sles-4 ] * OFFLINE: [ sles-3 ] * Full List of Resources: * Resource Group: group-1: * r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1 * lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Started sles-2 * migrator (ocf:heartbeat:Dummy): Started sles-4 * rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1 * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-4 * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 * Clone Set: DoFencing [child_DoFencing]: * Started: [ sles-1 sles-2 sles-4 ] * Stopped: [ sles-3 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:1 (ocf:heartbeat:Stateful): Slave sles-1 - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Slave sles-2 - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Slave sles-1 - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Slave sles-2 + * ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-1 + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-1 + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-2 * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped diff --git a/cts/scheduler/summary/stonith-2.summary b/cts/scheduler/summary/stonith-2.summary index 1996417c4a..9fd7c65e59 100644 --- a/cts/scheduler/summary/stonith-2.summary +++ b/cts/scheduler/summary/stonith-2.summary @@ -1,78 +1,78 @@ Current cluster status: * Node List: * Node sles-5: UNCLEAN (offline) * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] * Full List of Resources: * Resource Group: group-1: * r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1 * lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 * migrator (ocf:heartbeat:Dummy): Started sles-3 * rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1 * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 * rsc_sles-5 (ocf:heartbeat:IPaddr): Stopped * 
rsc_sles-6 (ocf:heartbeat:IPaddr): Started sles-6 * Clone Set: DoFencing [child_DoFencing]: * Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] * Stopped: [ sles-5 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:Stateful): Slave sles-3 - * ocf_msdummy:1 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Slave sles-1 - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Slave sles-2 - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Slave sles-1 + * ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-3 + * ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-1 + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-1 * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped - * ocf_msdummy:8 (ocf:heartbeat:Stateful): Slave sles-6 - * ocf_msdummy:9 (ocf:heartbeat:Stateful): Slave sles-6 - * ocf_msdummy:10 (ocf:heartbeat:Stateful): Slave sles-2 - * ocf_msdummy:11 (ocf:heartbeat:Stateful): Slave sles-3 + * ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted sles-6 + * ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted sles-6 + * ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted sles-3 Transition Summary: * Fence (reboot) sles-5 'peer is no longer part of the cluster' * Start rsc_sles-5 ( sles-6 ) Executing Cluster Transition: * Fencing sles-5 (reboot) * Resource action: rsc_sles-5 start on sles-6 * Resource action: rsc_sles-5 monitor=5000 on sles-6 Revised Cluster Status: * Node List: * Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] * OFFLINE: [ sles-5 ] * Full List of Resources: * Resource Group: group-1: * r192.168.100.181 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.182 (ocf:heartbeat:IPaddr): Started sles-1 * r192.168.100.183 (ocf:heartbeat:IPaddr): Started sles-1 * lsb_dummy (lsb:/usr/share/heartbeat/cts/LSBDummy): Started sles-2 * migrator (ocf:heartbeat:Dummy): Started sles-3 * rsc_sles-1 (ocf:heartbeat:IPaddr): Started sles-1 * rsc_sles-2 (ocf:heartbeat:IPaddr): Started sles-2 * rsc_sles-3 (ocf:heartbeat:IPaddr): Started sles-3 * rsc_sles-4 (ocf:heartbeat:IPaddr): Started sles-4 * rsc_sles-5 (ocf:heartbeat:IPaddr): Started sles-6 * rsc_sles-6 (ocf:heartbeat:IPaddr): Started sles-6 * Clone Set: DoFencing [child_DoFencing]: * Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ] * Stopped: [ sles-5 ] * Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique): - * ocf_msdummy:0 (ocf:heartbeat:Stateful): Slave sles-3 - * ocf_msdummy:1 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:2 (ocf:heartbeat:Stateful): Slave sles-4 - * ocf_msdummy:3 (ocf:heartbeat:Stateful): Slave sles-1 - * ocf_msdummy:4 (ocf:heartbeat:Stateful): Slave sles-2 - * ocf_msdummy:5 (ocf:heartbeat:Stateful): Slave sles-1 + * ocf_msdummy:0 (ocf:heartbeat:Stateful): Unpromoted sles-3 + * ocf_msdummy:1 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:2 (ocf:heartbeat:Stateful): Unpromoted sles-4 + * ocf_msdummy:3 (ocf:heartbeat:Stateful): Unpromoted sles-1 + * ocf_msdummy:4 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:5 (ocf:heartbeat:Stateful): Unpromoted sles-1 * ocf_msdummy:6 (ocf:heartbeat:Stateful): Stopped * ocf_msdummy:7 (ocf:heartbeat:Stateful): Stopped - * 
ocf_msdummy:8 (ocf:heartbeat:Stateful): Slave sles-6 - * ocf_msdummy:9 (ocf:heartbeat:Stateful): Slave sles-6 - * ocf_msdummy:10 (ocf:heartbeat:Stateful): Slave sles-2 - * ocf_msdummy:11 (ocf:heartbeat:Stateful): Slave sles-3 + * ocf_msdummy:8 (ocf:heartbeat:Stateful): Unpromoted sles-6 + * ocf_msdummy:9 (ocf:heartbeat:Stateful): Unpromoted sles-6 + * ocf_msdummy:10 (ocf:heartbeat:Stateful): Unpromoted sles-2 + * ocf_msdummy:11 (ocf:heartbeat:Stateful): Unpromoted sles-3 diff --git a/cts/scheduler/summary/target-1.summary b/cts/scheduler/summary/target-1.summary index 9264d1904c..edc1daf32b 100644 --- a/cts/scheduler/summary/target-1.summary +++ b/cts/scheduler/summary/target-1.summary @@ -1,43 +1,43 @@ 1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 (disabled) * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * Clone Set: promoteme [rsc_c001n03] (promotable): - * Slaves: [ c001n03 ] + * Unpromoted: [ c001n03 ] * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 Transition Summary: * Stop rsc_c001n08 ( c001n08 ) due to node availability Executing Cluster Transition: * Resource action: DcIPaddr monitor on c001n08 * Resource action: DcIPaddr monitor on c001n03 * Resource action: DcIPaddr monitor on c001n01 * Resource action: rsc_c001n08 stop on c001n08 * Resource action: rsc_c001n08 monitor on c001n03 * Resource action: rsc_c001n08 monitor on c001n02 * Resource action: rsc_c001n08 monitor on c001n01 * Resource action: rsc_c001n02 monitor on c001n08 * Resource action: rsc_c001n02 monitor on c001n03 * Resource action: rsc_c001n02 monitor on c001n01 * Resource action: rsc_c001n01 monitor on c001n08 * Resource action: rsc_c001n01 monitor on c001n03 * Resource action: rsc_c001n01 monitor on c001n02 Revised Cluster Status: * Node List: * Online: [ c001n01 c001n02 c001n03 c001n08 ] * Full List of Resources: * DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02 * rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped (disabled) * rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02 * Clone Set: promoteme [rsc_c001n03] (promotable): - * Slaves: [ c001n03 ] + * Unpromoted: [ c001n03 ] * rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01 diff --git a/cts/scheduler/summary/ticket-master-10.summary b/cts/scheduler/summary/ticket-master-10.summary index 7c86245541..eab3d91008 100644 --- a/cts/scheduler/summary/ticket-master-10.summary +++ b/cts/scheduler/summary/ticket-master-10.summary @@ -1,29 +1,29 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc1:0 ( node2 ) * Start rsc1:1 ( node1 ) Executing Cluster Transition: * Resource action: rsc1:0 monitor on node2 * Resource action: rsc1:1 monitor on node1 * Pseudo action: ms1_start_0 * Resource action: rsc1:0 start on node2 * Resource action: rsc1:1 start on node1 * Pseudo action: ms1_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-11.summary b/cts/scheduler/summary/ticket-master-11.summary index 
098359370f..381603997e 100644 --- a/cts/scheduler/summary/ticket-master-11.summary +++ b/cts/scheduler/summary/ticket-master-11.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: - * Promote rsc1:0 ( Slave -> Master node1 ) + * Promote rsc1:0 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node1 * Pseudo action: ms1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-12.summary b/cts/scheduler/summary/ticket-master-12.summary index a89d33640d..b51c277faf 100644 --- a/cts/scheduler/summary/ticket-master-12.summary +++ b/cts/scheduler/summary/ticket-master-12.summary @@ -1,23 +1,23 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-14.summary b/cts/scheduler/summary/ticket-master-14.summary index 7822b43825..ee8912b2e9 100644 --- a/cts/scheduler/summary/ticket-master-14.summary +++ b/cts/scheduler/summary/ticket-master-14.summary @@ -1,31 +1,31 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Stop rsc1:0 ( Master node1 ) due to node availability - * Stop rsc1:1 ( Slave node2 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability + * Stop rsc1:1 ( Unpromoted node2 ) due to node availability Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-15.summary b/cts/scheduler/summary/ticket-master-15.summary index 7822b43825..ee8912b2e9 100644 --- a/cts/scheduler/summary/ticket-master-15.summary +++ b/cts/scheduler/summary/ticket-master-15.summary @@ -1,31 +1,31 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] 
Transition Summary: - * Stop rsc1:0 ( Master node1 ) due to node availability - * Stop rsc1:1 ( Slave node2 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability + * Stop rsc1:1 ( Unpromoted node2 ) due to node availability Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-16.summary b/cts/scheduler/summary/ticket-master-16.summary index ecc416b5f1..851e54ebd5 100644 --- a/cts/scheduler/summary/ticket-master-16.summary +++ b/cts/scheduler/summary/ticket-master-16.summary @@ -1,21 +1,21 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-17.summary b/cts/scheduler/summary/ticket-master-17.summary index 3363826c71..ee25f92c4e 100644 --- a/cts/scheduler/summary/ticket-master-17.summary +++ b/cts/scheduler/summary/ticket-master-17.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-18.summary b/cts/scheduler/summary/ticket-master-18.summary index 3363826c71..ee25f92c4e 100644 --- a/cts/scheduler/summary/ticket-master-18.summary +++ b/cts/scheduler/summary/ticket-master-18.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git 
a/cts/scheduler/summary/ticket-master-19.summary b/cts/scheduler/summary/ticket-master-19.summary index ecc416b5f1..851e54ebd5 100644 --- a/cts/scheduler/summary/ticket-master-19.summary +++ b/cts/scheduler/summary/ticket-master-19.summary @@ -1,21 +1,21 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-2.summary b/cts/scheduler/summary/ticket-master-2.summary index a9e7031eb5..4da760a8ac 100644 --- a/cts/scheduler/summary/ticket-master-2.summary +++ b/cts/scheduler/summary/ticket-master-2.summary @@ -1,31 +1,31 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc1:0 ( node2 ) - * Promote rsc1:1 ( Stopped -> Master node1 ) + * Promote rsc1:1 ( Stopped -> Promoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_start_0 * Resource action: rsc1:0 start on node2 * Resource action: rsc1:1 start on node1 * Pseudo action: ms1_running_0 * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node1 * Pseudo action: ms1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-20.summary b/cts/scheduler/summary/ticket-master-20.summary index 3363826c71..ee25f92c4e 100644 --- a/cts/scheduler/summary/ticket-master-20.summary +++ b/cts/scheduler/summary/ticket-master-20.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-21.summary b/cts/scheduler/summary/ticket-master-21.summary index 94d9199aea..f116a2eea0 100644 --- a/cts/scheduler/summary/ticket-master-21.summary +++ b/cts/scheduler/summary/ticket-master-21.summary @@ -1,36 +1,36 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Fence (reboot) node1 'deadman ticket was lost' * Move rsc_stonith 
( node1 -> node2 ) - * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability Executing Cluster Transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node2 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node2 ] + * Unpromoted: [ node2 ] * Stopped: [ node1 ] diff --git a/cts/scheduler/summary/ticket-master-22.summary b/cts/scheduler/summary/ticket-master-22.summary index ecc416b5f1..851e54ebd5 100644 --- a/cts/scheduler/summary/ticket-master-22.summary +++ b/cts/scheduler/summary/ticket-master-22.summary @@ -1,21 +1,21 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-23.summary b/cts/scheduler/summary/ticket-master-23.summary index 3363826c71..ee25f92c4e 100644 --- a/cts/scheduler/summary/ticket-master-23.summary +++ b/cts/scheduler/summary/ticket-master-23.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-24.summary b/cts/scheduler/summary/ticket-master-24.summary index a89d33640d..b51c277faf 100644 --- a/cts/scheduler/summary/ticket-master-24.summary +++ b/cts/scheduler/summary/ticket-master-24.summary @@ -1,23 +1,23 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-3.summary b/cts/scheduler/summary/ticket-master-3.summary index 7822b43825..ee8912b2e9 100644 --- a/cts/scheduler/summary/ticket-master-3.summary 
+++ b/cts/scheduler/summary/ticket-master-3.summary @@ -1,31 +1,31 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Stop rsc1:0 ( Master node1 ) due to node availability - * Stop rsc1:1 ( Slave node2 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability + * Stop rsc1:1 ( Unpromoted node2 ) due to node availability Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Resource action: rsc1:1 stop on node1 * Resource action: rsc1:0 stop on node2 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-4.summary b/cts/scheduler/summary/ticket-master-4.summary index 7c86245541..eab3d91008 100644 --- a/cts/scheduler/summary/ticket-master-4.summary +++ b/cts/scheduler/summary/ticket-master-4.summary @@ -1,29 +1,29 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc1:0 ( node2 ) * Start rsc1:1 ( node1 ) Executing Cluster Transition: * Resource action: rsc1:0 monitor on node2 * Resource action: rsc1:1 monitor on node1 * Pseudo action: ms1_start_0 * Resource action: rsc1:0 start on node2 * Resource action: rsc1:1 start on node1 * Pseudo action: ms1_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-5.summary b/cts/scheduler/summary/ticket-master-5.summary index 098359370f..381603997e 100644 --- a/cts/scheduler/summary/ticket-master-5.summary +++ b/cts/scheduler/summary/ticket-master-5.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: - * Promote rsc1:0 ( Slave -> Master node1 ) + * Promote rsc1:0 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node1 * Pseudo action: ms1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-6.summary b/cts/scheduler/summary/ticket-master-6.summary index 3363826c71..ee25f92c4e 100644 --- a/cts/scheduler/summary/ticket-master-6.summary +++ b/cts/scheduler/summary/ticket-master-6.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ 
node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: - * Demote rsc1:0 ( Master -> Slave node1 ) + * Demote rsc1:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_demote_0 * Resource action: rsc1:1 demote on node1 * Pseudo action: ms1_demoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-7.summary b/cts/scheduler/summary/ticket-master-7.summary index 7c86245541..eab3d91008 100644 --- a/cts/scheduler/summary/ticket-master-7.summary +++ b/cts/scheduler/summary/ticket-master-7.summary @@ -1,29 +1,29 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc1:0 ( node2 ) * Start rsc1:1 ( node1 ) Executing Cluster Transition: * Resource action: rsc1:0 monitor on node2 * Resource action: rsc1:1 monitor on node1 * Pseudo action: ms1_start_0 * Resource action: rsc1:0 start on node2 * Resource action: rsc1:1 start on node1 * Pseudo action: ms1_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-master-8.summary b/cts/scheduler/summary/ticket-master-8.summary index 098359370f..381603997e 100644 --- a/cts/scheduler/summary/ticket-master-8.summary +++ b/cts/scheduler/summary/ticket-master-8.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: - * Promote rsc1:0 ( Slave -> Master node1 ) + * Promote rsc1:0 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Pseudo action: ms1_promote_0 * Resource action: rsc1:1 promote on node1 * Pseudo action: ms1_promoted_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-master-9.summary b/cts/scheduler/summary/ticket-master-9.summary index 94d9199aea..f116a2eea0 100644 --- a/cts/scheduler/summary/ticket-master-9.summary +++ b/cts/scheduler/summary/ticket-master-9.summary @@ -1,36 +1,36 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * Clone Set: ms1 [rsc1] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Fence (reboot) node1 'deadman ticket was lost' * Move rsc_stonith ( node1 -> node2 ) - * Stop rsc1:0 ( Master node1 ) due to node availability + * Stop rsc1:0 ( Promoted node1 ) due to node availability Executing Cluster Transition: * Pseudo action: rsc_stonith_stop_0 * Pseudo action: ms1_demote_0 * Fencing node1 (reboot) * Resource action: rsc_stonith start on node2 * Pseudo action: rsc1:1_demote_0 * 
Pseudo action: ms1_demoted_0 * Pseudo action: ms1_stop_0 * Pseudo action: rsc1:1_stop_0 * Pseudo action: ms1_stopped_0 Revised Cluster Status: * Node List: * Online: [ node2 ] * OFFLINE: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node2 * Clone Set: ms1 [rsc1] (promotable): - * Slaves: [ node2 ] + * Unpromoted: [ node2 ] * Stopped: [ node1 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-1.summary b/cts/scheduler/summary/ticket-rsc-sets-1.summary index 1a62118093..d119ce5176 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-1.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-1.summary @@ -1,49 +1,49 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc5:0 ( node2 ) * Start rsc5:1 ( node1 ) Executing Cluster Transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc1 monitor on node1 * Resource action: rsc2 monitor on node2 * Resource action: rsc2 monitor on node1 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc4:0 monitor on node2 * Resource action: rsc4:0 monitor on node1 * Resource action: rsc5:0 monitor on node2 * Resource action: rsc5:1 monitor on node1 * Pseudo action: ms5_start_0 * Resource action: rsc5:0 start on node2 * Resource action: rsc5:1 start on node1 * Pseudo action: ms5_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-10.summary b/cts/scheduler/summary/ticket-rsc-sets-10.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-10.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-10.summary @@ -1,52 +1,52 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * 
Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-11.summary b/cts/scheduler/summary/ticket-rsc-sets-11.summary index 95c86daf5f..03153aa264 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-11.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-11.summary @@ -1,33 +1,33 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-12.summary b/cts/scheduler/summary/ticket-rsc-sets-12.summary index 3cc2cad628..68e0827f78 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-12.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-12.summary @@ -1,41 +1,41 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Resource action: rsc2 stop on node1 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-13.summary b/cts/scheduler/summary/ticket-rsc-sets-13.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-13.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-13.summary @@ -1,52 +1,52 @@ Current cluster 
status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-14.summary b/cts/scheduler/summary/ticket-rsc-sets-14.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-14.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-14.summary @@ -1,52 +1,52 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: 
clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-2.summary b/cts/scheduler/summary/ticket-rsc-sets-2.summary index cd73ad59a8..673d205880 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-2.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-2.summary @@ -1,57 +1,57 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: * Start rsc1 ( node2 ) * Start rsc2 ( node1 ) * Start rsc3 ( node1 ) * Start rsc4:0 ( node2 ) * Start rsc4:1 ( node1 ) - * Promote rsc5:0 ( Slave -> Master node1 ) + * Promote rsc5:0 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Resource action: rsc1 start on node2 * Pseudo action: group2_start_0 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Pseudo action: clone4_start_0 * Pseudo action: ms5_promote_0 * Resource action: rsc1 monitor=10000 on node2 * Pseudo action: group2_running_0 * Resource action: rsc2 monitor=5000 on node1 * Resource action: rsc3 monitor=5000 on node1 * Resource action: rsc4:0 start on node2 * Resource action: rsc4:1 start on node1 * Pseudo action: clone4_running_0 * Resource action: rsc5:1 promote on node1 * Pseudo action: ms5_promoted_0 * Resource action: rsc4:0 monitor=5000 on node2 * Resource action: rsc4:1 monitor=5000 on node1 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-3.summary b/cts/scheduler/summary/ticket-rsc-sets-3.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-3.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-3.summary @@ -1,52 +1,52 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: 
ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-4.summary b/cts/scheduler/summary/ticket-rsc-sets-4.summary index 1a62118093..d119ce5176 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-4.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-4.summary @@ -1,49 +1,49 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): * Stopped: [ node1 node2 ] Transition Summary: * Start rsc5:0 ( node2 ) * Start rsc5:1 ( node1 ) Executing Cluster Transition: * Resource action: rsc1 monitor on node2 * Resource action: rsc1 monitor on node1 * Resource action: rsc2 monitor on node2 * Resource action: rsc2 monitor on node1 * Resource action: rsc3 monitor on node2 * Resource action: rsc3 monitor on node1 * Resource action: rsc4:0 monitor on node2 * Resource action: rsc4:0 monitor on node1 * Resource action: rsc5:0 monitor on node2 * Resource action: rsc5:1 monitor on node1 * Pseudo action: ms5_start_0 * Resource action: rsc5:0 start on node2 * Resource action: rsc5:1 start on node1 * Pseudo action: ms5_running_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-5.summary b/cts/scheduler/summary/ticket-rsc-sets-5.summary index 8bbb5bd0c8..217243a7b2 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-5.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-5.summary @@ -1,44 +1,44 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: * Start rsc1 ( node2 ) * Start rsc2 ( node1 ) * Start rsc3 ( node1 ) Executing Cluster Transition: * Resource action: rsc1 start on node2 * Pseudo action: group2_start_0 * Resource action: rsc2 start on node1 * Resource action: rsc3 start on node1 * Resource action: rsc1 monitor=10000 on node2 * Pseudo action: group2_running_0 * Resource action: rsc2 monitor=5000 on node1 * Resource 
action: rsc3 monitor=5000 on node1 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-6.summary b/cts/scheduler/summary/ticket-rsc-sets-6.summary index 5a138013d8..651c55dccb 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-6.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-6.summary @@ -1,46 +1,46 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: * Start rsc4:0 ( node2 ) * Start rsc4:1 ( node1 ) - * Promote rsc5:0 ( Slave -> Master node1 ) + * Promote rsc5:0 ( Unpromoted -> Promoted node1 ) Executing Cluster Transition: * Pseudo action: clone4_start_0 * Pseudo action: ms5_promote_0 * Resource action: rsc4:0 start on node2 * Resource action: rsc4:1 start on node1 * Pseudo action: clone4_running_0 * Resource action: rsc5:1 promote on node1 * Pseudo action: ms5_promoted_0 * Resource action: rsc4:0 monitor=5000 on node2 * Resource action: rsc4:1 monitor=5000 on node1 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-7.summary b/cts/scheduler/summary/ticket-rsc-sets-7.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-7.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-7.summary @@ -1,52 +1,52 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop 
on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-8.summary b/cts/scheduler/summary/ticket-rsc-sets-8.summary index 95c86daf5f..03153aa264 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-8.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-8.summary @@ -1,33 +1,33 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/ticket-rsc-sets-9.summary b/cts/scheduler/summary/ticket-rsc-sets-9.summary index 581f092906..acf79003f8 100644 --- a/cts/scheduler/summary/ticket-rsc-sets-9.summary +++ b/cts/scheduler/summary/ticket-rsc-sets-9.summary @@ -1,52 +1,52 @@ Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node2 * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Started node1 * rsc3 (ocf:pacemaker:Dummy): Started node1 * Clone Set: clone4 [rsc4]: * Started: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Masters: [ node1 ] - * Slaves: [ node2 ] + * Promoted: [ node1 ] + * Unpromoted: [ node2 ] Transition Summary: * Stop rsc1 ( node2 ) due to node availability * Stop rsc2 ( node1 ) due to node availability * Stop rsc3 ( node1 ) due to node availability * Stop rsc4:0 ( node1 ) due to node availability * Stop rsc4:1 ( node2 ) due to node availability - * Demote rsc5:0 ( Master -> Slave node1 ) + * Demote rsc5:0 ( Promoted -> Unpromoted node1 ) Executing Cluster Transition: * Resource action: rsc1 stop on node2 * Pseudo action: group2_stop_0 * Resource action: rsc3 stop on node1 * Pseudo action: clone4_stop_0 * Pseudo action: ms5_demote_0 * Resource action: rsc2 stop on node1 * Resource action: rsc4:1 stop on node1 * Resource action: rsc4:0 stop on node2 * Pseudo action: clone4_stopped_0 * Resource action: rsc5:1 demote on node1 * Pseudo action: ms5_demoted_0 * Pseudo action: group2_stopped_0 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 
(ocf:pacemaker:Dummy): Stopped * Resource Group: group2: * rsc2 (ocf:pacemaker:Dummy): Stopped * rsc3 (ocf:pacemaker:Dummy): Stopped * Clone Set: clone4 [rsc4]: * Stopped: [ node1 node2 ] * Clone Set: ms5 [rsc5] (promotable): - * Slaves: [ node1 node2 ] + * Unpromoted: [ node1 node2 ] diff --git a/cts/scheduler/summary/unmanaged-master.summary b/cts/scheduler/summary/unmanaged-master.summary index b43d7c7a20..bdaac99618 100644 --- a/cts/scheduler/summary/unmanaged-master.summary +++ b/cts/scheduler/summary/unmanaged-master.summary @@ -1,63 +1,63 @@ Current cluster status: * Node List: * Online: [ pcmk-1 pcmk-2 ] * OFFLINE: [ pcmk-3 pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild] (unmanaged): * FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged) * FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] * Resource Group: group-1 (unmanaged): * r192.168.122.126 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * r192.168.122.127 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * r192.168.122.128 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged) * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged) * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged) * migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged) * Clone Set: Connectivity [ping-1] (unmanaged): * ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable) (unmanaged): - * stateful-1 (ocf:pacemaker:Stateful): Master pcmk-2 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): Slave pcmk-1 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-2 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] Transition Summary: Executing Cluster Transition: * Cluster action: do_shutdown on pcmk-2 * Cluster action: do_shutdown on pcmk-1 Revised Cluster Status: * Node List: * Online: [ pcmk-1 pcmk-2 ] * OFFLINE: [ pcmk-3 pcmk-4 ] * Full List of Resources: * Clone Set: Fencing [FencingChild] (unmanaged): * FencingChild (stonith:fence_xvm): Started pcmk-2 (unmanaged) * FencingChild (stonith:fence_xvm): Started pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] * Resource Group: group-1 (unmanaged): * r192.168.122.126 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * r192.168.122.127 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * r192.168.122.128 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-1 (ocf:heartbeat:IPaddr): Started pcmk-1 (unmanaged) * rsc_pcmk-2 (ocf:heartbeat:IPaddr): Started pcmk-2 (unmanaged) * rsc_pcmk-3 (ocf:heartbeat:IPaddr): Started pcmk-3 (unmanaged) * rsc_pcmk-4 (ocf:heartbeat:IPaddr): Started pcmk-4 (unmanaged) * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started pcmk-2 (unmanaged) * migrator (ocf:pacemaker:Dummy): Started pcmk-4 (unmanaged) * Clone Set: Connectivity [ping-1] (unmanaged): * ping-1 (ocf:pacemaker:ping): Started pcmk-2 (unmanaged) * ping-1 (ocf:pacemaker:ping): Started pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] * Clone Set: master-1 [stateful-1] (promotable) (unmanaged): - * stateful-1 (ocf:pacemaker:Stateful): Master pcmk-2 (unmanaged) - * stateful-1 (ocf:pacemaker:Stateful): 
Slave pcmk-1 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Promoted pcmk-2 (unmanaged) + * stateful-1 (ocf:pacemaker:Stateful): Unpromoted pcmk-1 (unmanaged) * Stopped: [ pcmk-3 pcmk-4 ] diff --git a/cts/scheduler/summary/unrunnable-2.summary b/cts/scheduler/summary/unrunnable-2.summary index ace0d24022..26c6351078 100644 --- a/cts/scheduler/summary/unrunnable-2.summary +++ b/cts/scheduler/summary/unrunnable-2.summary @@ -1,178 +1,178 @@ 6 of 117 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Full List of Resources: * ip-192.0.2.12 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0 * Clone Set: haproxy-clone [haproxy]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: galera-master [galera] (promotable): - * Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] + * Promoted: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: memcached-clone [memcached]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: rabbitmq-clone [rabbitmq]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-core-clone [openstack-core] (disabled): * Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: redis-master [redis] (promotable): - * Masters: [ overcloud-controller-1 ] - * Slaves: [ overcloud-controller-0 overcloud-controller-2 ] + * Promoted: [ overcloud-controller-1 ] + * Unpromoted: [ overcloud-controller-0 overcloud-controller-2 ] * ip-192.0.2.11 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1 * Clone Set: mongod-clone [mongod]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped * Clone Set: openstack-heat-engine-clone [openstack-heat-engine]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: 
openstack-heat-api-clone [openstack-heat-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-glance-api-clone [openstack-glance-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-nova-api-clone [openstack-nova-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-sahara-api-clone [openstack-sahara-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-glance-registry-clone [openstack-glance-registry]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]: * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-cinder-api-clone [openstack-cinder-api]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: delay-clone [delay]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: neutron-server-clone [neutron-server]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: httpd-clone [httpd] (disabled): * Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]: * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] Transition Summary: * Start openstack-cinder-volume ( overcloud-controller-2 ) due to unrunnable openstack-cinder-scheduler-clone running (blocked) Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] * Full List of Resources: * ip-192.0.2.12 (ocf:heartbeat:IPaddr2): 
Started overcloud-controller-0
     * Clone Set: haproxy-clone [haproxy]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: galera-master [galera] (promotable):
-      * Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
+      * Promoted: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: memcached-clone [memcached]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: rabbitmq-clone [rabbitmq]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-core-clone [openstack-core] (disabled):
       * Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: redis-master [redis] (promotable):
-      * Masters: [ overcloud-controller-1 ]
-      * Slaves: [ overcloud-controller-0 overcloud-controller-2 ]
+      * Promoted: [ overcloud-controller-1 ]
+      * Unpromoted: [ overcloud-controller-0 overcloud-controller-2 ]
     * ip-192.0.2.11 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
     * Clone Set: mongod-clone [mongod]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
     * Clone Set: openstack-heat-engine-clone [openstack-heat-engine]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-heat-api-clone [openstack-heat-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-glance-api-clone [openstack-glance-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-nova-api-clone [openstack-nova-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-sahara-api-clone [openstack-sahara-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-glance-registry-clone [openstack-glance-registry]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]:
       * Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-cinder-api-clone [openstack-cinder-api]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: delay-clone [delay]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: neutron-server-clone [neutron-server]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: httpd-clone [httpd] (disabled):
       * Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
     * Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]:
       * Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
diff --git a/cts/scheduler/summary/use-after-free-merge.summary b/cts/scheduler/summary/use-after-free-merge.summary
index 88b7857fd9..af3e2a2016 100644
--- a/cts/scheduler/summary/use-after-free-merge.summary
+++ b/cts/scheduler/summary/use-after-free-merge.summary
@@ -1,45 +1,45 @@
 2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
 Current cluster status:
   * Node List:
     * Online: [ hex-13 hex-14 ]

   * Full List of Resources:
     * fencing-sbd (stonith:external/sbd): Stopped
     * Resource Group: g0 (disabled):
       * d0 (ocf:heartbeat:Dummy): Stopped (disabled)
       * d1 (ocf:heartbeat:Dummy): Stopped (disabled)
     * Clone Set: ms0 [s0] (promotable):
       * Stopped: [ hex-13 hex-14 ]

 Transition Summary:
   * Start fencing-sbd ( hex-14 )
   * Start s0:0 ( hex-13 )
   * Start s0:1 ( hex-14 )

 Executing Cluster Transition:
   * Resource action: fencing-sbd monitor on hex-14
   * Resource action: fencing-sbd monitor on hex-13
   * Resource action: d0 monitor on hex-14
   * Resource action: d0 monitor on hex-13
   * Resource action: d1 monitor on hex-14
   * Resource action: d1 monitor on hex-13
   * Resource action: s0:0 monitor on hex-13
   * Resource action: s0:1 monitor on hex-14
   * Pseudo action: ms0_start_0
   * Resource action: fencing-sbd start on hex-14
   * Resource action: s0:0 start on hex-13
   * Resource action: s0:1 start on hex-14
   * Pseudo action: ms0_running_0

 Revised Cluster Status:
   * Node List:
     * Online: [ hex-13 hex-14 ]

   * Full List of Resources:
     * fencing-sbd (stonith:external/sbd): Started hex-14
     * Resource Group: g0 (disabled):
       * d0 (ocf:heartbeat:Dummy): Stopped (disabled)
       * d1 (ocf:heartbeat:Dummy): Stopped (disabled)
     * Clone Set: ms0 [s0] (promotable):
-      * Slaves: [ hex-13 hex-14 ]
+      * Unpromoted: [ hex-13 hex-14 ]
diff --git a/cts/scheduler/summary/whitebox-fail3.summary b/cts/scheduler/summary/whitebox-fail3.summary
index ac6cbda0b9..6b38fc2e22 100644
--- a/cts/scheduler/summary/whitebox-fail3.summary
+++ b/cts/scheduler/summary/whitebox-fail3.summary
@@ -1,55 +1,55 @@
 Current cluster status:
   * Node List:
     * Online: [ dvossel-laptop2 ]

   * Full List of Resources:
     * vm (ocf:heartbeat:VirtualDomain): Stopped
     * vm2 (ocf:heartbeat:VirtualDomain): Stopped
     * FAKE (ocf:pacemaker:Dummy): Started dvossel-laptop2
     * Clone Set: W-master [W] (promotable):
-      * Masters: [ dvossel-laptop2 ]
+      * Promoted: [ dvossel-laptop2 ]
       * Stopped: [ 18builder 18node1 ]
     * Clone Set: X-master [X] (promotable):
-      * Masters: [ dvossel-laptop2 ]
+      * Promoted: [ dvossel-laptop2 ]
       * Stopped: [ 18builder 18node1 ]

 Transition Summary:
   * Start vm ( dvossel-laptop2 )
   * Move FAKE ( dvossel-laptop2 -> 18builder )
   * Start W:1 ( 18builder )
   * Start X:1 ( 18builder )
   * Start 18builder ( dvossel-laptop2 )

 Executing Cluster Transition:
   * Resource action: vm start on dvossel-laptop2
   * Pseudo action: W-master_start_0
   * Pseudo action: X-master_start_0
   * Resource action: 18builder monitor on dvossel-laptop2
   * Resource action: 18builder start on dvossel-laptop2
   * Resource action: FAKE stop on dvossel-laptop2
   * Resource action: W start on 18builder
   * Pseudo action: W-master_running_0
   * Resource action: X start on 18builder
   * Pseudo action: X-master_running_0
   * Resource action: 18builder monitor=30000 on dvossel-laptop2
   * Resource action: FAKE start on 18builder
   * Resource action: W monitor=10000 on 18builder
   * Resource action: X monitor=10000 on 18builder

 Revised Cluster Status:
   * Node List:
     * Online: [ dvossel-laptop2 ]
     * GuestOnline: [ 18builder@dvossel-laptop2 ]

   * Full List of Resources:
     * vm (ocf:heartbeat:VirtualDomain): Started dvossel-laptop2
     * vm2 (ocf:heartbeat:VirtualDomain): Stopped
     * FAKE (ocf:pacemaker:Dummy): Started 18builder
     * Clone Set: W-master [W] (promotable):
-      * Masters: [ dvossel-laptop2 ]
-      * Slaves: [ 18builder ]
+      * Promoted: [ dvossel-laptop2 ]
+      * Unpromoted: [ 18builder ]
       * Stopped: [ 18node1 ]
     * Clone Set: X-master [X] (promotable):
-      * Masters: [ dvossel-laptop2 ]
-      * Slaves: [ 18builder ]
+      * Promoted: [ dvossel-laptop2 ]
+      * Unpromoted: [ 18builder ]
       * Stopped: [ 18node1 ]
diff --git a/cts/scheduler/summary/whitebox-ms-ordering-move.summary b/cts/scheduler/summary/whitebox-ms-ordering-move.summary
index c22c808ea4..6a5fb6eaeb 100644
--- a/cts/scheduler/summary/whitebox-ms-ordering-move.summary
+++ b/cts/scheduler/summary/whitebox-ms-ordering-move.summary
@@ -1,107 +1,107 @@
 Current cluster status:
   * Node List:
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
     * GuestOnline: [ lxc1@rhel7-1 lxc2@rhel7-1 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-3
     * FencingPass (stonith:fence_dummy): Started rhel7-4
     * FencingFail (stonith:fence_dummy): Started rhel7-5
     * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1
     * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2
     * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-3
     * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4
     * rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5
     * migrator (ocf:pacemaker:Dummy): Started rhel7-4
     * Clone Set: Connectivity [ping-1]:
       * Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
       * Stopped: [ lxc1 lxc2 ]
     * Clone Set: master-1 [stateful-1] (promotable):
-      * Masters: [ rhel7-3 ]
-      * Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+      * Promoted: [ rhel7-3 ]
+      * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
     * Resource Group: group-1:
       * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-3
       * petulant (service:DummySD): Started rhel7-3
       * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-3
     * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3
     * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-1
     * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
     * Clone Set: lxc-ms-master [lxc-ms] (promotable):
-      * Masters: [ lxc1 ]
-      * Slaves: [ lxc2 ]
+      * Promoted: [ lxc1 ]
+      * Unpromoted: [ lxc2 ]

 Transition Summary:
   * Move container1 ( rhel7-1 -> rhel7-2 )
-  * Restart lxc-ms:0 ( Master lxc1 ) due to required container1 start
+  * Restart lxc-ms:0 ( Promoted lxc1 ) due to required container1 start
   * Move lxc1 ( rhel7-1 -> rhel7-2 )

 Executing Cluster Transition:
   * Resource action: rsc_rhel7-1 monitor on lxc2
   * Resource action: rsc_rhel7-2 monitor on lxc2
   * Resource action: rsc_rhel7-3 monitor on lxc2
   * Resource action: rsc_rhel7-4 monitor on lxc2
   * Resource action: rsc_rhel7-5 monitor on lxc2
   * Resource action: migrator monitor on lxc2
   * Resource action: ping-1 monitor on lxc2
   * Resource action: stateful-1 monitor on lxc2
   * Resource action: r192.168.122.207 monitor on lxc2
   * Resource action: petulant monitor on lxc2
   * Resource action: r192.168.122.208 monitor on lxc2
   * Resource action: lsb-dummy monitor on lxc2
   * Pseudo action: lxc-ms-master_demote_0
   * Resource action: lxc1 monitor on rhel7-5
   * Resource action: lxc1 monitor on rhel7-4
   * Resource action: lxc1 monitor on rhel7-3
   * Resource action: lxc1 monitor on rhel7-2
   * Resource action: lxc2 monitor on rhel7-5
   * Resource action: lxc2 monitor on rhel7-4
   * Resource action: lxc2 monitor on rhel7-3
   * Resource action: lxc2 monitor on rhel7-2
   * Resource action: lxc-ms demote on lxc1
   * Pseudo action: lxc-ms-master_demoted_0
   * Pseudo action: lxc-ms-master_stop_0
   * Resource action: lxc-ms stop on lxc1
   * Pseudo action: lxc-ms-master_stopped_0
   * Pseudo action: lxc-ms-master_start_0
   * Resource action: lxc1 stop on rhel7-1
   * Resource action: container1 stop on rhel7-1
   * Resource action: container1 start on rhel7-2
   * Resource action: lxc1 start on rhel7-2
   * Resource action: lxc-ms start on lxc1
   * Pseudo action: lxc-ms-master_running_0
   * Resource action: lxc1 monitor=30000 on rhel7-2
   * Pseudo action: lxc-ms-master_promote_0
   * Resource action: lxc-ms promote on lxc1
   * Pseudo action: lxc-ms-master_promoted_0

 Revised Cluster Status:
   * Node List:
     * Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
     * GuestOnline: [ lxc1@rhel7-2 lxc2@rhel7-1 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started rhel7-3
     * FencingPass (stonith:fence_dummy): Started rhel7-4
     * FencingFail (stonith:fence_dummy): Started rhel7-5
     * rsc_rhel7-1 (ocf:heartbeat:IPaddr2): Started rhel7-1
     * rsc_rhel7-2 (ocf:heartbeat:IPaddr2): Started rhel7-2
     * rsc_rhel7-3 (ocf:heartbeat:IPaddr2): Started rhel7-3
     * rsc_rhel7-4 (ocf:heartbeat:IPaddr2): Started rhel7-4
     * rsc_rhel7-5 (ocf:heartbeat:IPaddr2): Started rhel7-5
     * migrator (ocf:pacemaker:Dummy): Started rhel7-4
     * Clone Set: Connectivity [ping-1]:
       * Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
       * Stopped: [ lxc1 lxc2 ]
     * Clone Set: master-1 [stateful-1] (promotable):
-      * Masters: [ rhel7-3 ]
-      * Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+      * Promoted: [ rhel7-3 ]
+      * Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
     * Resource Group: group-1:
       * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel7-3
       * petulant (service:DummySD): Started rhel7-3
       * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel7-3
     * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started rhel7-3
     * container1 (ocf:heartbeat:VirtualDomain): Started rhel7-2
     * container2 (ocf:heartbeat:VirtualDomain): Started rhel7-1
     * Clone Set: lxc-ms-master [lxc-ms] (promotable):
-      * Masters: [ lxc1 ]
-      * Slaves: [ lxc2 ]
+      * Promoted: [ lxc1 ]
+      * Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/whitebox-ms-ordering.summary b/cts/scheduler/summary/whitebox-ms-ordering.summary
index 0236f3ba52..921f6d068d 100644
--- a/cts/scheduler/summary/whitebox-ms-ordering.summary
+++ b/cts/scheduler/summary/whitebox-ms-ordering.summary
@@ -1,73 +1,73 @@
 Current cluster status:
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]

   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node2
     * container1 (ocf:heartbeat:VirtualDomain): FAILED
     * container2 (ocf:heartbeat:VirtualDomain): FAILED
     * Clone Set: lxc-ms-master [lxc-ms] (promotable):
       * Stopped: [ 18node1 18node2 18node3 ]

 Transition Summary:
   * Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
   * Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
   * Start container1 ( 18node1 )
   * Start container2 ( 18node1 )
-  * Recover lxc-ms:0 ( Master lxc1 )
-  * Recover lxc-ms:1 ( Slave lxc2 )
+  * Recover lxc-ms:0 ( Promoted lxc1 )
+  * Recover lxc-ms:1 ( Unpromoted lxc2 )
   * Start lxc1 ( 18node1 )
   * Start lxc2 ( 18node1 )

 Executing Cluster Transition:
   * Resource action: container1 monitor on 18node3
   * Resource action: container1 monitor on 18node2
   * Resource action: container1 monitor on 18node1
   * Resource action: container2 monitor on 18node3
   * Resource action: container2 monitor on 18node2
   * Resource action: container2 monitor on 18node1
   * Resource action: lxc-ms monitor on 18node3
   * Resource action: lxc-ms monitor on 18node2
   * Resource action: lxc-ms monitor on 18node1
   * Pseudo action: lxc-ms-master_demote_0
   * Resource action: lxc1 monitor on 18node3
   * Resource action: lxc1 monitor on 18node2
   * Resource action: lxc1 monitor on 18node1
   * Resource action: lxc2 monitor on 18node3
   * Resource action: lxc2 monitor on 18node2
   * Resource action: lxc2 monitor on 18node1
   * Pseudo action: stonith-lxc2-reboot on lxc2
   * Pseudo action: stonith-lxc1-reboot on lxc1
   * Resource action: container1 start on 18node1
   * Resource action: container2 start on 18node1
   * Pseudo action: lxc-ms_demote_0
   * Pseudo action: lxc-ms-master_demoted_0
   * Pseudo action: lxc-ms-master_stop_0
   * Resource action: lxc1 start on 18node1
   * Resource action: lxc2 start on 18node1
   * Pseudo action: lxc-ms_stop_0
   * Pseudo action: lxc-ms_stop_0
   * Pseudo action: lxc-ms-master_stopped_0
   * Pseudo action: lxc-ms-master_start_0
   * Resource action: lxc1 monitor=30000 on 18node1
   * Resource action: lxc2 monitor=30000 on 18node1
   * Resource action: lxc-ms start on lxc1
   * Resource action: lxc-ms start on lxc2
   * Pseudo action: lxc-ms-master_running_0
   * Resource action: lxc-ms monitor=10000 on lxc2
   * Pseudo action: lxc-ms-master_promote_0
   * Resource action: lxc-ms promote on lxc1
   * Pseudo action: lxc-ms-master_promoted_0

 Revised Cluster Status:
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
     * GuestOnline: [ lxc1@18node1 lxc2@18node1 ]

   * Full List of Resources:
     * shooter (stonith:fence_xvm): Started 18node2
     * container1 (ocf:heartbeat:VirtualDomain): Started 18node1
     * container2 (ocf:heartbeat:VirtualDomain): Started 18node1
     * Clone Set: lxc-ms-master [lxc-ms] (promotable):
-      * Masters: [ lxc1 ]
-      * Slaves: [ lxc2 ]
+      * Promoted: [ lxc1 ]
+      * Unpromoted: [ lxc2 ]
diff --git a/cts/scheduler/summary/whitebox-orphan-ms.summary b/cts/scheduler/summary/whitebox-orphan-ms.summary
index a42e5cb545..0d0007dcc6 100644
--- a/cts/scheduler/summary/whitebox-orphan-ms.summary
+++ b/cts/scheduler/summary/whitebox-orphan-ms.summary
@@ -1,87 +1,87 @@
 Current cluster status:
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]
     * GuestOnline: [ lxc1@18node1 lxc2@18node1 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started 18node2
     * FencingPass (stonith:fence_dummy): Started 18node3
     * FencingFail (stonith:fence_dummy): Started 18node3
     * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
     * rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node2
     * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
     * migrator (ocf:pacemaker:Dummy): Started 18node1
     * Clone Set: Connectivity [ping-1]:
       * Started: [ 18node1 18node2 18node3 ]
     * Clone Set: master-1 [stateful-1] (promotable):
-      * Masters: [ 18node1 ]
-      * Slaves: [ 18node2 18node3 ]
+      * Promoted: [ 18node1 ]
+      * Unpromoted: [ 18node2 18node3 ]
     * Resource Group: group-1:
       * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
       * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
       * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
     * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
     * container2 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1
     * lxc1 (ocf:pacemaker:remote): ORPHANED Started 18node1
-    * lxc-ms (ocf:pacemaker:Stateful): ORPHANED Master [ lxc1 lxc2 ]
+    * lxc-ms (ocf:pacemaker:Stateful): ORPHANED Promoted [ lxc1 lxc2 ]
     * lxc2 (ocf:pacemaker:remote): ORPHANED Started 18node1
     * container1 (ocf:heartbeat:VirtualDomain): ORPHANED Started 18node1

 Transition Summary:
   * Move FencingFail ( 18node3 -> 18node1 )
   * Stop container2 ( 18node1 ) due to node availability
   * Stop lxc1 ( 18node1 ) due to node availability
-  * Stop lxc-ms ( Master lxc1 ) due to node availability
-  * Stop lxc-ms ( Master lxc2 ) due to node availability
+  * Stop lxc-ms ( Promoted lxc1 ) due to node availability
+  * Stop lxc-ms ( Promoted lxc2 ) due to node availability
   * Stop lxc2 ( 18node1 ) due to node availability
   * Stop container1 ( 18node1 ) due to node availability

 Executing Cluster Transition:
   * Resource action: FencingFail stop on 18node3
   * Resource action: lxc-ms demote on lxc2
   * Resource action: lxc-ms demote on lxc1
   * Resource action: FencingFail start on 18node1
   * Resource action: lxc-ms stop on lxc2
   * Resource action: lxc-ms stop on lxc1
   * Resource action: lxc-ms delete on 18node3
   * Resource action: lxc-ms delete on 18node2
   * Resource action: lxc-ms delete on 18node1
   * Resource action: lxc2 stop on 18node1
   * Resource action: lxc2 delete on 18node3
   * Resource action: lxc2 delete on 18node2
   * Resource action: lxc2 delete on 18node1
   * Resource action: container2 stop on 18node1
   * Resource action: container2 delete on 18node3
   * Resource action: container2 delete on 18node2
   * Resource action: container2 delete on 18node1
   * Resource action: lxc1 stop on 18node1
   * Resource action: lxc1 delete on 18node3
   * Resource action: lxc1 delete on 18node2
   * Resource action: lxc1 delete on 18node1
   * Resource action: container1 stop on 18node1
   * Resource action: container1 delete on 18node3
   * Resource action: container1 delete on 18node2
   * Resource action: container1 delete on 18node1

 Revised Cluster Status:
   * Node List:
     * Online: [ 18node1 18node2 18node3 ]

   * Full List of Resources:
     * Fencing (stonith:fence_xvm): Started 18node2
     * FencingPass (stonith:fence_dummy): Started 18node3
     * FencingFail (stonith:fence_dummy): Started 18node1
     * rsc_18node1 (ocf:heartbeat:IPaddr2): Started 18node1
     * rsc_18node2 (ocf:heartbeat:IPaddr2): Started 18node2
     * rsc_18node3 (ocf:heartbeat:IPaddr2): Started 18node3
     * migrator (ocf:pacemaker:Dummy): Started 18node1
     * Clone Set: Connectivity [ping-1]:
       * Started: [ 18node1 18node2 18node3 ]
     * Clone Set: master-1 [stateful-1] (promotable):
-      * Masters: [ 18node1 ]
-      * Slaves: [ 18node2 18node3 ]
+      * Promoted: [ 18node1 ]
+      * Unpromoted: [ 18node2 18node3 ]
     * Resource Group: group-1:
       * r192.168.122.87 (ocf:heartbeat:IPaddr2): Started 18node1
       * r192.168.122.88 (ocf:heartbeat:IPaddr2): Started 18node1
       * r192.168.122.89 (ocf:heartbeat:IPaddr2): Started 18node1
     * lsb-dummy (lsb:/usr/share/pacemaker/tests/cts/LSBDummy): Started 18node1
diff --git a/cts/scheduler/summary/year-2038.summary b/cts/scheduler/summary/year-2038.summary
index 2c63975748..dc276ada27 100644
--- a/cts/scheduler/summary/year-2038.summary
+++ b/cts/scheduler/summary/year-2038.summary
@@ -1,112 +1,112 @@
 Using the original execution date of: 2038-02-17 06:13:20Z
 Current cluster status:
   * Node List:
     * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-1 controller-2 ]
     * RemoteOnline: [ overcloud-novacompute-0 ]
     * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]

   * Full List of Resources:
     * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
     * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED controller-1
     * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
       * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
       * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
       * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
     * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Master controller-2
-      * galera-bundle-1 (ocf:heartbeat:galera): Master controller-0
-      * galera-bundle-2 (ocf:heartbeat:galera): Master controller-1
+      * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
+      * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
+      * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
     * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2
-      * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0
-      * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1
+      * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
+      * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
+      * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
     * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
     * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Stopped
     * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
     * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
     * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
     * ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
     * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
       * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
       * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
     * stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2
     * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
       * compute-unfence-trigger (ocf:pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN)
       * Started: [ overcloud-novacompute-0 ]
       * Stopped: [ controller-0 controller-1 controller-2 ]
     * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
     * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
     * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
     * stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
     * stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0

 Transition Summary:
   * Fence (reboot) overcloud-novacompute-1 'remote connection is unrecoverable'
   * Stop overcloud-novacompute-1 ( controller-1 ) due to node availability
   * Start ip-10.0.0.110 ( controller-1 )
   * Recover stonith-fence_compute-fence-nova ( controller-2 )
   * Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability

 Executing Cluster Transition:
   * Resource action: overcloud-novacompute-1 stop on controller-1
   * Resource action: stonith-fence_compute-fence-nova stop on controller-2
   * Fencing overcloud-novacompute-1 (reboot)
   * Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1
   * Resource action: ip-10.0.0.110 start on controller-1
   * Resource action: stonith-fence_compute-fence-nova start on controller-2
   * Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2
   * Pseudo action: compute-unfence-trigger-clone_stop_0
   * Resource action: ip-10.0.0.110 monitor=10000 on controller-1
   * Pseudo action: compute-unfence-trigger_stop_0
   * Pseudo action: compute-unfence-trigger-clone_stopped_0
 Using the original execution date of: 2038-02-17 06:13:20Z

 Revised Cluster Status:
   * Node List:
     * RemoteNode overcloud-novacompute-1: UNCLEAN (offline)
     * Online: [ controller-0 controller-1 controller-2 ]
     * RemoteOnline: [ overcloud-novacompute-0 ]
     * GuestOnline: [ galera-bundle-0@controller-2 galera-bundle-1@controller-0 galera-bundle-2@controller-1 rabbitmq-bundle-0@controller-2 rabbitmq-bundle-1@controller-0 rabbitmq-bundle-2@controller-1 redis-bundle-0@controller-2 redis-bundle-1@controller-0 redis-bundle-2@controller-1 ]

   * Full List of Resources:
     * overcloud-novacompute-0 (ocf:pacemaker:remote): Started controller-0
     * overcloud-novacompute-1 (ocf:pacemaker:remote): FAILED
     * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
       * rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started controller-2
       * rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Started controller-0
       * rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started controller-1
     * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
-      * galera-bundle-0 (ocf:heartbeat:galera): Master controller-2
-      * galera-bundle-1 (ocf:heartbeat:galera): Master controller-0
-      * galera-bundle-2 (ocf:heartbeat:galera): Master controller-1
+      * galera-bundle-0 (ocf:heartbeat:galera): Promoted controller-2
+      * galera-bundle-1 (ocf:heartbeat:galera): Promoted controller-0
+      * galera-bundle-2 (ocf:heartbeat:galera): Promoted controller-1
     * Container bundle set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest]:
-      * redis-bundle-0 (ocf:heartbeat:redis): Master controller-2
-      * redis-bundle-1 (ocf:heartbeat:redis): Slave controller-0
-      * redis-bundle-2 (ocf:heartbeat:redis): Slave controller-1
+      * redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
+      * redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
+      * redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
     * ip-192.168.24.11 (ocf:heartbeat:IPaddr2): Started controller-2
     * ip-10.0.0.110 (ocf:heartbeat:IPaddr2): Started controller-1
     * ip-172.17.1.14 (ocf:heartbeat:IPaddr2): Started controller-1
     * ip-172.17.1.17 (ocf:heartbeat:IPaddr2): Started controller-2
     * ip-172.17.3.11 (ocf:heartbeat:IPaddr2): Started controller-0
     * ip-172.17.4.17 (ocf:heartbeat:IPaddr2): Started controller-1
     * Container bundle set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest]:
       * haproxy-bundle-docker-0 (ocf:heartbeat:docker): Started controller-2
       * haproxy-bundle-docker-1 (ocf:heartbeat:docker): Started controller-0
       * haproxy-bundle-docker-2 (ocf:heartbeat:docker): Started controller-1
     * stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2
     * Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
       * Started: [ overcloud-novacompute-0 ]
       * Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ]
     * nova-evacuate (ocf:openstack:NovaEvacuate): Started controller-0
     * stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1
     * stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0
     * stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2
     * stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2
     * stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1
     * Container bundle: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest]:
       * openstack-cinder-volume-docker-0 (ocf:heartbeat:docker): Started controller-0