diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index ea9c660f53..c7718fa092 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1514 +1,1515 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division
__copyright__ = "Copyright 2004-2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
DESC = """Regression tests for Pacemaker's scheduler"""
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
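# For illustration, entries take either of these forms (examples drawn from
# the list below):
#   [ "simple1", "Offline" ]                        # base name + description
#   [ "date-1", "Dates", [ "-t", "2005-020" ] ]     # with extra test arguments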
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "bug-lf-2613", "Move group on failure" ],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-3", "Params: Restart instead of reload if start pending" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : master" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
[ "rule-dbl-as-auto-number-match",
"Floating-point rule values default to number comparison: match" ],
[ "rule-dbl-as-auto-number-no-match",
"Floating-point rule values default to number comparison: no "
"match" ],
[ "rule-dbl-as-integer-match",
"Floating-point rule values set to integer comparison: match" ],
[ "rule-dbl-as-integer-no-match",
"Floating-point rule values set to integer comparison: no match" ],
[ "rule-dbl-as-number-match",
"Floating-point rule values set to number comparison: match" ],
[ "rule-dbl-as-number-no-match",
"Floating-point rule values set to number comparison: no match" ],
[ "rule-dbl-parse-fail-default-str-match",
"Floating-point rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-dbl-parse-fail-default-str-no-match",
"Floating-point rule values fail to parse, default to string "
"comparison: no match" ],
[ "rule-int-as-auto-integer-match",
"Integer rule values default to integer comparison: match" ],
[ "rule-int-as-auto-integer-no-match",
"Integer rule values default to integer comparison: no match" ],
[ "rule-int-as-integer-match",
"Integer rule values set to integer comparison: match" ],
[ "rule-int-as-integer-no-match",
"Integer rule values set to integer comparison: no match" ],
[ "rule-int-as-number-match",
"Integer rule values set to number comparison: match" ],
[ "rule-int-as-number-no-match",
"Integer rule values set to number comparison: no match" ],
[ "rule-int-parse-fail-default-str-match",
"Integer rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-int-parse-fail-default-str-no-match",
"Integer rule values fail to parse, default to string "
"comparison: no match" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_master_w_native",
"cl#5070 - Verify promotion order is affected when colocating master to native rsc" ],
[ "colo_slave_w_native",
"cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ],
[ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
[ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
[ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-master", "Resource Sets - Master" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
# @TODO: If pacemaker implements versioned attributes, uncomment this test
#[ "migrate-versioned", "Disable migration for versioned resources" ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ],
[ "clone-colocate-instance-2", "Colocation with a specific clone instance" ],
[ "clone-order-instance", "Ordering with specific clone instances" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "master-0", "Stopped -> Slave" ],
[ "master-1", "Stopped -> Promote" ],
[ "master-2", "Stopped -> Promote : notify" ],
[ "master-3", "Stopped -> Promote : master location" ],
[ "master-4", "Started -> Promote : master location" ],
[ "master-5", "Promoted -> Promoted" ],
[ "master-6", "Promoted -> Promoted (2)" ],
[ "master-7", "Promoted -> Fenced" ],
[ "master-8", "Promoted -> Fenced -> Moved" ],
[ "master-9", "Stopped + Promotable + No quorum" ],
[ "master-10", "Stopped -> Promotable : notify with monitor" ],
[ "master-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable master placement" ],
[ "master-12", "Promotion based solely on rsc_location constraints" ],
[ "master-13", "Include preferences of colocated resources when placing master" ],
[ "master-demote", "Ordering when actions depends on demoting a slave resource" ],
[ "master-ordering", "Prevent resources from starting that need a master" ],
[ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ],
[ "master-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ],
[ "master-failed-demote", "Don't retry failed demote actions" ],
[ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "master-depend",
"Ensure resources that depend on the master don't get allocated until the master does" ],
[ "master-reattach", "Re-attach to a running master" ],
[ "master-allow-start", "Don't include master score if it would prevent allocation" ],
[ "master-colocation",
"Allow master instances placemaker to be influenced by colocation constraints" ],
[ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "master-role", "Prevent target-role from promoting more than master-max instances" ],
[ "bug-lf-2358", "Master-Master anti-colocation" ],
[ "master-promotion-constraint", "Mandatory master colocation constraints" ],
[ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ],
[ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ],
[ "master-demote-2", "Demote does not clear past failure" ],
[ "master-move", "Move master based on failure of colocated group" ],
[ "master-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_master",
"cl#5054 - Ensure master is demoted when stopped by order constraint" ],
[ "order_constraint_stops_slave",
"cl#5054 - Ensure slave is not demoted when stopped by order constraint" ],
[ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ],
[ "master-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "master-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "master-stop", "Stop instances due to location constraint with role=Started" ],
[ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "master-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "master-notify", "Master promotion with notifies" ],
[ "master-score-startup", "Use permanent master scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
[ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
[ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
[ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
[ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
[ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
[ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
+ [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on master/slave" ],
[ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ],
[ "bug-1685", "Depends-on-master ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "bug-5007-masterslave_colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ],
[ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ],
[ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ],
[ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ],
[ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ],
[ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ],
[ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ],
[ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ],
[ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ],
[ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ],
[ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ],
[ "ticket-master-12", "Ticket - Master (loss-policy=freeze, revoked)" ],
[ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ],
[ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ],
[ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ],
[ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ],
[ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ],
[ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ],
[ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ],
[ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ],
[ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ],
[ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ],
[ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ],
[ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
[ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
# @TODO: If pacemaker implements versioned attributes, uncomment these tests
#[
# [ "versioned-resources", "Start resources with #ra-version rules" ],
# [ "restart-versioned", "Restart resources on #ra-version change" ],
# [ "reload-versioned", "Reload resources on #ra-version change" ],
#],
#[
# [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ],
# [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ],
# [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ],
#],
]
TESTS_64BIT = [
[
[ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
],
]
# Constants substituted in the build process
class BuildVars(object):
SBINDIR = "@sbindir@"
BUILDDIR = "@abs_top_builddir@"
CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@"
# These values must be kept in sync with include/crm/crm.h
class CrmExit(object):
OK = 0
ERROR = 1
NOT_INSTALLED = 5
NOINPUT = 66
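# Illustrative note: these mirror Pacemaker's crm_exit_t values; NOINPUT (66)
# corresponds to EX_NOINPUT from <sysexits.h>.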
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
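# Illustrative example: normalize() drops volatile attributes so graph
# comparisons ignore them; a line such as
#   <cib crm_feature_set="3.2.0" batch-limit="30" epoch="5"/>
# becomes
#   <cib   epoch="5"/>
# (the leftover whitespace is harmless, since diff() ignores all space).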
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so split that off manually
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
self.args = parser.parse_args(argv[1:])
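# Illustrative example of the split above: given
#   argv = ['cts-scheduler', '-V', '--run', 'simple1', '--extra-opt']
# argparse parses ['-V', '--run', 'simple1'], while ['--extra-opt']
# (a hypothetical crm_simulate option) lands in self.single_test_args and
# is appended to the crm_simulate command line by run_one().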
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
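# Illustrative note: G_SLICE=always-malloc makes GLib bypass its slice
# allocator in favor of plain malloc(), so valgrind can attribute every
# allocation; the suppressions file then hides known-benign reports.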
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildVars.SBINDIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(CrmExit.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildVars.BUILDDIR, "xml"),
BuildVars.CRM_SCHEMA_DIRECTORY ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
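# Illustrative note: in a build tree this usually resolves to the in-tree
# "xml" directory, so crm_simulate validates test CIBs against the freshly
# built schemas rather than any installed copy.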
def __init__(self, argv=sys.argv):
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
with io.open("/dev/null", "wt") as dev_null:
if diff(filename1, filename2, stdout=dev_null) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null)
self.failed_file.write("\n")
return True
return False
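# Illustrative note: on a mismatch, diff runs twice -- once with output
# discarded to /dev/null purely to detect the difference, then again with
# stdout aimed at self.failed_file so the unified diff is recorded.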
def run_one(self, test_name, test_desc, test_args=None):
""" Run one scheduler test """
if test_args is None:
    test_args = []
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = "%s/%s.xml" % (self.args.io_dir, test_name)
expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name)
dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name)
scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name)
summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name)
stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name)
# (Intermediate) test outputs
output_filename = "%s/%s.out" % (self.args.out_dir, test_name)
dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name)
score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name)
summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name)
stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name)
valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return CrmExit.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return CrmExit.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ)
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != CrmExit.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return CrmExit.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return CrmExit.ERROR
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
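# Illustrative note: the graph body is uniquely sorted (keeping the
# "digraph" header and closing brace in place) so the comparison with the
# stored .dot file does not depend on the order in which edges were emitted.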
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return CrmExit.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return CrmExit.ERROR
return CrmExit.OK
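# Illustrative example: run_one("simple1", "Offline") simulates
# scheduler/simple1.xml and compares the generated graph, dot file,
# scores, and summary against the stored simple1.{exp,dot,scores,summary}.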
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group:
try:
args = test[2]
except IndexError:
args = []
self.run_one(test[0], test[1], args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args != []:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
return CrmExit.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
os.remove(self.failed_filename)
return CrmExit.ERROR
def run(self):
""" Run test(s) as specified """
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
cat(self.failed_filename)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
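# Illustrative invocations (assuming the generated cts-scheduler script):
#   ./cts-scheduler                    # run the full regression suite
#   ./cts-scheduler -V --run simple1   # run one test (-V must precede --run,
#                                      # since later arguments go to crm_simulate)
#   ./cts-scheduler --update           # regenerate all expected outputs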
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/guest-host-not-fenceable.dot b/cts/scheduler/guest-host-not-fenceable.dot
index 98833f6160..a086fcb4ab 100644
--- a/cts/scheduler/guest-host-not-fenceable.dot
+++ b/cts/scheduler/guest-host-not-fenceable.dot
@@ -1,262 +1,267 @@
digraph "g" {
"Cancel galera-bundle-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
"Cancel rabbitmq-bundle-0_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-0_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-0_start_0 node1" -> "galera-bundle-0_monitor_30000 node1" [ style = dashed]
"galera-bundle-0_start_0 node1" -> "galera_clear_failcount_0 galera-bundle-0" [ style = dashed]
"galera-bundle-0_start_0 node1" -> "galera_monitor_10000 galera-bundle-0" [ style = dashed]
"galera-bundle-0_start_0 node1" -> "galera_start_0 galera-bundle-0" [ style = dashed]
"galera-bundle-0_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-0_stop_0 node1" -> "Cancel galera-bundle-0_monitor_60000 node1" [ style = bold]
"galera-bundle-0_stop_0 node1" -> "galera-bundle-0_start_0 node1" [ style = dashed]
"galera-bundle-0_stop_0 node1" -> "galera-bundle-docker-0_stop_0 node1" [ style = bold]
"galera-bundle-0_stop_0 node1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-1_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-1_start_0 node1" -> "galera-bundle-1_monitor_30000 node1" [ style = dashed]
-"galera-bundle-1_start_0 node1" -> "galera_monitor_10000 galera-bundle-1" [ style = dashed]
+"galera-bundle-1_start_0 node1" -> "galera_monitor_20000 galera-bundle-1" [ style = dashed]
+"galera-bundle-1_start_0 node1" -> "galera_monitor_30000 galera-bundle-1" [ style = dashed]
"galera-bundle-1_start_0 node1" -> "galera_start_0 galera-bundle-1" [ style = dashed]
"galera-bundle-1_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-1_stop_0 node2" -> "galera-bundle-1_start_0 node1" [ style = dashed]
"galera-bundle-1_stop_0 node2" -> "galera-bundle-docker-1_stop_0 node2" [ style = dashed]
"galera-bundle-1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-2_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-2_start_0 node1" -> "galera-bundle-2_monitor_30000 node1" [ style = dashed]
"galera-bundle-2_start_0 node1" -> "galera_monitor_20000 galera-bundle-2" [ style = dashed]
"galera-bundle-2_start_0 node1" -> "galera_monitor_30000 galera-bundle-2" [ style = dashed]
"galera-bundle-2_start_0 node1" -> "galera_start_0 galera-bundle-2" [ style = dashed]
"galera-bundle-2_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-2_stop_0 node3" -> "galera-bundle-2_start_0 node1" [ style = dashed]
"galera-bundle-2_stop_0 node3" -> "galera-bundle-docker-2_stop_0 node3" [ style = dashed]
"galera-bundle-2_stop_0 node3" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-docker-0_monitor_60000 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-docker-0_start_0 node1" -> "galera-bundle-0_start_0 node1" [ style = dashed]
"galera-bundle-docker-0_start_0 node1" -> "galera-bundle-docker-0_monitor_60000 node1" [ style = dashed]
"galera-bundle-docker-0_start_0 node1" -> "galera-bundle_running_0" [ style = dashed]
"galera-bundle-docker-0_start_0 node1" -> "galera_start_0 galera-bundle-0" [ style = dashed]
"galera-bundle-docker-0_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-docker-0_stop_0 node1" -> "galera-bundle-docker-0_start_0 node1" [ style = dashed]
"galera-bundle-docker-0_stop_0 node1" -> "galera-bundle_stopped_0" [ style = bold]
"galera-bundle-docker-0_stop_0 node1" [ style=bold color="green" fontcolor="black"]
"galera-bundle-docker-1_stop_0 node2" -> "galera-bundle_stopped_0" [ style = dashed]
"galera-bundle-docker-1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-docker-2_stop_0 node3" -> "galera-bundle_stopped_0" [ style = dashed]
"galera-bundle-docker-2_stop_0 node3" [ style=dashed color="red" fontcolor="black"]
"galera-bundle-master_demote_0" -> "galera-bundle-master_demoted_0" [ style = bold]
"galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-0" [ style = bold]
"galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-1" [ style = dashed]
"galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-2" [ style = dashed]
"galera-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle-master_demoted_0" -> "galera-bundle-master_start_0" [ style = dashed]
"galera-bundle-master_demoted_0" -> "galera-bundle-master_stop_0" [ style = bold]
"galera-bundle-master_demoted_0" -> "galera-bundle_demoted_0" [ style = bold]
"galera-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle-master_running_0" -> "galera-bundle_running_0" [ style = dashed]
"galera-bundle-master_running_0" [ style=dashed color="red" fontcolor="orange"]
"galera-bundle-master_start_0" -> "galera-bundle-master_running_0" [ style = dashed]
"galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-0" [ style = dashed]
"galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-1" [ style = dashed]
"galera-bundle-master_start_0" -> "galera_start_0 galera-bundle-2" [ style = dashed]
"galera-bundle-master_start_0" [ style=dashed color="red" fontcolor="orange"]
"galera-bundle-master_stop_0" -> "galera-bundle-master_stopped_0" [ style = bold]
"galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
"galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-1" [ style = dashed]
"galera-bundle-master_stop_0" -> "galera_stop_0 galera-bundle-2" [ style = dashed]
"galera-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle-master_stopped_0" -> "galera-bundle-master_start_0" [ style = dashed]
"galera-bundle-master_stopped_0" -> "galera-bundle_stopped_0" [ style = bold]
"galera-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle_demote_0" -> "galera-bundle-master_demote_0" [ style = bold]
"galera-bundle_demote_0" -> "galera-bundle_demoted_0" [ style = bold]
"galera-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle_demoted_0" -> "galera-bundle_start_0" [ style = dashed]
"galera-bundle_demoted_0" -> "galera-bundle_stop_0" [ style = bold]
"galera-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle_running_0" [ style=dashed color="red" fontcolor="orange"]
"galera-bundle_start_0" -> "galera-bundle-docker-0_start_0 node1" [ style = dashed]
"galera-bundle_start_0" -> "galera-bundle-master_start_0" [ style = dashed]
"galera-bundle_start_0" [ style=dashed color="red" fontcolor="orange"]
"galera-bundle_stop_0" -> "galera-bundle-docker-0_stop_0 node1" [ style = bold]
"galera-bundle_stop_0" -> "galera-bundle-docker-1_stop_0 node2" [ style = dashed]
"galera-bundle_stop_0" -> "galera-bundle-docker-2_stop_0 node3" [ style = dashed]
"galera-bundle_stop_0" -> "galera-bundle-master_stop_0" [ style = bold]
"galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
"galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-1" [ style = dashed]
"galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-2" [ style = dashed]
"galera-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
"galera-bundle_stopped_0" -> "galera-bundle_start_0" [ style = dashed]
"galera-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
"galera_clear_failcount_0 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
"galera_demote_0 galera-bundle-0" -> "galera-bundle-0_stop_0 node1" [ style = bold]
"galera_demote_0 galera-bundle-0" -> "galera-bundle-master_demoted_0" [ style = bold]
"galera_demote_0 galera-bundle-0" -> "galera_stop_0 galera-bundle-0" [ style = bold]
"galera_demote_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"]
"galera_demote_0 galera-bundle-1" -> "galera-bundle-master_demoted_0" [ style = dashed]
"galera_demote_0 galera-bundle-1" -> "galera_demote_0 galera-bundle-0" [ style = dashed]
+"galera_demote_0 galera-bundle-1" -> "galera_monitor_20000 galera-bundle-1" [ style = dashed]
+"galera_demote_0 galera-bundle-1" -> "galera_monitor_30000 galera-bundle-1" [ style = dashed]
"galera_demote_0 galera-bundle-1" -> "galera_stop_0 galera-bundle-1" [ style = dashed]
"galera_demote_0 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_demote_0 galera-bundle-2" -> "galera-bundle-master_demoted_0" [ style = dashed]
"galera_demote_0 galera-bundle-2" -> "galera_demote_0 galera-bundle-1" [ style = dashed]
"galera_demote_0 galera-bundle-2" -> "galera_monitor_20000 galera-bundle-2" [ style = dashed]
"galera_demote_0 galera-bundle-2" -> "galera_monitor_30000 galera-bundle-2" [ style = dashed]
"galera_demote_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-2" [ style = dashed]
"galera_demote_0 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
"galera_monitor_10000 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
-"galera_monitor_10000 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
+"galera_monitor_20000 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_monitor_20000 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
+"galera_monitor_30000 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_monitor_30000 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
"galera_start_0 galera-bundle-0" -> "galera-bundle-master_running_0" [ style = dashed]
"galera_start_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = dashed]
"galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-1" [ style = dashed]
"galera_start_0 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
"galera_start_0 galera-bundle-1" -> "galera-bundle-master_running_0" [ style = dashed]
-"galera_start_0 galera-bundle-1" -> "galera_monitor_10000 galera-bundle-1" [ style = dashed]
+"galera_start_0 galera-bundle-1" -> "galera_monitor_20000 galera-bundle-1" [ style = dashed]
+"galera_start_0 galera-bundle-1" -> "galera_monitor_30000 galera-bundle-1" [ style = dashed]
"galera_start_0 galera-bundle-1" -> "galera_start_0 galera-bundle-2" [ style = dashed]
"galera_start_0 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_start_0 galera-bundle-2" -> "galera-bundle-master_running_0" [ style = dashed]
"galera_start_0 galera-bundle-2" -> "galera_monitor_20000 galera-bundle-2" [ style = dashed]
"galera_start_0 galera-bundle-2" -> "galera_monitor_30000 galera-bundle-2" [ style = dashed]
"galera_start_0 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
"galera_stop_0 galera-bundle-0" -> "galera-bundle-0_stop_0 node1" [ style = bold]
"galera_stop_0 galera-bundle-0" -> "galera-bundle-master_stopped_0" [ style = bold]
"galera_stop_0 galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = dashed]
"galera_stop_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"]
"galera_stop_0 galera-bundle-1" -> "galera-bundle-master_stopped_0" [ style = dashed]
"galera_stop_0 galera-bundle-1" -> "galera_start_0 galera-bundle-1" [ style = dashed]
"galera_stop_0 galera-bundle-1" -> "galera_stop_0 galera-bundle-0" [ style = dashed]
"galera_stop_0 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_stop_0 galera-bundle-2" -> "galera-bundle-master_stopped_0" [ style = dashed]
"galera_stop_0 galera-bundle-2" -> "galera_start_0 galera-bundle-2" [ style = dashed]
"galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-1" [ style = dashed]
"galera_stop_0 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-0_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-0_start_0 node1" -> "rabbitmq-bundle-0_monitor_30000 node1" [ style = dashed]
"rabbitmq-bundle-0_start_0 node1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq-bundle-0_start_0 node1" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq-bundle-0_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-0_stop_0 node1" -> "Cancel rabbitmq-bundle-0_monitor_60000 node1" [ style = bold]
"rabbitmq-bundle-0_stop_0 node1" -> "rabbitmq-bundle-0_start_0 node1" [ style = dashed]
"rabbitmq-bundle-0_stop_0 node1" -> "rabbitmq-bundle-docker-0_stop_0 node1" [ style = bold]
"rabbitmq-bundle-0_stop_0 node1" [ style=bold color="green" fontcolor="black"]
"rabbitmq-bundle-1_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-1_start_0 node1" -> "rabbitmq-bundle-1_monitor_30000 node1" [ style = dashed]
"rabbitmq-bundle-1_start_0 node1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle-1_start_0 node1" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle-1_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-1_stop_0 node2" -> "rabbitmq-bundle-1_start_0 node1" [ style = dashed]
"rabbitmq-bundle-1_stop_0 node2" -> "rabbitmq-bundle-docker-1_stop_0 node2" [ style = dashed]
"rabbitmq-bundle-1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-2_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-2_start_0 node1" -> "rabbitmq-bundle-2_monitor_30000 node1" [ style = dashed]
"rabbitmq-bundle-2_start_0 node1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle-2_start_0 node1" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle-2_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-2_stop_0 node3" -> "rabbitmq-bundle-2_start_0 node1" [ style = dashed]
"rabbitmq-bundle-2_stop_0 node3" -> "rabbitmq-bundle-docker-2_stop_0 node3" [ style = dashed]
"rabbitmq-bundle-2_stop_0 node3" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-clone_confirmed-post_notify_running_0" -> "rabbitmq-bundle_running_0" [ style = dashed]
"rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" -> "rabbitmq-bundle-clone_pre_notify_start_0" [ style = dashed]
"rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" -> "rabbitmq-bundle_stopped_0" [ style = bold]
"rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = dashed]
"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = dashed]
"rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" -> "rabbitmq-bundle-clone_post_notify_stopped_0" [ style = bold]
"rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold]
"rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-clone_post_notify_running_0" -> "rabbitmq-bundle-clone_confirmed-post_notify_running_0" [ style = dashed]
"rabbitmq-bundle-clone_post_notify_running_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_post_notify_stopped_0" -> "rabbitmq-bundle-clone_confirmed-post_notify_stopped_0" [ style = bold]
"rabbitmq-bundle-clone_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-clone_pre_notify_start_0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_start_0" [ style = dashed]
"rabbitmq-bundle-clone_pre_notify_start_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = bold]
"rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-0" [ style = bold]
"rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle-clone_pre_notify_stop_0" -> "rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle-clone_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-clone_running_0" -> "rabbitmq-bundle-clone_post_notify_running_0" [ style = dashed]
"rabbitmq-bundle-clone_running_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_start_0" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq-bundle-clone_start_0" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq-bundle-clone_start_0" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle-clone_start_0" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle-clone_stop_0" -> "rabbitmq-bundle-clone_stopped_0" [ style = bold]
"rabbitmq-bundle-clone_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = bold]
"rabbitmq-bundle-clone_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle-clone_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-clone_stopped_0" -> "rabbitmq-bundle-clone_post_notify_stopped_0" [ style = bold]
"rabbitmq-bundle-clone_stopped_0" -> "rabbitmq-bundle-clone_start_0" [ style = dashed]
"rabbitmq-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle-docker-0_monitor_60000 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-docker-0_start_0 node1" -> "rabbitmq-bundle-0_start_0 node1" [ style = dashed]
"rabbitmq-bundle-docker-0_start_0 node1" -> "rabbitmq-bundle-docker-0_monitor_60000 node1" [ style = dashed]
"rabbitmq-bundle-docker-0_start_0 node1" -> "rabbitmq-bundle_running_0" [ style = dashed]
"rabbitmq-bundle-docker-0_start_0 node1" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq-bundle-docker-0_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-docker-0_stop_0 node1" -> "rabbitmq-bundle-docker-0_start_0 node1" [ style = dashed]
"rabbitmq-bundle-docker-0_stop_0 node1" -> "rabbitmq-bundle_stopped_0" [ style = bold]
"rabbitmq-bundle-docker-0_stop_0 node1" [ style=bold color="green" fontcolor="black"]
"rabbitmq-bundle-docker-1_stop_0 node2" -> "rabbitmq-bundle_stopped_0" [ style = dashed]
"rabbitmq-bundle-docker-1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-docker-2_stop_0 node3" -> "rabbitmq-bundle_stopped_0" [ style = dashed]
"rabbitmq-bundle-docker-2_stop_0 node3" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle_running_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle_start_0" -> "rabbitmq-bundle-clone_start_0" [ style = dashed]
"rabbitmq-bundle_start_0" -> "rabbitmq-bundle-docker-0_start_0 node1" [ style = dashed]
"rabbitmq-bundle_start_0" [ style=dashed color="red" fontcolor="orange"]
"rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-clone_stop_0" [ style = bold]
"rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-docker-0_stop_0 node1" [ style = bold]
"rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-docker-1_stop_0 node2" [ style = dashed]
"rabbitmq-bundle_stop_0" -> "rabbitmq-bundle-docker-2_stop_0 node3" [ style = dashed]
"rabbitmq-bundle_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = bold]
"rabbitmq-bundle_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq-bundle_stop_0" -> "rabbitmq_stop_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
"rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_monitor_10000 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_monitor_10000 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = bold]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-0" [ style=bold color="green" fontcolor="black"]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = dashed]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_confirmed-pre_notify_stop_0" [ style = dashed]
"rabbitmq_pre_notify_stop_0 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_start_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-2" -> "rabbitmq_monitor_10000 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_stop_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-0_stop_0 node1" [ style = bold]
"rabbitmq_stop_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_stopped_0" [ style = bold]
"rabbitmq_stop_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-0" [ style=bold color="green" fontcolor="black"]
"rabbitmq_stop_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_stopped_0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-1" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-1" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_stopped_0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_stop_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node2_monitor_60000 node1" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node2_start_0 node1" -> "stonith-fence_ipmilan-node2_monitor_60000 node1" [ style = dashed]
"stonith-fence_ipmilan-node2_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node2_stop_0 node3" -> "stonith-fence_ipmilan-node2_start_0 node1" [ style = dashed]
"stonith-fence_ipmilan-node2_stop_0 node3" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node3_monitor_60000 node1" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node3_start_0 node1" -> "stonith-fence_ipmilan-node3_monitor_60000 node1" [ style = dashed]
"stonith-fence_ipmilan-node3_start_0 node1" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node3_stop_0 node2" -> "stonith-fence_ipmilan-node3_start_0 node1" [ style = dashed]
"stonith-fence_ipmilan-node3_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
}
diff --git a/cts/scheduler/guest-host-not-fenceable.exp b/cts/scheduler/guest-host-not-fenceable.exp
index b9293d7f47..0631c402de 100644
--- a/cts/scheduler/guest-host-not-fenceable.exp
+++ b/cts/scheduler/guest-host-not-fenceable.exp
@@ -1,366 +1,366 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0">
<action_set>
- <rsc_op id="110" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:0_pre_notify_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1">
+ <rsc_op id="111" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:0_pre_notify_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1">
<primitive id="rabbitmq" long-id="rabbitmq:0" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="node1 node2 node3 node1 node2 node3 node1 node2 node3" CRM_meta_notify_all_uname="galera-bundle-0 galera-bundle-1 galera-bundle-2 node1 node2 node3 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_available_uname="node1 node2 node3 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="rabbitmq-bundle-0" CRM_meta_on_node_uuid="rabbitmq-bundle-0" CRM_meta_physical_host="node1" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<pseudo_event id="48" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="1">
<action_set>
<rsc_op id="32" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1">
<primitive id="rabbitmq" long-id="rabbitmq:0" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="node1 node2 node3 node1 node2 node3 node1 node2 node3" CRM_meta_notify_all_uname="galera-bundle-0 galera-bundle-1 galera-bundle-2 node1 node2 node3 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_available_uname="node1 node2 node3 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_on_node="rabbitmq-bundle-0" CRM_meta_on_node_uuid="rabbitmq-bundle-0" CRM_meta_physical_host="node1" CRM_meta_timeout="200000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<pseudo_event id="30" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
</trigger>
<trigger>
<pseudo_event id="46" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="2" priority="1000000">
<action_set>
<pseudo_event id="51" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<pseudo_event id="50" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="3" priority="1000000">
<action_set>
<pseudo_event id="50" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<pseudo_event id="47" operation="stopped" operation_key="rabbitmq-bundle-clone_stopped_0"/>
</trigger>
<trigger>
<pseudo_event id="49" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="4">
<action_set>
<pseudo_event id="49" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<pseudo_event id="48" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
</trigger>
<trigger>
- <rsc_op id="110" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:0_pre_notify_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1"/>
+ <rsc_op id="111" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:0_pre_notify_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1"/>
</trigger>
</inputs>
</synapse>
<synapse id="5">
<action_set>
<pseudo_event id="48" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs/>
</synapse>
<synapse id="6" priority="1000000">
<action_set>
<pseudo_event id="47" operation="stopped" operation_key="rabbitmq-bundle-clone_stopped_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<rsc_op id="32" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1"/>
</trigger>
<trigger>
<pseudo_event id="46" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="7">
<action_set>
<pseudo_event id="46" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<pseudo_event id="30" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
</trigger>
<trigger>
<pseudo_event id="49" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="8">
<action_set>
<rsc_op id="16" operation="stop" operation_key="rabbitmq-bundle-docker-0_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="rabbitmq-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
<attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/rabbitmq-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3122 -v /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/rabbitmq/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /var/lib/rabbitmq:/var/lib/rabbitmq:rw -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro -v /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro -v /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro -v /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro -v /var/log/containers/rabbitmq:/var/log/rabbitmq:rw -v /dev/log:/dev/log:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/rabbitmq-bundle-0:/var/log --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="17" operation="stop" operation_key="rabbitmq-bundle-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
<pseudo_event id="30" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="9">
<action_set>
<rsc_op id="17" operation="stop" operation_key="rabbitmq-bundle-0_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="rabbitmq-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="rabbitmq-bundle-docker-0" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3122"/>
<downed>
<node id="rabbitmq-bundle-0"/>
</downed>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="32" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="node1"/>
</trigger>
</inputs>
</synapse>
<synapse id="10">
<action_set>
<rsc_op id="5" operation="cancel" operation_key="rabbitmq-bundle-0_monitor_60000" on_node="node1" on_node_uuid="1">
<primitive id="rabbitmq-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_call_id="9" CRM_meta_container="rabbitmq-bundle-docker-0" CRM_meta_interval="60000" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_operation="monitor" CRM_meta_timeout="20000" addr="node1" port="3122"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="17" operation="stop" operation_key="rabbitmq-bundle-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
<synapse id="11">
<action_set>
<rsc_op id="68" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1">
<primitive id="galera" long-id="galera:0" class="ocf" provider="heartbeat" type="galera"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="false" CRM_meta_on_node="galera-bundle-0" CRM_meta_on_node_uuid="galera-bundle-0" CRM_meta_physical_host="node1" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" additional_parameters="--open-files-limit=16384" cluster_host_map="node1:node1.internalapi.localdomain;node2:node2.internalapi.localdomain;node3:node3.internalapi.localdomain" enable_creation="true" log="/var/log/mysql/mysqld.log" wsrep_cluster_address="gcomm://node1.internalapi.localdomain,node2.internalapi.localdomain,node3.internalapi.localdomain"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="84" operation="demote" operation_key="galera-bundle-master_demote_0"/>
+ <pseudo_event id="85" operation="demote" operation_key="galera-bundle-master_demote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="12">
<action_set>
<rsc_op id="10" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1">
<primitive id="galera" long-id="galera:0" class="ocf" provider="heartbeat" type="galera"/>
<attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="galera-bundle-0" CRM_meta_on_node_uuid="galera-bundle-0" CRM_meta_physical_host="node1" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" additional_parameters="--open-files-limit=16384" cluster_host_map="node1:node1.internalapi.localdomain;node2:node2.internalapi.localdomain;node3:node3.internalapi.localdomain" enable_creation="true" log="/var/log/mysql/mysqld.log" wsrep_cluster_address="gcomm://node1.internalapi.localdomain,node2.internalapi.localdomain,node3.internalapi.localdomain"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<pseudo_event id="66" operation="stop" operation_key="galera-bundle_stop_0"/>
</trigger>
<trigger>
<rsc_op id="68" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1"/>
</trigger>
<trigger>
- <pseudo_event id="80" operation="stop" operation_key="galera-bundle-master_stop_0"/>
+ <pseudo_event id="81" operation="stop" operation_key="galera-bundle-master_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="13" priority="1000000">
<action_set>
- <pseudo_event id="85" operation="demoted" operation_key="galera-bundle-master_demoted_0">
+ <pseudo_event id="86" operation="demoted" operation_key="galera-bundle-master_demoted_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<rsc_op id="68" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1"/>
</trigger>
<trigger>
- <pseudo_event id="84" operation="demote" operation_key="galera-bundle-master_demote_0"/>
+ <pseudo_event id="85" operation="demote" operation_key="galera-bundle-master_demote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="14">
<action_set>
- <pseudo_event id="84" operation="demote" operation_key="galera-bundle-master_demote_0">
+ <pseudo_event id="85" operation="demote" operation_key="galera-bundle-master_demote_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="88" operation="demote" operation_key="galera-bundle_demote_0"/>
+ <pseudo_event id="89" operation="demote" operation_key="galera-bundle_demote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="15" priority="1000000">
<action_set>
- <pseudo_event id="81" operation="stopped" operation_key="galera-bundle-master_stopped_0">
+ <pseudo_event id="82" operation="stopped" operation_key="galera-bundle-master_stopped_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<rsc_op id="10" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1"/>
</trigger>
<trigger>
- <pseudo_event id="80" operation="stop" operation_key="galera-bundle-master_stop_0"/>
+ <pseudo_event id="81" operation="stop" operation_key="galera-bundle-master_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="16">
<action_set>
- <pseudo_event id="80" operation="stop" operation_key="galera-bundle-master_stop_0">
+ <pseudo_event id="81" operation="stop" operation_key="galera-bundle-master_stop_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="3" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="3" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<pseudo_event id="66" operation="stop" operation_key="galera-bundle_stop_0"/>
</trigger>
<trigger>
- <pseudo_event id="85" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
+ <pseudo_event id="86" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="17">
<action_set>
<rsc_op id="52" operation="stop" operation_key="galera-bundle-docker-0_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="galera-bundle-docker-0" class="ocf" provider="heartbeat" type="docker"/>
<attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/galera-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" --restart=no -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3123 -v /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json:ro -v /var/lib/config-data/puppet-generated/mysql/:/var/lib/kolla/config_files/src:ro -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro -v /var/lib/mysql:/var/lib/mysql:rw -v /var/log/mariadb:/var/log/mariadb:rw -v /var/log/containers/mysql:/var/log/mysql:rw -v /dev/log:/dev/log:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/galera-bundle-0:/var/log --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="53" operation="stop" operation_key="galera-bundle-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
<pseudo_event id="66" operation="stop" operation_key="galera-bundle_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="18">
<action_set>
<rsc_op id="53" operation="stop" operation_key="galera-bundle-0_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="galera-bundle-docker-0" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3123"/>
<downed>
<node id="galera-bundle-0"/>
</downed>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="10" operation="stop" operation_key="galera_stop_0" internal_operation_key="galera:0_stop_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1"/>
</trigger>
<trigger>
<rsc_op id="68" operation="demote" operation_key="galera_demote_0" internal_operation_key="galera:0_demote_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="node1"/>
</trigger>
</inputs>
</synapse>
<synapse id="19">
<action_set>
<rsc_op id="4" operation="cancel" operation_key="galera-bundle-0_monitor_60000" on_node="node1" on_node_uuid="1">
<primitive id="galera-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_call_id="10" CRM_meta_container="galera-bundle-docker-0" CRM_meta_interval="60000" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_operation="monitor" CRM_meta_timeout="20000" addr="node1" port="3123"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
<rsc_op id="53" operation="stop" operation_key="galera-bundle-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
<synapse id="20" priority="1000000">
<action_set>
- <pseudo_event id="89" operation="demoted" operation_key="galera-bundle_demoted_0">
+ <pseudo_event id="90" operation="demoted" operation_key="galera-bundle_demoted_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="85" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
+ <pseudo_event id="86" operation="demoted" operation_key="galera-bundle-master_demoted_0"/>
</trigger>
<trigger>
- <pseudo_event id="88" operation="demote" operation_key="galera-bundle_demote_0"/>
+ <pseudo_event id="89" operation="demote" operation_key="galera-bundle_demote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="21">
<action_set>
- <pseudo_event id="88" operation="demote" operation_key="galera-bundle_demote_0">
+ <pseudo_event id="89" operation="demote" operation_key="galera-bundle_demote_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs/>
</synapse>
<synapse id="22" priority="1000000">
<action_set>
<pseudo_event id="67" operation="stopped" operation_key="galera-bundle_stopped_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<rsc_op id="52" operation="stop" operation_key="galera-bundle-docker-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <pseudo_event id="81" operation="stopped" operation_key="galera-bundle-master_stopped_0"/>
+ <pseudo_event id="82" operation="stopped" operation_key="galera-bundle-master_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="23">
<action_set>
<pseudo_event id="66" operation="stop" operation_key="galera-bundle_stop_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="89" operation="demoted" operation_key="galera-bundle_demoted_0"/>
+ <pseudo_event id="90" operation="demoted" operation_key="galera-bundle_demoted_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="24" priority="1000000">
<action_set>
<pseudo_event id="31" operation="stopped" operation_key="rabbitmq-bundle_stopped_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
<rsc_op id="16" operation="stop" operation_key="rabbitmq-bundle-docker-0_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
<pseudo_event id="51" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="25">
<action_set>
<pseudo_event id="30" operation="stop" operation_key="rabbitmq-bundle_stop_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs/>
</synapse>
</transition_graph>
diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.dot b/cts/scheduler/no-promote-on-unrunnable-guest.dot
new file mode 100644
index 0000000000..60636401b7
--- /dev/null
+++ b/cts/scheduler/no-promote-on-unrunnable-guest.dot
@@ -0,0 +1,128 @@
+ digraph "g" {
+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle-0_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovn-dbs-bundle-0_monitor_30000 controller-0" [ style = dashed]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers_monitor_30000 ovn-dbs-bundle-0" [ style = dashed]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-0" [ style = dashed]
+"ovn-dbs-bundle-0_start_0 controller-0" [ style=dashed color="red" fontcolor="black"]
+"ovn-dbs-bundle-0_stop_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-0" [ style = dashed]
+"ovn-dbs-bundle-0_stop_0 controller-0" -> "ovn-dbs-bundle-podman-0_stop_0 controller-0" [ style = bold]
+"ovn-dbs-bundle-0_stop_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle_running_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_post_notify_running_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold]
+"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold]
+"ovn-dbs-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_running_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold]
+"ovn-dbs-bundle-master_running_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold]
+"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-0" [ style = dashed]
+"ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold]
+"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold]
+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
+"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold]
+"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle-podman-0_stop_0 controller-0" -> "ovn-dbs-bundle_stopped_0" [ style = bold]
+"ovn-dbs-bundle-podman-0_stop_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
+"ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold]
+"ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold]
+"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-0_stop_0 controller-0" [ style = bold]
+"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
+"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold]
+"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_monitor_30000 ovn-dbs-bundle-0" [ style=dashed color="red" fontcolor="black"]
+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
+"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
+"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
+"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
+"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold]
+"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
+"ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = dashed]
+"ovndb_servers_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_monitor_30000 ovn-dbs-bundle-0" [ style = dashed]
+"ovndb_servers_start_0 ovn-dbs-bundle-0" [ style=dashed color="red" fontcolor="black"]
+"ovndb_servers_stop_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-0_stop_0 controller-0" [ style = bold]
+"ovndb_servers_stop_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold]
+"ovndb_servers_stop_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-0" [ style = dashed]
+"ovndb_servers_stop_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254005e097a_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254005e097a_start_0 controller-0" -> "stonith-fence_ipmilan-5254005e097a_monitor_60000 controller-0" [ style = bold]
+"stonith-fence_ipmilan-5254005e097a_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254005e097a_stop_0 controller-0" -> "stonith-fence_ipmilan-5254005e097a_start_0 controller-0" [ style = bold]
+"stonith-fence_ipmilan-5254005e097a_stop_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400985679_monitor_60000 controller-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400985679_start_0 controller-1" -> "stonith-fence_ipmilan-525400985679_monitor_60000 controller-1" [ style = bold]
+"stonith-fence_ipmilan-525400985679_start_0 controller-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400985679_stop_0 controller-1" -> "stonith-fence_ipmilan-525400985679_start_0 controller-1" [ style = bold]
+"stonith-fence_ipmilan-525400985679_stop_0 controller-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400afe30e_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400afe30e_start_0 controller-2" -> "stonith-fence_ipmilan-525400afe30e_monitor_60000 controller-2" [ style = bold]
+"stonith-fence_ipmilan-525400afe30e_start_0 controller-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-525400afe30e_stop_0 controller-2" -> "stonith-fence_ipmilan-525400afe30e_start_0 controller-2" [ style = bold]
+"stonith-fence_ipmilan-525400afe30e_stop_0 controller-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.exp b/cts/scheduler/no-promote-on-unrunnable-guest.exp
new file mode 100644
index 0000000000..4417f6e148
--- /dev/null
+++ b/cts/scheduler/no-promote-on-unrunnable-guest.exp
@@ -0,0 +1,647 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="203" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="187" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="184" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="201" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <rsc_op id="267" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="211" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="266" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="209" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4" priority="1000000">
+ <action_set>
+ <rsc_op id="263" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="205" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="262" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="203" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="192" operation="monitor" operation_key="ovndb_servers_monitor_10000" internal_operation_key="ovndb_servers:1_monitor_10000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Master" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="191" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="206" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="212" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="191" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="50000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:1_monitor_30000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="207" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="40" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:1_monitor_30000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Slave" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="9" priority="1000000">
+ <action_set>
+ <rsc_op id="269" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="211" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="268" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="209" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11" priority="1000000">
+ <action_set>
+ <rsc_op id="265" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="205" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="264" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="203" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13" priority="1000000">
+ <action_set>
+ <pseudo_event id="212" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="211" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="267" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="269" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14" priority="1000000">
+ <action_set>
+ <pseudo_event id="211" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="208" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="210" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <pseudo_event id="210" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="209" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="266" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="268" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <pseudo_event id="209" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="200" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="206" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="17" priority="1000000">
+ <action_set>
+ <pseudo_event id="208" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="191" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="18">
+ <action_set>
+ <pseudo_event id="207" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="196" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="202" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="210" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="219" operation="promote" operation_key="ovn-dbs-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19" priority="1000000">
+ <action_set>
+ <pseudo_event id="206" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="205" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="263" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="265" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="20" priority="1000000">
+ <action_set>
+ <pseudo_event id="205" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="202" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="204" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="21">
+ <action_set>
+ <pseudo_event id="204" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="203" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="262" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="264" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="22">
+ <action_set>
+ <pseudo_event id="203" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="23" priority="1000000">
+ <action_set>
+ <pseudo_event id="202" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="187" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="201" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="24">
+ <action_set>
+ <pseudo_event id="201" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="184" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="204" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="25" priority="1000000">
+ <action_set>
+ <pseudo_event id="200" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="199" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="26" priority="1000000">
+ <action_set>
+ <pseudo_event id="199" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="196" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="198" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="27">
+ <action_set>
+ <pseudo_event id="198" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="197" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="28">
+ <action_set>
+ <pseudo_event id="197" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="206" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="29" priority="1000000">
+ <action_set>
+ <pseudo_event id="196" operation="running" operation_key="ovn-dbs-bundle-master_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="195" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="30">
+ <action_set>
+ <pseudo_event id="195" operation="start" operation_key="ovn-dbs-bundle-master_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="198" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="202" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="31">
+ <action_set>
+ <rsc_op id="171" operation="stop" operation_key="ovn-dbs-bundle-podman-0_stop_0" on_node="controller-0" on_node_uuid="1">
+ <primitive id="ovn-dbs-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /usr/lib/ocf/resource.d/ovn:/usr/lib/ocf/resource.d/ovn:rw -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-0:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="172" operation="stop" operation_key="ovn-dbs-bundle-0_stop_0" on_node="controller-0" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="184" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="32">
+ <action_set>
+ <rsc_op id="172" operation="stop" operation_key="ovn-dbs-bundle-0_stop_0" on_node="controller-0" on_node_uuid="1">
+ <primitive id="ovn-dbs-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" addr="controller-0" port="3125"/>
+ <downed>
+ <node id="ovn-dbs-bundle-0"/>
+ </downed>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="187" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="33">
+ <action_set>
+ <rsc_op id="223" operation="stop" operation_key="stonith-fence_ipmilan-5254005e097a_stop_0" on_node="controller-0" on_node_uuid="1">
+ <primitive id="stonith-fence_ipmilan-5254005e097a" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6230" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-2"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="34">
+ <action_set>
+ <rsc_op id="42" operation="start" operation_key="stonith-fence_ipmilan-5254005e097a_start_0" on_node="controller-0" on_node_uuid="1">
+ <primitive id="stonith-fence_ipmilan-5254005e097a" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6230" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-2"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="223" operation="stop" operation_key="stonith-fence_ipmilan-5254005e097a_stop_0" on_node="controller-0" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="35">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="stonith-fence_ipmilan-5254005e097a_monitor_60000" on_node="controller-0" on_node_uuid="1">
+ <primitive id="stonith-fence_ipmilan-5254005e097a" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6230" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-2"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="42" operation="start" operation_key="stonith-fence_ipmilan-5254005e097a_start_0" on_node="controller-0" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="36">
+ <action_set>
+ <rsc_op id="224" operation="stop" operation_key="stonith-fence_ipmilan-525400afe30e_stop_0" on_node="controller-2" on_node_uuid="3">
+ <primitive id="stonith-fence_ipmilan-525400afe30e" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-1"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="37">
+ <action_set>
+ <rsc_op id="44" operation="start" operation_key="stonith-fence_ipmilan-525400afe30e_start_0" on_node="controller-2" on_node_uuid="3">
+ <primitive id="stonith-fence_ipmilan-525400afe30e" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="224" operation="stop" operation_key="stonith-fence_ipmilan-525400afe30e_stop_0" on_node="controller-2" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="38">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="stonith-fence_ipmilan-525400afe30e_monitor_60000" on_node="controller-2" on_node_uuid="3">
+ <primitive id="stonith-fence_ipmilan-525400afe30e" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-1"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="44" operation="start" operation_key="stonith-fence_ipmilan-525400afe30e_start_0" on_node="controller-2" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="39">
+ <action_set>
+ <rsc_op id="225" operation="stop" operation_key="stonith-fence_ipmilan-525400985679_stop_0" on_node="controller-1" on_node_uuid="2">
+ <primitive id="stonith-fence_ipmilan-525400985679" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6232" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-0"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="40">
+ <action_set>
+ <rsc_op id="43" operation="start" operation_key="stonith-fence_ipmilan-525400985679_start_0" on_node="controller-1" on_node_uuid="2">
+ <primitive id="stonith-fence_ipmilan-525400985679" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6232" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-0"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="225" operation="stop" operation_key="stonith-fence_ipmilan-525400985679_stop_0" on_node="controller-1" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="41">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="stonith-fence_ipmilan-525400985679_monitor_60000" on_node="controller-1" on_node_uuid="2">
+ <primitive id="stonith-fence_ipmilan-525400985679" class="stonith" type="fence_ipmilan"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.21" ipport="6232" lanplus="true" login="admin" passwd="****" pcmk_host_list="controller-0"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="start" operation_key="stonith-fence_ipmilan-525400985679_start_0" on_node="controller-1" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="42" priority="1000000">
+ <action_set>
+ <pseudo_event id="220" operation="promoted" operation_key="ovn-dbs-bundle_promoted_0">
+ <attributes CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="212" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="43">
+ <action_set>
+ <pseudo_event id="219" operation="promote" operation_key="ovn-dbs-bundle_promote_0">
+ <attributes CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="183" operation="running" operation_key="ovn-dbs-bundle_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="185" operation="stopped" operation_key="ovn-dbs-bundle_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="44" priority="1000000">
+ <action_set>
+ <pseudo_event id="185" operation="stopped" operation_key="ovn-dbs-bundle_stopped_0">
+ <attributes CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="171" operation="stop" operation_key="ovn-dbs-bundle-podman-0_stop_0" on_node="controller-0" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="206" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="45">
+ <action_set>
+ <pseudo_event id="184" operation="stop" operation_key="ovn-dbs-bundle_stop_0">
+ <attributes CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="46" priority="1000000">
+ <action_set>
+ <pseudo_event id="183" operation="running" operation_key="ovn-dbs-bundle_running_0">
+ <attributes CRM_meta_timeout="120000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="200" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.scores b/cts/scheduler/no-promote-on-unrunnable-guest.scores
new file mode 100644
index 0000000000..f368ad4418
--- /dev/null
+++ b/cts/scheduler/no-promote-on-unrunnable-guest.scores
@@ -0,0 +1,269 @@
+Allocation scores:
+Using the original execution date of: 2020-05-14 10:49:31Z
+galera:0 promotion score on galera-bundle-0: 100
+galera:1 promotion score on galera-bundle-1: 100
+galera:2 promotion score on galera-bundle-2: 100
+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5
+ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5
+ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5
+pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
+pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
+pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-0: 0
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-1: 0
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-2: 0
+pcmk__clone_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__clone_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__clone_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 0
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 0
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 0
+pcmk__clone_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
+pcmk__clone_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
+pcmk__clone_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: 0
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: 0
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: 0
+pcmk__clone_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__clone_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__clone_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-0: 0
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-1: 0
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-2: 0
+pcmk__clone_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__clone_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__clone_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__native_allocate: galera-bundle-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: galera-bundle-0 allocation score on controller-1: 0
+pcmk__native_allocate: galera-bundle-0 allocation score on controller-2: 0
+pcmk__native_allocate: galera-bundle-1 allocation score on controller-0: 0
+pcmk__native_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: galera-bundle-1 allocation score on controller-2: 0
+pcmk__native_allocate: galera-bundle-2 allocation score on controller-0: 0
+pcmk__native_allocate: galera-bundle-2 allocation score on controller-1: 0
+pcmk__native_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-1: 0
+pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-2: 0
+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-2: 0
+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__native_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__native_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
+pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-1: -10000
+pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-2: -10000
+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0
+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0
+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0
+pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
+pcmk__native_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
+pcmk__native_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: 0
+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: 0
+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: 0
+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__native_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__native_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__native_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: redis-bundle-0 allocation score on controller-1: 0
+pcmk__native_allocate: redis-bundle-0 allocation score on controller-2: 0
+pcmk__native_allocate: redis-bundle-1 allocation score on controller-0: 0
+pcmk__native_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: redis-bundle-1 allocation score on controller-2: 0
+pcmk__native_allocate: redis-bundle-2 allocation score on controller-0: 0
+pcmk__native_allocate: redis-bundle-2 allocation score on controller-1: 0
+pcmk__native_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-1: 0
+pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-2: 0
+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-2: 0
+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__native_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__native_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__native_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__native_allocate: stonith-fence_ipmilan-5254005e097a allocation score on controller-0: INFINITY
+pcmk__native_allocate: stonith-fence_ipmilan-5254005e097a allocation score on controller-1: 0
+pcmk__native_allocate: stonith-fence_ipmilan-5254005e097a allocation score on controller-2: -10000
+pcmk__native_allocate: stonith-fence_ipmilan-525400985679 allocation score on controller-0: -10000
+pcmk__native_allocate: stonith-fence_ipmilan-525400985679 allocation score on controller-1: INFINITY
+pcmk__native_allocate: stonith-fence_ipmilan-525400985679 allocation score on controller-2: 0
+pcmk__native_allocate: stonith-fence_ipmilan-525400afe30e allocation score on controller-0: 0
+pcmk__native_allocate: stonith-fence_ipmilan-525400afe30e allocation score on controller-1: -10000
+pcmk__native_allocate: stonith-fence_ipmilan-525400afe30e allocation score on controller-2: INFINITY
+redis:0 promotion score on redis-bundle-0: 1
+redis:1 promotion score on redis-bundle-1: 1
+redis:2 promotion score on redis-bundle-2: 1
diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.summary b/cts/scheduler/no-promote-on-unrunnable-guest.summary
new file mode 100644
index 0000000000..fd6b926ac7
--- /dev/null
+++ b/cts/scheduler/no-promote-on-unrunnable-guest.summary
@@ -0,0 +1,113 @@
+Using the original execution date of: 2020-05-14 10:49:31Z
+
+Current cluster status:
+Online: [ controller-0 controller-1 controller-2 ]
+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ]
+
+ Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]
+ galera-bundle-0 (ocf::heartbeat:galera): Master controller-0
+ galera-bundle-1 (ocf::heartbeat:galera): Master controller-1
+ galera-bundle-2 (ocf::heartbeat:galera): Master controller-2
+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]
+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started controller-0
+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started controller-1
+ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started controller-2
+ Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]
+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-0
+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-1
+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-2
+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]
+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Slave controller-0
+ ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Slave controller-1
+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-2
+ stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0
+ stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2
+ stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1
+ Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]
+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0
+
+Transition Summary:
+ * Stop ovn-dbs-bundle-podman-0 ( controller-0 ) due to node availability
+ * Stop ovn-dbs-bundle-0 ( controller-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start
+ * Stop ovndb_servers:0 ( Slave ovn-dbs-bundle-0 ) due to unrunnable ovn-dbs-bundle-podman-0 start
+ * Promote ovndb_servers:1 ( Slave -> Master ovn-dbs-bundle-1 )
+ * Restart stonith-fence_ipmilan-5254005e097a ( controller-0 ) due to resource definition change
+ * Restart stonith-fence_ipmilan-525400afe30e ( controller-2 ) due to resource definition change
+ * Restart stonith-fence_ipmilan-525400985679 ( controller-1 ) due to resource definition change
+
+Executing cluster transition:
+ * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1
+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0
+ * Resource action: stonith-fence_ipmilan-5254005e097a stop on controller-0
+ * Resource action: stonith-fence_ipmilan-5254005e097a start on controller-0
+ * Resource action: stonith-fence_ipmilan-5254005e097a monitor=60000 on controller-0
+ * Resource action: stonith-fence_ipmilan-525400afe30e stop on controller-2
+ * Resource action: stonith-fence_ipmilan-525400afe30e start on controller-2
+ * Resource action: stonith-fence_ipmilan-525400afe30e monitor=60000 on controller-2
+ * Resource action: stonith-fence_ipmilan-525400985679 stop on controller-1
+ * Resource action: stonith-fence_ipmilan-525400985679 start on controller-1
+ * Resource action: stonith-fence_ipmilan-525400985679 monitor=60000 on controller-1
+ * Pseudo action: ovn-dbs-bundle_stop_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0
+ * Pseudo action: ovn-dbs-bundle-master_stop_0
+ * Resource action: ovndb_servers stop on ovn-dbs-bundle-0
+ * Pseudo action: ovn-dbs-bundle-master_stopped_0
+ * Resource action: ovn-dbs-bundle-0 stop on controller-0
+ * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0
+ * Resource action: ovn-dbs-bundle-podman-0 stop on controller-0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0
+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
+ * Pseudo action: ovn-dbs-bundle_stopped_0
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
+ * Pseudo action: ovn-dbs-bundle-master_start_0
+ * Pseudo action: ovn-dbs-bundle-master_running_0
+ * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0
+ * Pseudo action: ovn-dbs-bundle_running_0
+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0
+ * Pseudo action: ovn-dbs-bundle_promote_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0
+ * Pseudo action: ovn-dbs-bundle-master_promote_0
+ * Resource action: ovndb_servers promote on ovn-dbs-bundle-1
+ * Pseudo action: ovn-dbs-bundle-master_promoted_0
+ * Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0
+ * Pseudo action: ovn-dbs-bundle_promoted_0
+ * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1
+Using the original execution date of: 2020-05-14 10:49:31Z
+
+Revised cluster status:
+Online: [ controller-0 controller-1 controller-2 ]
+GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ]
+
+ Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]
+ galera-bundle-0 (ocf::heartbeat:galera): Master controller-0
+ galera-bundle-1 (ocf::heartbeat:galera): Master controller-1
+ galera-bundle-2 (ocf::heartbeat:galera): Master controller-2
+ Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]
+ rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started controller-0
+ rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started controller-1
+ rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started controller-2
+ Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]
+ redis-bundle-0 (ocf::heartbeat:redis): Master controller-0
+ redis-bundle-1 (ocf::heartbeat:redis): Slave controller-1
+ redis-bundle-2 (ocf::heartbeat:redis): Slave controller-2
+ Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]
+ ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Stopped
+ ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Master controller-1
+ ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-2
+ stonith-fence_ipmilan-5254005e097a (stonith:fence_ipmilan): Started controller-0
+ stonith-fence_ipmilan-525400afe30e (stonith:fence_ipmilan): Started controller-2
+ stonith-fence_ipmilan-525400985679 (stonith:fence_ipmilan): Started controller-1
+ Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]
+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0
+
diff --git a/cts/scheduler/no-promote-on-unrunnable-guest.xml b/cts/scheduler/no-promote-on-unrunnable-guest.xml
new file mode 100644
index 0000000000..5a66563e07
--- /dev/null
+++ b/cts/scheduler/no-promote-on-unrunnable-guest.xml
@@ -0,0 +1,802 @@
+<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="137" num_updates="3" admin_epoch="0" cib-last-written="Thu May 14 10:49:30 2020" update-origin="controller-0" update-client="crm_resource" update-user="root" have-quorum="1" dc-uuid="1" execution-date="1589453371">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8-4b1f869f0f"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="tripleo_cluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1589453354"/>
+ </cluster_property_set>
+ <cluster_property_set id="redis_replication">
+ <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="controller-0"/>
+ </cluster_property_set>
+ <cluster_property_set id="ovn_ovsdb_master_server">
+ <nvpair id="ovn_ovsdb_master_server-OVN_REPL_INFO" name="OVN_REPL_INFO" value="controller-0"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="controller-0">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-galera-role" name="galera-role" value="true"/>
+ <nvpair id="nodes-1-rabbitmq-role" name="rabbitmq-role" value="true"/>
+ <nvpair id="nodes-1-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-0"/>
+ <nvpair id="nodes-1-redis-role" name="redis-role" value="true"/>
+ <nvpair id="nodes-1-ovn-dbs-role" name="ovn-dbs-role" value="true"/>
+ <nvpair id="nodes-1-cinder-volume-role" name="cinder-volume-role" value="true"/>
+ </instance_attributes>
+ </node>
+ <node id="2" uname="controller-1">
+ <instance_attributes id="nodes-2">
+ <nvpair id="nodes-2-galera-role" name="galera-role" value="true"/>
+ <nvpair id="nodes-2-rabbitmq-role" name="rabbitmq-role" value="true"/>
+ <nvpair id="nodes-2-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-1"/>
+ <nvpair id="nodes-2-redis-role" name="redis-role" value="true"/>
+ <nvpair id="nodes-2-ovn-dbs-role" name="ovn-dbs-role" value="true"/>
+ <nvpair id="nodes-2-cinder-volume-role" name="cinder-volume-role" value="true"/>
+ </instance_attributes>
+ </node>
+ <node id="3" uname="controller-2">
+ <instance_attributes id="nodes-3">
+ <nvpair id="nodes-3-galera-role" name="galera-role" value="true"/>
+ <nvpair id="nodes-3-rabbitmq-role" name="rabbitmq-role" value="true"/>
+ <nvpair id="nodes-3-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-2"/>
+ <nvpair id="nodes-3-redis-role" name="redis-role" value="true"/>
+ <nvpair id="nodes-3-ovn-dbs-role" name="ovn-dbs-role" value="true"/>
+ <nvpair id="nodes-3-cinder-volume-role" name="cinder-volume-role" value="true"/>
+ </instance_attributes>
+ </node>
+ </nodes>
+ <resources>
+ <bundle id="galera-bundle">
+ <meta_attributes id="galera-bundle-meta_attributes"/>
+ <podman image="cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest" masters="3" network="host" options="--user=root --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/galera-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/>
+ <network control-port="3123"/>
+ <storage>
+ <storage-mapping id="mysql-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/mysql.json" target-dir="/var/lib/kolla/config_files/config.json"/>
+ <storage-mapping id="mysql-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/mysql/" target-dir="/var/lib/kolla/config_files/src"/>
+ <storage-mapping id="mysql-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/>
+ <storage-mapping id="mysql-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/>
+ <storage-mapping id="mysql-lib" options="rw" source-dir="/var/lib/mysql" target-dir="/var/lib/mysql"/>
+ <storage-mapping id="mysql-log-mariadb" options="rw" source-dir="/var/log/mariadb" target-dir="/var/log/mariadb"/>
+ <storage-mapping id="mysql-log" options="rw" source-dir="/var/log/containers/mysql" target-dir="/var/log/mysql"/>
+ <storage-mapping id="mysql-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/>
+ </storage>
+ <primitive class="ocf" id="galera" provider="heartbeat" type="galera">
+ <instance_attributes id="galera-instance_attributes">
+ <nvpair id="galera-instance_attributes-additional_parameters" name="additional_parameters" value="--open-files-limit=16384"/>
+ <nvpair id="galera-instance_attributes-cluster_host_map" name="cluster_host_map" value="controller-0:controller-0.internalapi.redhat.local;controller-1:controller-1.internalapi.redhat.local;controller-2:controller-2.internalapi.redhat.local"/>
+ <nvpair id="galera-instance_attributes-enable_creation" name="enable_creation" value="true"/>
+ <nvpair id="galera-instance_attributes-log" name="log" value="/var/log/mysql/mysqld.log"/>
+ <nvpair id="galera-instance_attributes-wsrep_cluster_address" name="wsrep_cluster_address" value="gcomm://controller-0.internalapi.redhat.local,controller-1.internalapi.redhat.local,controller-2.internalapi.redhat.local"/>
+ </instance_attributes>
+ <meta_attributes id="galera-meta_attributes">
+ <nvpair id="galera-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="galera-meta_attributes-master-max" name="master-max" value="3"/>
+ <nvpair id="galera-meta_attributes-ordered" name="ordered" value="true"/>
+ </meta_attributes>
+ <operations>
+ <op id="galera-demote-interval-0s" interval="0s" name="demote" timeout="120s"/>
+ <op id="galera-monitor-interval-20s" interval="20s" name="monitor" timeout="30s"/>
+ <op id="galera-monitor-interval-10s" interval="10s" name="monitor" role="Master" timeout="30s"/>
+ <op id="galera-monitor-interval-30s" interval="30s" name="monitor" role="Slave" timeout="30s"/>
+ <op id="galera-promote-interval-0s" interval="0s" name="promote" on-fail="block" timeout="300s"/>
+ <op id="galera-start-interval-0s" interval="0s" name="start" timeout="120s"/>
+ <op id="galera-stop-interval-0s" interval="0s" name="stop" timeout="120s"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="rabbitmq-bundle">
+ <meta_attributes id="rabbitmq-bundle-meta_attributes"/>
+ <podman image="cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest" network="host" options="--user=root --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/rabbitmq-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS -e LANG=en_US.UTF-8 -e LC_ALL=en_US.UTF-8" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/>
+ <network control-port="3122"/>
+ <storage>
+ <storage-mapping id="rabbitmq-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/rabbitmq.json" target-dir="/var/lib/kolla/config_files/config.json"/>
+ <storage-mapping id="rabbitmq-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/rabbitmq/" target-dir="/var/lib/kolla/config_files/src"/>
+ <storage-mapping id="rabbitmq-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/>
+ <storage-mapping id="rabbitmq-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/>
+ <storage-mapping id="rabbitmq-lib" options="rw" source-dir="/var/lib/rabbitmq" target-dir="/var/lib/rabbitmq"/>
+ <storage-mapping id="rabbitmq-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/>
+ <storage-mapping id="rabbitmq-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/>
+ <storage-mapping id="rabbitmq-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/>
+ <storage-mapping id="rabbitmq-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/>
+ <storage-mapping id="rabbitmq-log" options="rw" source-dir="/var/log/containers/rabbitmq" target-dir="/var/log/rabbitmq"/>
+ <storage-mapping id="rabbitmq-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/>
+ </storage>
+ <primitive class="ocf" id="rabbitmq" provider="heartbeat" type="rabbitmq-cluster">
+ <instance_attributes id="rabbitmq-instance_attributes">
+ <nvpair id="rabbitmq-instance_attributes-set_policy" name="set_policy" value="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;exactly&quot;,&quot;ha-params&quot;:2,&quot;ha-promote-on-shutdown&quot;:&quot;always&quot;}"/>
+ </instance_attributes>
+ <meta_attributes id="rabbitmq-meta_attributes">
+ <nvpair id="rabbitmq-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="rabbitmq-meta_attributes-notify" name="notify" value="true"/>
+ </meta_attributes>
+ <operations>
+ <op id="rabbitmq-monitor-interval-10s" interval="10s" name="monitor" timeout="40s"/>
+ <op id="rabbitmq-start-interval-0s" interval="0s" name="start" timeout="200s"/>
+ <op id="rabbitmq-stop-interval-0s" interval="0s" name="stop" timeout="200s"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="redis-bundle">
+ <meta_attributes id="redis-bundle-meta_attributes"/>
+ <podman image="cluster.common.tag/rhosp16-openstack-redis:pcmklatest" masters="1" network="host" options="--user=root --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/redis-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/>
+ <network control-port="3124"/>
+ <storage>
+ <storage-mapping id="redis-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/redis.json" target-dir="/var/lib/kolla/config_files/config.json"/>
+ <storage-mapping id="redis-cfg-data-redis" options="ro" source-dir="/var/lib/config-data/puppet-generated/redis/" target-dir="/var/lib/kolla/config_files/src"/>
+ <storage-mapping id="redis-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/>
+ <storage-mapping id="redis-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/>
+ <storage-mapping id="redis-lib" options="rw" source-dir="/var/lib/redis" target-dir="/var/lib/redis"/>
+ <storage-mapping id="redis-log" options="rw" source-dir="/var/log/containers/redis" target-dir="/var/log/redis"/>
+ <storage-mapping id="redis-run" options="rw,z" source-dir="/var/run/redis" target-dir="/var/run/redis"/>
+ <storage-mapping id="redis-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/>
+ <storage-mapping id="redis-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/>
+ <storage-mapping id="redis-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/>
+ <storage-mapping id="redis-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/>
+ <storage-mapping id="redis-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/>
+ </storage>
+ <primitive class="ocf" id="redis" provider="heartbeat" type="redis">
+ <instance_attributes id="redis-instance_attributes">
+ <nvpair id="redis-instance_attributes-wait_last_known_master" name="wait_last_known_master" value="true"/>
+ </instance_attributes>
+ <meta_attributes id="redis-meta_attributes">
+ <nvpair id="redis-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="redis-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="redis-meta_attributes-notify" name="notify" value="true"/>
+ <nvpair id="redis-meta_attributes-ordered" name="ordered" value="true"/>
+ </meta_attributes>
+ <operations>
+ <op id="redis-demote-interval-0s" interval="0s" name="demote" timeout="120s"/>
+ <op id="redis-monitor-interval-45s" interval="45s" name="monitor" timeout="60s"/>
+ <op id="redis-monitor-interval-20s" interval="20s" name="monitor" role="Master" timeout="60s"/>
+ <op id="redis-monitor-interval-60s" interval="60s" name="monitor" role="Slave" timeout="60s"/>
+ <op id="redis-notify-interval-0s" interval="0s" name="notify" timeout="90s"/>
+ <op id="redis-promote-interval-0s" interval="0s" name="promote" timeout="120s"/>
+ <op id="redis-start-interval-0s" interval="0s" name="start" timeout="200s"/>
+ <op id="redis-stop-interval-0s" interval="0s" name="stop" timeout="200s"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="ovn-dbs-bundle">
+ <meta_attributes id="ovn-dbs-bundle-meta_attributes"/>
+ <podman image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" masters="1" network="host" options="--log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/>
+ <network control-port="3125"/>
+ <storage>
+ <storage-mapping id="ovn-dbs-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/ovn_dbs.json" target-dir="/var/lib/kolla/config_files/config.json"/>
+ <storage-mapping id="ovn-dbs-mod-files" options="ro" source-dir="/lib/modules" target-dir="/lib/modules"/>
+ <storage-mapping id="ovn-dbs-run-files" options="rw" source-dir="/var/lib/openvswitch/ovn" target-dir="/run/openvswitch"/>
+ <storage-mapping id="ovn-dbs-log-files" options="rw" source-dir="/var/log/containers/openvswitch" target-dir="/var/log/openvswitch"/>
+ <storage-mapping id="ovn-dbs-db-path" options="rw" source-dir="/var/lib/openvswitch/ovn" target-dir="/etc/openvswitch"/>
+ <storage-mapping id="ovn-fuck" options="rw" source-dir="/usr/lib/ocf/resource.d/ovn" target-dir="/usr/lib/ocf/resource.d/ovn"/>
+ </storage>
+ <primitive class="ocf" id="ovndb_servers" provider="ovn" type="ovndb-servers">
+ <instance_attributes id="ovndb_servers-instance_attributes">
+ <nvpair id="ovndb_servers-instance_attributes-inactive_probe_interval" name="inactive_probe_interval" value="180000"/>
+ <nvpair id="ovndb_servers-instance_attributes-listen_on_master_ip_only" name="listen_on_master_ip_only" value="no"/>
+ <nvpair id="ovndb_servers-instance_attributes-manage_northd" name="manage_northd" value="yes"/>
+ <nvpair id="ovndb_servers-instance_attributes-master_ip" name="master_ip" value="172.17.1.247"/>
+ <nvpair id="ovndb_servers-instance_attributes-nb_master_port" name="nb_master_port" value="6641"/>
+ <nvpair id="ovndb_servers-instance_attributes-sb_master_port" name="sb_master_port" value="6642"/>
+ </instance_attributes>
+ <meta_attributes id="ovndb_servers-meta_attributes">
+ <nvpair id="ovndb_servers-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="ovndb_servers-meta_attributes-notify" name="notify" value="true"/>
+ <nvpair id="ovndb_servers-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="ovndb_servers-meta_attributes-ordered" name="ordered" value="true"/>
+ </meta_attributes>
+ <operations>
+ <op id="ovndb_servers-demote-interval-0s" interval="0s" name="demote" timeout="50s"/>
+ <op id="ovndb_servers-monitor-interval-10s" interval="10s" name="monitor" role="Master" timeout="60s"/>
+ <op id="ovndb_servers-monitor-interval-30s" interval="30s" name="monitor" role="Slave" timeout="60s"/>
+ <op id="ovndb_servers-notify-interval-0s" interval="0s" name="notify" timeout="20s"/>
+ <op id="ovndb_servers-promote-interval-0s" interval="0s" name="promote" timeout="50s"/>
+ <op id="ovndb_servers-start-interval-0s" interval="0s" name="start" timeout="200s"/>
+ <op id="ovndb_servers-stop-interval-0s" interval="0s" name="stop" timeout="200s"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="stonith" id="stonith-fence_ipmilan-5254005e097a" type="fence_ipmilan">
+ <meta_attributes id="stonith-fence_ipmilan-5254005e097a-meta_attributes"/>
+ <instance_attributes id="stonith-fence_ipmilan-5254005e097a-instance_attributes">
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-delay" name="delay" value="20"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.21"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-ipport" name="ipport" value="6230"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-lanplus" name="lanplus" value="true"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-login" name="login" value="admin"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-passwd" name="passwd" value="****"/>
+ <nvpair id="stonith-fence_ipmilan-5254005e097a-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="controller-2"/>
+ </instance_attributes>
+ <operations>
+ <op id="stonith-fence_ipmilan-5254005e097a-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="stonith-fence_ipmilan-525400afe30e" type="fence_ipmilan">
+ <meta_attributes id="stonith-fence_ipmilan-525400afe30e-meta_attributes"/>
+ <instance_attributes id="stonith-fence_ipmilan-525400afe30e-instance_attributes">
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-delay" name="delay" value="20"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.21"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-ipport" name="ipport" value="6231"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-lanplus" name="lanplus" value="true"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-login" name="login" value="admin"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-passwd" name="passwd" value="****"/>
+ <nvpair id="stonith-fence_ipmilan-525400afe30e-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="controller-1"/>
+ </instance_attributes>
+ <operations>
+ <op id="stonith-fence_ipmilan-525400afe30e-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="stonith-fence_ipmilan-525400985679" type="fence_ipmilan">
+ <meta_attributes id="stonith-fence_ipmilan-525400985679-meta_attributes"/>
+ <instance_attributes id="stonith-fence_ipmilan-525400985679-instance_attributes">
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-delay" name="delay" value="20"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.21"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-ipport" name="ipport" value="6232"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-lanplus" name="lanplus" value="true"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-login" name="login" value="admin"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-passwd" name="passwd" value="****"/>
+ <nvpair id="stonith-fence_ipmilan-525400985679-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="controller-0"/>
+ </instance_attributes>
+ <operations>
+ <op id="stonith-fence_ipmilan-525400985679-monitor-interval-60s" interval="60s" name="monitor"/>
+ </operations>
+ </primitive>
+ <bundle id="openstack-cinder-volume">
+ <meta_attributes id="openstack-cinder-volume-meta_attributes"/>
+ <podman image="cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest" network="host" options="--ipc=host --privileged=true --user=root --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/openstack-cinder-volume.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="1" run-command="/bin/bash /usr/local/bin/kolla_start"/>
+ <storage>
+ <storage-mapping id="cinder-volume-etc-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/>
+ <storage-mapping id="cinder-volume-etc-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/>
+ <storage-mapping id="cinder-volume-etc-pki-ca-trust-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/>
+ <storage-mapping id="cinder-volume-etc-pki-ca-trust-source-anchors" options="ro" source-dir="/etc/pki/ca-trust/source/anchors" target-dir="/etc/pki/ca-trust/source/anchors"/>
+ <storage-mapping id="cinder-volume-etc-pki-tls-certs-ca-bundle.crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/>
+ <storage-mapping id="cinder-volume-etc-pki-tls-certs-ca-bundle.trust.crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/>
+ <storage-mapping id="cinder-volume-etc-pki-tls-cert.pem" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/>
+ <storage-mapping id="cinder-volume-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/>
+ <storage-mapping id="cinder-volume-etc-puppet" options="ro" source-dir="/etc/puppet" target-dir="/etc/puppet"/>
+ <storage-mapping id="cinder-volume-var-lib-kolla-config_files-cinder_volume.json" options="ro" source-dir="/var/lib/kolla/config_files/cinder_volume.json" target-dir="/var/lib/kolla/config_files/config.json"/>
+ <storage-mapping id="cinder-volume-var-lib-config-data-puppet-generated-cinder" options="ro" source-dir="/var/lib/config-data/puppet-generated/cinder" target-dir="/var/lib/kolla/config_files/src"/>
+ <storage-mapping id="cinder-volume-etc-iscsi" options="ro" source-dir="/etc/iscsi" target-dir="/var/lib/kolla/config_files/src-iscsid"/>
+ <storage-mapping id="cinder-volume-etc-ceph" options="ro" source-dir="/etc/ceph" target-dir="/var/lib/kolla/config_files/src-ceph"/>
+ <storage-mapping id="cinder-volume-lib-modules" options="ro" source-dir="/lib/modules" target-dir="/lib/modules"/>
+ <storage-mapping id="cinder-volume-dev-" options="rw" source-dir="/dev/" target-dir="/dev/"/>
+ <storage-mapping id="cinder-volume-run-" options="rw" source-dir="/run/" target-dir="/run/"/>
+ <storage-mapping id="cinder-volume-sys" options="rw" source-dir="/sys" target-dir="/sys"/>
+ <storage-mapping id="cinder-volume-var-lib-cinder" options="z" source-dir="/var/lib/cinder" target-dir="/var/lib/cinder"/>
+ <storage-mapping id="cinder-volume-var-lib-iscsi" options="z" source-dir="/var/lib/iscsi" target-dir="/var/lib/iscsi"/>
+ <storage-mapping id="cinder-volume-var-log-containers-cinder" options="z" source-dir="/var/log/containers/cinder" target-dir="/var/log/cinder"/>
+ </storage>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-galera-bundle" resource-discovery="exclusive" rsc="galera-bundle">
+ <rule id="location-galera-bundle-rule" score="0">
+ <expression attribute="galera-role" id="location-galera-bundle-rule-expr" operation="eq" value="true"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="location-rabbitmq-bundle" resource-discovery="exclusive" rsc="rabbitmq-bundle">
+ <rule id="location-rabbitmq-bundle-rule" score="0">
+ <expression attribute="rabbitmq-role" id="location-rabbitmq-bundle-rule-expr" operation="eq" value="true"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="location-redis-bundle" resource-discovery="exclusive" rsc="redis-bundle">
+ <rule id="location-redis-bundle-rule" score="0">
+ <expression attribute="redis-role" id="location-redis-bundle-rule-expr" operation="eq" value="true"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="location-ovn-dbs-bundle" resource-discovery="exclusive" rsc="ovn-dbs-bundle">
+ <rule id="location-ovn-dbs-bundle-rule" score="0">
+ <expression attribute="ovn-dbs-role" id="location-ovn-dbs-bundle-rule-expr" operation="eq" value="true"/>
+ </rule>
+ </rsc_location>
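+ <!-- The next three constraints ban each fence_ipmilan device (score -10000) from the node it is responsible for fencing, so no node hosts its own fence device. -->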
+ <rsc_location id="location-stonith-fence_ipmilan-5254005e097a-controller-2--10000" node="controller-2" rsc="stonith-fence_ipmilan-5254005e097a" score="-10000"/>
+ <rsc_location id="location-stonith-fence_ipmilan-525400afe30e-controller-1--10000" node="controller-1" rsc="stonith-fence_ipmilan-525400afe30e" score="-10000"/>
+ <rsc_location id="location-stonith-fence_ipmilan-525400985679-controller-0--10000" node="controller-0" rsc="stonith-fence_ipmilan-525400985679" score="-10000"/>
+ <rsc_location id="location-openstack-cinder-volume" resource-discovery="exclusive" rsc="openstack-cinder-volume">
+ <rule id="location-openstack-cinder-volume-rule" score="0">
+ <expression attribute="cinder-volume-role" id="location-openstack-cinder-volume-rule-expr" operation="eq" value="true"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="cli-ban-ovn-dbs-bundle-on-controller-0" rsc="ovn-dbs-bundle" role="Started" node="controller-0" score="-INFINITY"/>
+ </constraints>
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-options">
+ <nvpair id="rsc_defaults-options-resource-stickiness" name="resource-stickiness" value="INFINITY"/>
+ </meta_attributes>
+ </rsc_defaults>
+ <op_defaults>
+ <meta_attributes id="op_defaults-options">
+ <nvpair id="op_defaults-options-timeout" name="timeout" value="120s"/>
+ </meta_attributes>
+ </op_defaults>
+ <fencing-topology>
+ <fencing-level devices="stonith-fence_ipmilan-525400afe30e" id="fl-controller-1-1" index="1" target="controller-1"/>
+ <fencing-level devices="stonith-fence_ipmilan-5254005e097a" id="fl-controller-2-1" index="1" target="controller-2"/>
+ <fencing-level devices="stonith-fence_ipmilan-525400985679" id="fl-controller-0-1" index="1" target="controller-0"/>
+ </fencing-topology>
+ </configuration>
+ <status>
+ <node_state id="1" uname="controller-0" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-master-galera" name="master-galera" value="100"/>
+ <nvpair id="status-1-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-0"/>
+ <nvpair id="status-1-master-ovndb_servers" name="master-ovndb_servers" value="5"/>
+ <nvpair id="status-1-master-redis" name="master-redis" value="1"/>
+ </instance_attributes>
+ </transient_attributes>
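+ <!-- The master-* transient attributes above are promotion scores: galera=100, ovndb_servers=5, redis=1; controller-1 advertises the same values in its node_state below. -->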
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-1">
+ <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="17:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;17:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="7" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-2">
+ <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="18:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;18:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-1" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-1">
+ <lrm_rsc_op id="ovn-dbs-bundle-1_last_0" operation_key="ovn-dbs-bundle-1_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="138:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;138:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="49" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="0" queue-time="0" op-digest="d00e87492de6b83a85450cd667db2a88" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-1_monitor_30000" operation_key="ovn-dbs-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="173:163:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;173:163:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="44" rc-code="0" op-status="0" interval="30000" last-rc-change="1589452454" exec-time="0" queue-time="0" op-digest="d32292f5736ec6365136aa382f396e66"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-0">
+ <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="105:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;105:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1589453023" last-run="1589453023" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="redis-bundle-0_monitor_30000" operation_key="redis-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="113:202:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;113:202:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="53" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453025" exec-time="0" queue-time="0" op-digest="efa64b64e8e502ff9a6c0dba02a626a4"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400985679" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400985679_last_0" operation_key="stonith-fence_ipmilan-525400985679_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="41:62:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;41:62:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="99" rc-code="7" op-status="0" interval="0" last-rc-change="1589446766" last-run="1589446766" exec-time="0" queue-time="0" op-digest="12668032635f19b4a55f6c86a5635e46" op-secure-params=" password passwd " op-secure-digest="7fb9bca9cf67b357b59395fc0e3718d4"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400afe30e" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400afe30e_last_0" operation_key="stonith-fence_ipmilan-525400afe30e_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="39:56:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;39:56:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="89" rc-code="7" op-status="0" interval="0" last-rc-change="1589446758" last-run="1589446758" exec-time="2" queue-time="0" op-digest="56d6e422b6e35b9d6cb07a9098eb1595" op-secure-params=" password passwd " op-secure-digest="228e5b173e3349b1237889a6e7a8fcf3"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-5254005e097a" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-5254005e097a_last_0" operation_key="stonith-fence_ipmilan-5254005e097a_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="217:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;217:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="94" rc-code="0" op-status="0" interval="0" last-rc-change="1589446763" last-run="1589446763" exec-time="387" queue-time="0" op-digest="81ab658b0b0f34a426326ca18b9c8bbb" op-secure-params=" password passwd " op-secure-digest="a712ea57511de5e70f22eb97eb7311df"/>
+ <lrm_rsc_op id="stonith-fence_ipmilan-5254005e097a_monitor_60000" operation_key="stonith-fence_ipmilan-5254005e097a_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="218:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;218:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="95" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446764" exec-time="389" queue-time="0" op-digest="9f1f1de9f2250c98891743b876ce0889" op-secure-params=" password passwd " op-secure-digest="a712ea57511de5e70f22eb97eb7311df"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-2" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-2">
+ <lrm_rsc_op id="ovn-dbs-bundle-2_last_0" operation_key="ovn-dbs-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="38:46:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;38:46:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1589446377" last-run="1589446377" exec-time="0" queue-time="0" op-digest="bf04cc8b2becfb29770a97d109ad6c4f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="openstack-cinder-volume-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="openstack-cinder-volume-podman-0_last_0" operation_key="openstack-cinder-volume-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="226:72:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;226:72:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="104" rc-code="0" op-status="0" interval="0" last-rc-change="1589446819" last-run="1589446819" exec-time="1265" queue-time="0" op-digest="5614e4a5b18ece4557c8e78b41caa5a3"/>
+ <lrm_rsc_op id="openstack-cinder-volume-podman-0_monitor_60000" operation_key="openstack-cinder-volume-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="227:72:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;227:72:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="105" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446820" exec-time="248" queue-time="0" op-digest="f147b55da3a7e7ed8a6184047734ab7b"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-0_last_0" operation_key="rabbitmq-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="13:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;13:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1589446233" last-run="1589446233" exec-time="1507" queue-time="0" op-digest="feeb9bad40e564c797bb4f8fa96d4295"/>
+ <lrm_rsc_op id="rabbitmq-bundle-podman-0_monitor_60000" operation_key="rabbitmq-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="3:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;3:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="42" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446235" exec-time="339" queue-time="0" op-digest="922dfee439a2453b2c1410c9ee509b57"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-1_last_0" operation_key="rabbitmq-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="11:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;11:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="30" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="72" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-2_last_0" operation_key="rabbitmq-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="12:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;12:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="34" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="96" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-0">
+ <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="20:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;20:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="galera-bundle-0_monitor_30000" operation_key="galera-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="11:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;11:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446138" exec-time="0" queue-time="0" op-digest="28b6c6af28bc390f48c7288b5bdccdb7"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-1">
+ <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="8:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;8:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1589446137" last-run="1589446137" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-2">
+ <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="9:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;9:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-2">
+ <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="27:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;27:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1589446293" last-run="1589446293" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-1">
+ <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="26:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;26:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1589446293" last-run="1589446293" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-1_last_0" operation_key="ovn-dbs-bundle-podman-1_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="137:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;137:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="767" queue-time="0" op-digest="58778de337ae96452fc829b4a7f22c8d"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-1_monitor_60000" operation_key="ovn-dbs-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="168:162:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;168:162:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1589452453" exec-time="325" queue-time="0" op-digest="5b98682c109506c46f921295192a5723"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-0_last_0" operation_key="galera-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="4:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;4:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1589446135" last-run="1589446135" exec-time="1381" queue-time="0" op-digest="95caf297c558176f6dffeedd540e6f84"/>
+ <lrm_rsc_op id="galera-bundle-podman-0_monitor_60000" operation_key="galera-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="1:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;1:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446136" exec-time="323" queue-time="0" op-digest="ec261ef7b0dcb291d95068e147ad3ec9"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-1_last_0" operation_key="galera-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="2:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;2:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="84" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-2_last_0" operation_key="galera-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="3:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;3:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="76" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-2_last_0" operation_key="ovn-dbs-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="32:43:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;32:43:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="76" rc-code="7" op-status="0" interval="0" last-rc-change="1589446368" last-run="1589446368" exec-time="83" queue-time="0" op-digest="f572c78235795d25e72ae0801a5b32b9"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-0_last_0" operation_key="redis-bundle-podman-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="103:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;103:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="148" rc-code="0" op-status="0" interval="0" last-rc-change="1589453022" last-run="1589453022" exec-time="1773" queue-time="0" op-digest="d14ff6267c598be94bbc797573ee0f70"/>
+ <lrm_rsc_op id="redis-bundle-podman-0_monitor_60000" operation_key="redis-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="104:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;104:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="149" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453023" exec-time="328" queue-time="0" op-digest="a079bd5d2761c2d56f62e4d71d6c2ade"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-1_last_0" operation_key="redis-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="20:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;20:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="51" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="78" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-2_last_0" operation_key="redis-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="21:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;21:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="55" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="64" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-0" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-0">
+ <lrm_rsc_op id="ovn-dbs-bundle-0_last_0" operation_key="ovn-dbs-bundle-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="131:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;131:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="50" rc-code="0" op-status="0" interval="0" last-rc-change="1589453020" last-run="1589453020" exec-time="0" queue-time="0" op-digest="d00e87492de6b83a85450cd667db2a88" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-0_monitor_30000" operation_key="ovn-dbs-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="139:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;139:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="51" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453021" exec-time="0" queue-time="0" op-digest="d32292f5736ec6365136aa382f396e66"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-0">
+ <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="69:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;69:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rabbitmq-bundle-0_monitor_30000" operation_key="rabbitmq-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="60:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;60:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="10" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446237" exec-time="0" queue-time="0" op-digest="f3eacf73dcd1387f8598f91d67d24fe9"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_last_0" operation_key="ovn-dbs-bundle-podman-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="129:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;129:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="146" rc-code="0" op-status="0" interval="0" last-rc-change="1589453018" last-run="1589453018" exec-time="1900" queue-time="1" op-digest="fb796976ed0593e611680e74e3f50680"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_monitor_60000" operation_key="ovn-dbs-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="130:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;130:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453020" exec-time="331" queue-time="0" op-digest="6d1b8bd447411df56487161c04e4b787"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="controller-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-master-galera" name="master-galera" value="100"/>
+ <nvpair id="status-2-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-1"/>
+ <nvpair id="status-2-master-ovndb_servers" name="master-ovndb_servers" value="5"/>
+ <nvpair id="status-2-master-redis" name="master-redis" value="1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-1">
+ <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="72:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;72:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rabbitmq-bundle-1_monitor_30000" operation_key="rabbitmq-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="65:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;65:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="10" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446238" exec-time="0" queue-time="0" op-digest="61c3a926b0d3bd428b481dd6e40b1745"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-2">
+ <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="21:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;21:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-1" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-1">
+ <lrm_rsc_op id="ovn-dbs-bundle-1_last_0" operation_key="ovn-dbs-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="135:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;135:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="47" rc-code="0" op-status="0" interval="0" last-rc-change="1589453020" last-run="1589453020" exec-time="0" queue-time="0" op-digest="015c999adc4143966a5e0ba98e5d74af" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-1_monitor_30000" operation_key="ovn-dbs-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="144:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;144:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="48" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453021" exec-time="0" queue-time="0" op-digest="d80c68bbc581d1bd3b9e926370a2f8d5"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-0">
+ <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="28:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;28:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1589446294" last-run="1589446294" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400985679" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400985679_last_0" operation_key="stonith-fence_ipmilan-525400985679_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="223:64:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;223:64:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="98" rc-code="0" op-status="0" interval="0" last-rc-change="1589446770" last-run="1589446770" exec-time="329" queue-time="0" op-digest="12668032635f19b4a55f6c86a5635e46" op-secure-params=" password passwd " op-secure-digest="7fb9bca9cf67b357b59395fc0e3718d4"/>
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400985679_monitor_60000" operation_key="stonith-fence_ipmilan-525400985679_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="224:64:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;224:64:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="99" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446771" exec-time="321" queue-time="0" op-digest="dfdb2d6f80357af518d5317a5f90059c" op-secure-params=" password passwd " op-secure-digest="7fb9bca9cf67b357b59395fc0e3718d4"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400afe30e" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400afe30e_last_0" operation_key="stonith-fence_ipmilan-525400afe30e_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="40:56:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;40:56:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="89" rc-code="7" op-status="0" interval="0" last-rc-change="1589446758" last-run="1589446758" exec-time="2" queue-time="0" op-digest="56d6e422b6e35b9d6cb07a9098eb1595" op-secure-params=" password passwd " op-secure-digest="228e5b173e3349b1237889a6e7a8fcf3"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-5254005e097a" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-5254005e097a_last_0" operation_key="stonith-fence_ipmilan-5254005e097a_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="40:57:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;40:57:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="93" rc-code="7" op-status="0" interval="0" last-rc-change="1589446759" last-run="1589446759" exec-time="0" queue-time="1" op-digest="81ab658b0b0f34a426326ca18b9c8bbb" op-secure-params=" password passwd " op-secure-digest="a712ea57511de5e70f22eb97eb7311df"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-2" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-2">
+ <lrm_rsc_op id="ovn-dbs-bundle-2_last_0" operation_key="ovn-dbs-bundle-2_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="140:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;140:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="46" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="0" queue-time="0" op-digest="015c999adc4143966a5e0ba98e5d74af" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-2_monitor_30000" operation_key="ovn-dbs-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="147:196:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;147:196:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="45" rc-code="0" op-status="0" interval="30000" last-rc-change="1589452905" exec-time="0" queue-time="0" op-digest="d80c68bbc581d1bd3b9e926370a2f8d5"/>
+ </lrm_resource>
+ <lrm_resource id="openstack-cinder-volume-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="openstack-cinder-volume-podman-0_last_0" operation_key="openstack-cinder-volume-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="43:70:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;43:70:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="103" rc-code="7" op-status="0" interval="0" last-rc-change="1589446815" last-run="1589446815" exec-time="89" queue-time="0" op-digest="5614e4a5b18ece4557c8e78b41caa5a3"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-0_last_0" operation_key="rabbitmq-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="13:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;13:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="26" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="75" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-1_last_0" operation_key="rabbitmq-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="14:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;14:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1589446233" last-run="1589446233" exec-time="1482" queue-time="0" op-digest="0518b5a908865705d60c72d51446bb73"/>
+ <lrm_rsc_op id="rabbitmq-bundle-podman-1_monitor_60000" operation_key="rabbitmq-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="6:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;6:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446235" exec-time="304" queue-time="0" op-digest="26ddb64c20e02fd3dcb27e13a94b5136"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-2_last_0" operation_key="rabbitmq-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="15:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;15:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="34" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="104" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-0">
+ <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="10:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;10:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-1">
+ <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="23:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;23:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1589446137" last-run="1589446137" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="galera-bundle-1_monitor_30000" operation_key="galera-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="16:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;16:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446138" exec-time="0" queue-time="0" op-digest="1cb41a6c33cc25b47ebeff7bb61c3eeb"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-2">
+ <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="12:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;12:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-2">
+ <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="30:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;30:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1589446293" last-run="1589446293" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-1">
+ <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="119:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;119:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1589453175" last-run="1589453175" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="redis-bundle-1_monitor_30000" operation_key="redis-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="123:213:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;123:213:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="53" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453176" exec-time="0" queue-time="0" op-digest="a076da96e60da0ec2faccf5f38186767"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-1_last_0" operation_key="ovn-dbs-bundle-podman-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="133:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;133:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="140" rc-code="0" op-status="0" interval="0" last-rc-change="1589453018" last-run="1589453018" exec-time="1872" queue-time="0" op-digest="58778de337ae96452fc829b4a7f22c8d"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-1_monitor_60000" operation_key="ovn-dbs-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="134:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;134:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="141" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453020" exec-time="332" queue-time="0" op-digest="5b98682c109506c46f921295192a5723"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-0_last_0" operation_key="galera-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="4:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;4:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="79" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-1_last_0" operation_key="galera-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="5:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;5:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1589446135" last-run="1589446135" exec-time="1501" queue-time="0" op-digest="5ae9ebeec6b2ecf0240e593c43d5b89e"/>
+ <lrm_rsc_op id="galera-bundle-podman-1_monitor_60000" operation_key="galera-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="2:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;2:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446137" exec-time="319" queue-time="0" op-digest="f00764e4603ba7a985cdc5b0bebc0159"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-2_last_0" operation_key="galera-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="6:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;6:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="70" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-2_last_0" operation_key="ovn-dbs-bundle-podman-2_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="139:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;139:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="139" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="811" queue-time="0" op-digest="e133ab3f2709aeef1a782f2d0676e290"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-2_monitor_60000" operation_key="ovn-dbs-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="144:195:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;144:195:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1589452904" exec-time="328" queue-time="0" op-digest="d0f190c04691466af0b8d69c0cc6a17e"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-0_last_0" operation_key="redis-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="22:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;22:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="68" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-1_last_0" operation_key="redis-bundle-podman-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="117:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;117:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="146" rc-code="0" op-status="0" interval="0" last-rc-change="1589453173" last-run="1589453173" exec-time="1929" queue-time="0" op-digest="199c20c5684fcf3777f273dc84d2cc6a"/>
+ <lrm_rsc_op id="redis-bundle-podman-1_monitor_60000" operation_key="redis-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="118:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;118:212:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453175" exec-time="306" queue-time="0" op-digest="300e33eb82e96fbbe87f7ba883583960"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-2_last_0" operation_key="redis-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="24:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;24:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="55" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="75" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-0" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-0">
+ <lrm_rsc_op id="ovn-dbs-bundle-0_last_0" operation_key="ovn-dbs-bundle-0_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="166:165:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;166:165:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="36" rc-code="0" op-status="0" interval="0" last-rc-change="1589452498" last-run="1589452498" exec-time="0" queue-time="0" op-digest="015c999adc4143966a5e0ba98e5d74af" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-0_monitor_30000" operation_key="ovn-dbs-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="172:155:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;172:155:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="35" rc-code="0" op-status="0" interval="30000" last-rc-change="1589452245" exec-time="0" queue-time="0" op-digest="d80c68bbc581d1bd3b9e926370a2f8d5"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-0">
+ <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="19:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;19:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_last_0" operation_key="ovn-dbs-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="165:165:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;165:165:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="125" rc-code="0" op-status="0" interval="0" last-rc-change="1589452498" last-run="1589452498" exec-time="897" queue-time="0" op-digest="fb796976ed0593e611680e74e3f50680"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_monitor_60000" operation_key="ovn-dbs-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="167:154:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;167:154:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="123" rc-code="0" op-status="0" interval="60000" last-rc-change="1589452244" exec-time="313" queue-time="0" op-digest="6d1b8bd447411df56487161c04e4b787"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="controller-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-master-galera" name="master-galera" value="100"/>
+ <nvpair id="status-3-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-2"/>
+ <nvpair id="status-3-master-ovndb_servers" name="master-ovndb_servers" value="5"/>
+ <nvpair id="status-3-master-redis" name="master-redis" value="1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-1">
+ <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="23:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;23:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-2">
+ <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="75:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;75:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="7" rc-code="0" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rabbitmq-bundle-2_monitor_30000" operation_key="rabbitmq-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="70:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;70:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="10" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446238" exec-time="0" queue-time="0" op-digest="f765304166a359903e5aea671a7275d0"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-1" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-1">
+ <lrm_rsc_op id="ovn-dbs-bundle-1_last_0" operation_key="ovn-dbs-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="43:46:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;43:46:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="16" rc-code="7" op-status="0" interval="0" last-rc-change="1589446377" last-run="1589446377" exec-time="0" queue-time="0" op-digest="015c999adc4143966a5e0ba98e5d74af" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-0">
+ <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="31:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;31:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1589446294" last-run="1589446294" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400985679" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400985679_last_0" operation_key="stonith-fence_ipmilan-525400985679_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="43:62:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;43:62:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="99" rc-code="7" op-status="0" interval="0" last-rc-change="1589446766" last-run="1589446766" exec-time="0" queue-time="0" op-digest="12668032635f19b4a55f6c86a5635e46" op-secure-params=" password passwd " op-secure-digest="7fb9bca9cf67b357b59395fc0e3718d4"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-525400afe30e" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400afe30e_last_0" operation_key="stonith-fence_ipmilan-525400afe30e_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="219:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;219:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="94" rc-code="0" op-status="0" interval="0" last-rc-change="1589446763" last-run="1589446763" exec-time="387" queue-time="0" op-digest="56d6e422b6e35b9d6cb07a9098eb1595" op-secure-params=" password passwd " op-secure-digest="228e5b173e3349b1237889a6e7a8fcf3"/>
+ <lrm_rsc_op id="stonith-fence_ipmilan-525400afe30e_monitor_60000" operation_key="stonith-fence_ipmilan-525400afe30e_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="220:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;220:60:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="95" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446764" exec-time="393" queue-time="0" op-digest="feba67262a896a74d6e632cd8b76444b" op-secure-params=" password passwd " op-secure-digest="228e5b173e3349b1237889a6e7a8fcf3"/>
+ </lrm_resource>
+ <lrm_resource id="stonith-fence_ipmilan-5254005e097a" type="fence_ipmilan" class="stonith">
+ <lrm_rsc_op id="stonith-fence_ipmilan-5254005e097a_last_0" operation_key="stonith-fence_ipmilan-5254005e097a_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="41:57:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;41:57:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="93" rc-code="7" op-status="0" interval="0" last-rc-change="1589446759" last-run="1589446759" exec-time="0" queue-time="0" op-digest="81ab658b0b0f34a426326ca18b9c8bbb" op-secure-params=" password passwd " op-secure-digest="a712ea57511de5e70f22eb97eb7311df"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-2" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-2">
+ <lrm_rsc_op id="ovn-dbs-bundle-2_last_0" operation_key="ovn-dbs-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="139:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;139:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="35" rc-code="0" op-status="0" interval="0" last-rc-change="1589453020" last-run="1589453020" exec-time="0" queue-time="0" op-digest="bf04cc8b2becfb29770a97d109ad6c4f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-2_monitor_30000" operation_key="ovn-dbs-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="149:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;149:200:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="36" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453021" exec-time="0" queue-time="0" op-digest="9f294fb368bef68138cb5f6555eacdbb"/>
+ </lrm_resource>
+ <lrm_resource id="openstack-cinder-volume-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="openstack-cinder-volume-podman-0_last_0" operation_key="openstack-cinder-volume-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="44:70:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;44:70:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="103" rc-code="7" op-status="0" interval="0" last-rc-change="1589446815" last-run="1589446815" exec-time="80" queue-time="0" op-digest="5614e4a5b18ece4557c8e78b41caa5a3"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-0_last_0" operation_key="rabbitmq-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="16:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;16:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="26" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="75" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-1_last_0" operation_key="rabbitmq-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="17:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;17:21:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="30" rc-code="7" op-status="0" interval="0" last-rc-change="1589446226" last-run="1589446226" exec-time="68" queue-time="0" op-digest="fd2944f2144c46a12dc75a27df6cb7ca"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq-bundle-podman-2_last_0" operation_key="rabbitmq-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="15:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;15:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1589446233" last-run="1589446233" exec-time="1440" queue-time="0" op-digest="fa9ab4aeeb9815ff335e797cd07969e6"/>
+ <lrm_rsc_op id="rabbitmq-bundle-podman-2_monitor_60000" operation_key="rabbitmq-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="9:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;9:24:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="40" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446235" exec-time="316" queue-time="0" op-digest="551fba7650e81f55ddd9f61fe213ee78"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-0">
+ <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="13:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;13:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-1">
+ <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="14:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;14:10:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1589446137" last-run="1589446137" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-podman-2">
+ <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="26:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;26:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1589446136" last-run="1589446136" exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="galera-bundle-2_monitor_30000" operation_key="galera-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="21:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;21:11:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1589446138" exec-time="0" queue-time="0" op-digest="360b985b565e1db13335e072b0bb4305"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-2">
+ <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="113:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;113:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="37" rc-code="0" op-status="0" interval="0" last-rc-change="1589453023" last-run="1589453023" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="redis-bundle-2_monitor_30000" operation_key="redis-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="123:202:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;123:202:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="38" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453025" exec-time="0" queue-time="0" op-digest="1edf361604603a3dc6ccd940ce449f27"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-podman-1">
+ <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="32:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;32:34:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1589446293" last-run="1589446293" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-1_last_0" operation_key="ovn-dbs-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="37:43:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;37:43:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="72" rc-code="7" op-status="0" interval="0" last-rc-change="1589446368" last-run="1589446368" exec-time="73" queue-time="0" op-digest="f572c78235795d25e72ae0801a5b32b9"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-0_last_0" operation_key="galera-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="7:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;7:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="89" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-1_last_0" operation_key="galera-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="8:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;8:7:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1589446128" last-run="1589446128" exec-time="91" queue-time="0" op-digest="79b24c94a068461a64fd8642b637cf6f"/>
+ </lrm_resource>
+ <lrm_resource id="galera-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera-bundle-podman-2_last_0" operation_key="galera-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="6:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;6:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1589446135" last-run="1589446135" exec-time="1420" queue-time="0" op-digest="0e2a3c6ab0eda2ade4e8fc19f575e901"/>
+ <lrm_rsc_op id="galera-bundle-podman-2_monitor_60000" operation_key="galera-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="3:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;3:10:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1589446136" exec-time="312" queue-time="1" op-digest="ab8f616fc91fe22cf4428feec78cdd61"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-2_last_0" operation_key="ovn-dbs-bundle-podman-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="137:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;137:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="124" rc-code="0" op-status="0" interval="0" last-rc-change="1589453018" last-run="1589453018" exec-time="1710" queue-time="0" op-digest="e133ab3f2709aeef1a782f2d0676e290"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-2_monitor_60000" operation_key="ovn-dbs-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="138:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;138:199:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="125" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453020" exec-time="328" queue-time="0" op-digest="d0f190c04691466af0b8d69c0cc6a17e"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-0_last_0" operation_key="redis-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="25:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;25:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="67" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-1_last_0" operation_key="redis-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="26:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;26:31:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="51" rc-code="7" op-status="0" interval="0" last-rc-change="1589446285" last-run="1589446285" exec-time="79" queue-time="0" op-digest="4cec38b57c8812db2c6412206f2f6f64"/>
+ </lrm_resource>
+ <lrm_resource id="redis-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis-bundle-podman-2_last_0" operation_key="redis-bundle-podman-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="111:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;111:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="126" rc-code="0" op-status="0" interval="0" last-rc-change="1589453022" last-run="1589453022" exec-time="1646" queue-time="0" op-digest="3060e3c59f688989fbbcb12bd20f42c6"/>
+ <lrm_rsc_op id="redis-bundle-podman-2_monitor_60000" operation_key="redis-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="112:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;112:201:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="127" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453023" exec-time="309" queue-time="0" op-digest="99c1eff0424ab42e8e9400251b938631"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-0" type="remote" class="ocf" provider="pacemaker" container="ovn-dbs-bundle-podman-0">
+ <lrm_rsc_op id="ovn-dbs-bundle-0_last_0" operation_key="ovn-dbs-bundle-0_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="136:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;136:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="34" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="0" queue-time="0" op-digest="bf04cc8b2becfb29770a97d109ad6c4f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-0_monitor_30000" operation_key="ovn-dbs-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="137:195:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;137:195:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="33" rc-code="0" op-status="0" interval="30000" last-rc-change="1589452904" exec-time="0" queue-time="0" op-digest="9f294fb368bef68138cb5f6555eacdbb"/>
+ </lrm_resource>
+ <lrm_resource id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-podman-0">
+ <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="22:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:7;22:24:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1589446235" last-run="1589446235" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="ovn-dbs-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_last_0" operation_key="ovn-dbs-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="135:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;135:198:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="123" rc-code="0" op-status="0" interval="0" last-rc-change="1589452993" last-run="1589452993" exec-time="736" queue-time="0" op-digest="fb796976ed0593e611680e74e3f50680"/>
+ <lrm_rsc_op id="ovn-dbs-bundle-podman-0_monitor_60000" operation_key="ovn-dbs-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="133:194:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;133:194:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="121" rc-code="0" op-status="0" interval="60000" last-rc-change="1589452902" exec-time="337" queue-time="0" op-digest="6d1b8bd447411df56487161c04e4b787"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="galera-bundle-0" uname="galera-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="galera-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="34:15:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;34:15:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="242" rc-code="0" op-status="0" interval="0" last-rc-change="1589446177" last-run="1589446177" exec-time="12875" queue-time="0" op-digest="71f3cd23a7725ffda9a27f53824f3904" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:16:8:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:8;28:16:8:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="289" rc-code="8" op-status="0" interval="10000" last-rc-change="1589446190" exec-time="77" queue-time="0" op-digest="93f4e5c007476b01d317e51683962464" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="galera-bundle-0">
+ <instance_attributes id="status-galera-bundle-0"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="galera-bundle-2" uname="galera-bundle-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="galera-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:14:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;29:14:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="94" rc-code="0" op-status="0" interval="0" last-rc-change="1589446164" last-run="1589446164" exec-time="12787" queue-time="0" op-digest="71f3cd23a7725ffda9a27f53824f3904" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:15:8:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:8;31:15:8:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="211" rc-code="8" op-status="0" interval="10000" last-rc-change="1589446177" exec-time="74" queue-time="0" op-digest="93f4e5c007476b01d317e51683962464" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="galera-bundle-2">
+ <instance_attributes id="status-galera-bundle-2"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="galera-bundle-1" uname="galera-bundle-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="galera-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="35:16:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;35:16:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="201" rc-code="0" op-status="0" interval="0" last-rc-change="1589446190" last-run="1589446190" exec-time="12867" queue-time="0" op-digest="71f3cd23a7725ffda9a27f53824f3904" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="33:17:8:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:8;33:17:8:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="249" rc-code="8" op-status="0" interval="10000" last-rc-change="1589446203" exec-time="67" queue-time="0" op-digest="93f4e5c007476b01d317e51683962464" op-secure-params=" user " op-secure-digest="71f3cd23a7725ffda9a27f53824f3904"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="galera-bundle-1">
+ <instance_attributes id="status-galera-bundle-1"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="rabbitmq-bundle-1" uname="rabbitmq-bundle-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="rabbitmq-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="75:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;75:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="13" rc-code="0" op-status="0" interval="0" last-rc-change="1589446246" last-run="1589446246" exec-time="9480" queue-time="0" op-digest="e3a5f71f4c37be2ed068e4f0dcc847a9"/>
+ <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="76:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;76:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="32" rc-code="0" op-status="0" interval="10000" last-rc-change="1589446266" exec-time="2224" queue-time="0" op-digest="04bcb19b3c7c43f101badbd8f1e2eed2"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="rabbitmq-bundle-1">
+ <instance_attributes id="status-rabbitmq-bundle-1"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="rabbitmq-bundle-0" uname="rabbitmq-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="rabbitmq-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="75:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;75:25:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="13" rc-code="0" op-status="0" interval="0" last-rc-change="1589446239" last-run="1589446239" exec-time="7590" queue-time="0" op-digest="e3a5f71f4c37be2ed068e4f0dcc847a9"/>
+ <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="74:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;74:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="48" rc-code="0" op-status="0" interval="10000" last-rc-change="1589446266" exec-time="2248" queue-time="0" op-digest="04bcb19b3c7c43f101badbd8f1e2eed2"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="rabbitmq-bundle-0">
+ <instance_attributes id="status-rabbitmq-bundle-0"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="rabbitmq-bundle-2" uname="rabbitmq-bundle-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="rabbitmq-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="77:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;77:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="13" rc-code="0" op-status="0" interval="0" last-rc-change="1589446256" last-run="1589446256" exec-time="9597" queue-time="0" op-digest="e3a5f71f4c37be2ed068e4f0dcc847a9"/>
+ <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="78:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;78:26:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="32" rc-code="0" op-status="0" interval="10000" last-rc-change="1589446266" exec-time="2191" queue-time="0" op-digest="04bcb19b3c7c43f101badbd8f1e2eed2"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="rabbitmq-bundle-2">
+ <instance_attributes id="status-rabbitmq-bundle-2"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="redis-bundle-2" uname="redis-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="redis-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="137:204:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;137:204:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1589453035" last-run="1589453035" exec-time="5010" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="140:205:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;140:205:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="93" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453046" exec-time="292" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="139:205:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;139:205:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="94" rc-code="0" op-status="0" interval="45000" last-rc-change="1589453046" exec-time="328" queue-time="251" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="redis-bundle-1" uname="redis-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="redis-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="138:213:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;138:213:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1589453176" last-run="1589453176" exec-time="4668" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="138:214:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;138:214:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="50" rc-code="0" op-status="0" interval="60000" last-rc-change="1589453181" exec-time="291" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="137:214:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;137:214:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="51" rc-code="0" op-status="0" interval="45000" last-rc-change="1589453181" exec-time="331" queue-time="250" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="redis-bundle-0" uname="redis-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="redis-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="redis_last_0" operation_key="redis_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="133:210:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;133:210:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="149" rc-code="0" op-status="0" interval="0" last-rc-change="1589453127" last-run="1589453127" exec-time="892" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ <lrm_rsc_op id="redis_monitor_20000" operation_key="redis_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="130:211:8:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:8;130:211:8:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="167" rc-code="8" op-status="0" interval="20000" last-rc-change="1589453133" exec-time="271" queue-time="0" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="ovn-dbs-bundle-1" uname="ovn-dbs-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="ovn-dbs-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="ovndb_servers" type="ovndb-servers" class="ocf" provider="ovn">
+ <lrm_rsc_op id="ovndb_servers_last_0" operation_key="ovndb_servers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="42:207:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;42:207:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1589453104" last-run="1589453104" exec-time="296" queue-time="0" op-digest="df362dc0e5bb42793edd3bdc665f72ba"/>
+ <lrm_rsc_op id="ovndb_servers_last_failure_0" operation_key="ovndb_servers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="42:207:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;42:207:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1589453104" last-run="1589453104" exec-time="296" queue-time="0" op-digest="df362dc0e5bb42793edd3bdc665f72ba"/>
+ <lrm_rsc_op id="ovndb_servers_monitor_30000" operation_key="ovndb_servers_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="190:208:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;190:208:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-1" call-id="64" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453105" exec-time="284" queue-time="0" op-digest="d60dafe7583fcd42955f8cce982ffa2c"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="ovn-dbs-bundle-1">
+ <instance_attributes id="status-ovn-dbs-bundle-1"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="ovn-dbs-bundle-2" uname="ovn-dbs-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="ovn-dbs-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="ovndb_servers" type="ovndb-servers" class="ocf" provider="ovn">
+ <lrm_rsc_op id="ovndb_servers_last_0" operation_key="ovndb_servers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="42:218:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;42:218:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1589453356" last-run="1589453356" exec-time="284" queue-time="0" op-digest="df362dc0e5bb42793edd3bdc665f72ba"/>
+ <lrm_rsc_op id="ovndb_servers_last_failure_0" operation_key="ovndb_servers_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="42:218:7:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;42:218:7:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1589453356" last-run="1589453356" exec-time="284" queue-time="0" op-digest="df362dc0e5bb42793edd3bdc665f72ba"/>
+ <lrm_rsc_op id="ovndb_servers_monitor_30000" operation_key="ovndb_servers_monitor_30000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="192:219:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;192:219:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-2" call-id="118" rc-code="0" op-status="0" interval="30000" last-rc-change="1589453356" exec-time="293" queue-time="0" op-digest="d60dafe7583fcd42955f8cce982ffa2c"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="ovn-dbs-bundle-2">
+ <instance_attributes id="status-ovn-dbs-bundle-2"/>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="ovn-dbs-bundle-0" uname="ovn-dbs-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0">
+ <lrm id="ovn-dbs-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="ovndb_servers" type="ovndb-servers" class="ocf" provider="ovn">
+ <lrm_rsc_op id="ovndb_servers_last_0" operation_key="ovndb_servers_demote_0" operation="demote" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="185:220:0:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:0;185:220:0:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="234" rc-code="0" op-status="0" interval="0" last-rc-change="1589453370" last-run="1589453370" exec-time="714" queue-time="0" op-digest="df362dc0e5bb42793edd3bdc665f72ba"/>
+ <lrm_rsc_op id="ovndb_servers_monitor_10000" operation_key="ovndb_servers_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="183:203:8:515fab44-df8e-4e73-a22c-ed4886e03330" transition-magic="0:8;183:203:8:515fab44-df8e-4e73-a22c-ed4886e03330" exit-reason="" on_node="controller-0" call-id="45" rc-code="8" op-status="0" interval="10000" last-rc-change="1589453031" exec-time="316" queue-time="0" op-digest="d60dafe7583fcd42955f8cce982ffa2c"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="ovn-dbs-bundle-0">
+ <instance_attributes id="status-ovn-dbs-bundle-0"/>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/include/crm/pengine/remote_internal.h b/include/crm/pengine/remote_internal.h
index ed64d848dc..46d58fc5e1 100644
--- a/include/crm/pengine/remote_internal.h
+++ b/include/crm/pengine/remote_internal.h
@@ -1,40 +1,41 @@
/*
* Copyright 2013-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_REMOTE__H
# define PE_REMOTE__H
#ifdef __cplusplus
extern "C" {
#endif
#include <glib.h> // gboolean
#include <libxml/tree.h> // xmlNode
#include <crm/pengine/status.h>
-gboolean xml_contains_remote_node(xmlNode *xml);
-gboolean pe__is_remote_node(pe_node_t *node);
-gboolean pe__is_guest_node(pe_node_t *node);
-gboolean pe__is_guest_or_remote_node(pe_node_t *node);
-bool pe__is_bundle_node(pe_node_t *node);
-gboolean pe__resource_is_remote_conn(pe_resource_t *rsc, pe_working_set_t *data_set);
+bool xml_contains_remote_node(xmlNode *xml);
+bool pe__is_remote_node(const pe_node_t *node);
+bool pe__is_guest_node(const pe_node_t *node);
+bool pe__is_guest_or_remote_node(const pe_node_t *node);
+bool pe__is_bundle_node(const pe_node_t *node);
+bool pe__resource_is_remote_conn(const pe_resource_t *rsc,
+ const pe_working_set_t *data_set);
pe_resource_t *pe__resource_contains_guest_node(const pe_working_set_t *data_set,
const pe_resource_t *rsc);
void pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
void (*helper)(const pe_node_t*, void*), void *user_data);
xmlNode *pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
const char *server, const char *port);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/common/operations.c b/lib/common/operations.c
index a81306573a..adc3228c94 100644
--- a/lib/common/operations.c
+++ b/lib/common/operations.c
@@ -1,430 +1,431 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <crm/crm.h>
#include <crm/lrmd.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
/*!
* \brief Generate an operation key (RESOURCE_ACTION_INTERVAL)
*
* \param[in] rsc_id ID of resource being operated on
* \param[in] op_type Operation name
* \param[in] interval_ms Operation interval
*
* \return Newly allocated memory containing operation key as string
*
* \note This function asserts on errors, so it will never return NULL.
* The caller is responsible for freeing the result with free().
*/
char *
pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
{
CRM_ASSERT(rsc_id != NULL);
CRM_ASSERT(op_type != NULL);
return crm_strdup_printf(PCMK__OP_FMT, rsc_id, op_type, interval_ms);
}
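/* Illustrative usage (a minimal sketch, not part of the original source):
 *
 *     char *key = pcmk__op_key("myrsc", "monitor", 10000);
 *     // key is now "myrsc_monitor_10000"
 *     free(key);
 */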
gboolean
parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
{
char *notify = NULL;
char *mutable_key = NULL;
char *mutable_key_ptr = NULL;
size_t len = 0, offset = 0;
unsigned long long ch = 0;
guint local_interval_ms = 0;
// Initialize output variables in case of early return
if (rsc_id) {
*rsc_id = NULL;
}
if (op_type) {
*op_type = NULL;
}
if (interval_ms) {
*interval_ms = 0;
}
CRM_CHECK(key && *key, return FALSE);
// Parse interval at end of string
len = strlen(key);
offset = len - 1;
while ((offset > 0) && isdigit(key[offset])) {
ch = key[offset] - '0';
for (int digits = len - offset; digits > 1; --digits) {
ch = ch * 10;
}
local_interval_ms += ch;
offset--;
}
- crm_trace("Operation key '%s' has interval %ums", key, local_interval_ms);
if (interval_ms) {
*interval_ms = local_interval_ms;
}
CRM_CHECK((offset != (len - 1)) && (key[offset] == '_'), return FALSE);
mutable_key = strndup(key, offset);
offset--;
while (offset > 0 && key[offset] != '_') {
offset--;
}
CRM_CHECK(key[offset] == '_',
free(mutable_key); return FALSE);
mutable_key_ptr = mutable_key + offset + 1;
- crm_trace(" Action: %s", mutable_key_ptr);
if (op_type) {
*op_type = strdup(mutable_key_ptr);
}
mutable_key[offset] = 0;
offset--;
notify = strstr(mutable_key, "_post_notify");
if (notify && pcmk__str_eq(notify, "_post_notify", pcmk__str_casei)) {
notify[0] = 0;
}
notify = strstr(mutable_key, "_pre_notify");
if (notify && pcmk__str_eq(notify, "_pre_notify", pcmk__str_casei)) {
notify[0] = 0;
}
- crm_trace(" Resource: %s", mutable_key);
+ // @TODO We don't really need this trace if we add good unit tests for this
+ crm_trace("Parsed %s into resource %s, action %s, interval %ums",
+ key, mutable_key, mutable_key_ptr, local_interval_ms);
+
if (rsc_id) {
*rsc_id = mutable_key;
} else {
free(mutable_key);
}
return TRUE;
}
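/* Illustrative round trip (a minimal sketch, not part of the original source):
 *
 *     char *rsc_id = NULL;
 *     char *op_type = NULL;
 *     guint interval_ms = 0;
 *
 *     if (parse_op_key("myrsc_monitor_10000", &rsc_id, &op_type, &interval_ms)) {
 *         // rsc_id == "myrsc", op_type == "monitor", interval_ms == 10000
 *         free(rsc_id);
 *         free(op_type);
 *     }
 */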
char *
pcmk__notify_key(const char *rsc_id, const char *notify_type,
const char *op_type)
{
CRM_CHECK(rsc_id != NULL, return NULL);
CRM_CHECK(op_type != NULL, return NULL);
CRM_CHECK(notify_type != NULL, return NULL);
return crm_strdup_printf("%s_%s_notify_%s_0",
rsc_id, notify_type, op_type);
}
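/* Illustrative usage (a minimal sketch, not part of the original source):
 *
 *     char *key = pcmk__notify_key("myrsc", "pre", "start");
 *     // key is now "myrsc_pre_notify_start_0"
 *     free(key);
 */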
/*!
* \brief Parse a transition magic string into its constituent parts
*
* \param[in] magic Magic string to parse (must be non-NULL)
* \param[out] uuid If non-NULL, where to store copy of parsed UUID
* \param[out] transition_id If non-NULL, where to store parsed transition ID
* \param[out] action_id If non-NULL, where to store parsed action ID
* \param[out] op_status If non-NULL, where to store parsed result status
* \param[out] op_rc If non-NULL, where to store parsed actual rc
 * \param[out] target_rc If non-NULL, where to store parsed target rc
*
* \return TRUE if key was valid, FALSE otherwise
* \note If uuid is supplied and this returns TRUE, the caller is responsible
* for freeing the memory for *uuid using free().
*/
gboolean
decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id,
int *op_status, int *op_rc, int *target_rc)
{
int res = 0;
char *key = NULL;
gboolean result = TRUE;
int local_op_status = -1;
int local_op_rc = -1;
CRM_CHECK(magic != NULL, return FALSE);
#ifdef SSCANF_HAS_M
res = sscanf(magic, "%d:%d;%ms", &local_op_status, &local_op_rc, &key);
#else
key = calloc(1, strlen(magic) - 3); // magic must have >=4 other characters
CRM_ASSERT(key);
res = sscanf(magic, "%d:%d;%s", &local_op_status, &local_op_rc, key);
#endif
if (res == EOF) {
crm_err("Could not decode transition information '%s': %s",
magic, pcmk_strerror(errno));
result = FALSE;
} else if (res < 3) {
crm_warn("Transition information '%s' incomplete (%d of 3 expected items)",
magic, res);
result = FALSE;
} else {
if (op_status) {
*op_status = local_op_status;
}
if (op_rc) {
*op_rc = local_op_rc;
}
result = decode_transition_key(key, uuid, transition_id, action_id,
target_rc);
}
free(key);
return result;
}
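/* Illustrative decode (a minimal sketch, not part of the original source),
 * using a transition-magic value of the form seen in CIB status entries:
 *
 *     char *uuid = NULL;
 *     int transition_id = 0, action_id = 0, status = 0, rc = 0, target_rc = 0;
 *
 *     if (decode_transition_magic(
 *             "0:0;140:205:0:515fab44-df8e-4e73-a22c-ed4886e03330",
 *             &uuid, &transition_id, &action_id, &status, &rc, &target_rc)) {
 *         // status == 0, rc == 0, action_id == 140, transition_id == 205,
 *         // target_rc == 0, uuid == "515fab44-df8e-4e73-a22c-ed4886e03330"
 *         free(uuid);
 *     }
 */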
char *
pcmk__transition_key(int transition_id, int action_id, int target_rc,
const char *node)
{
CRM_CHECK(node != NULL, return NULL);
return crm_strdup_printf("%d:%d:%d:%-*s",
action_id, transition_id, target_rc, 36, node);
}
/*!
* \brief Parse a transition key into its constituent parts
*
* \param[in] key Transition key to parse (must be non-NULL)
* \param[out] uuid If non-NULL, where to store copy of parsed UUID
* \param[out] transition_id If non-NULL, where to store parsed transition ID
* \param[out] action_id If non-NULL, where to store parsed action ID
 * \param[out] target_rc If non-NULL, where to store parsed target rc
*
* \return TRUE if key was valid, FALSE otherwise
* \note If uuid is supplied and this returns TRUE, the caller is responsible
* for freeing the memory for *uuid using free().
*/
gboolean
decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id,
int *target_rc)
{
int local_transition_id = -1;
int local_action_id = -1;
int local_target_rc = -1;
char local_uuid[37] = { '\0' };
// Initialize any supplied output arguments
if (uuid) {
*uuid = NULL;
}
if (transition_id) {
*transition_id = -1;
}
if (action_id) {
*action_id = -1;
}
if (target_rc) {
*target_rc = -1;
}
CRM_CHECK(key != NULL, return FALSE);
if (sscanf(key, "%d:%d:%d:%36s", &local_action_id, &local_transition_id,
&local_target_rc, local_uuid) != 4) {
crm_err("Invalid transition key '%s'", key);
return FALSE;
}
if (strlen(local_uuid) != 36) {
crm_warn("Invalid UUID '%s' in transition key '%s'", local_uuid, key);
}
if (uuid) {
*uuid = strdup(local_uuid);
CRM_ASSERT(*uuid);
}
if (transition_id) {
*transition_id = local_transition_id;
}
if (action_id) {
*action_id = local_action_id;
}
if (target_rc) {
*target_rc = local_target_rc;
}
return TRUE;
}
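/* Illustrative round trip with pcmk__transition_key() (a minimal sketch, not
 * part of the original source):
 *
 *     int transition_id = 0, action_id = 0, target_rc = 0;
 *     char *uuid = NULL;
 *     char *key = pcmk__transition_key(205, 140, 0,
 *                                      "515fab44-df8e-4e73-a22c-ed4886e03330");
 *
 *     // key is "140:205:0:515fab44-df8e-4e73-a22c-ed4886e03330"
 *     if (decode_transition_key(key, &uuid, &transition_id, &action_id,
 *                               &target_rc)) {
 *         // transition_id == 205, action_id == 140, target_rc == 0
 *         free(uuid);
 *     }
 *     free(key);
 */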
/*!
* \internal
* \brief Remove XML attributes not needed for operation digest
*
* \param[in,out] param_set XML with operation parameters
*/
void
pcmk__filter_op_for_digest(xmlNode *param_set)
{
char *key = NULL;
char *timeout = NULL;
guint interval_ms = 0;
const char *attr_filter[] = {
XML_ATTR_ID,
XML_ATTR_CRM_VERSION,
XML_LRM_ATTR_OP_DIGEST,
XML_LRM_ATTR_TARGET,
XML_LRM_ATTR_TARGET_UUID,
"pcmk_external_ip"
};
const int meta_len = strlen(CRM_META);
if (param_set == NULL) {
return;
}
// Remove the specific attributes listed in attr_filter
for (int lpc = 0; lpc < DIMOF(attr_filter); lpc++) {
xml_remove_prop(param_set, attr_filter[lpc]);
}
key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) {
interval_ms = 0;
}
free(key);
key = crm_meta_name(XML_ATTR_TIMEOUT);
timeout = crm_element_value_copy(param_set, key);
// Remove all CRM_meta_* attributes
for (xmlAttrPtr xIter = param_set->properties; xIter != NULL; ) {
const char *prop_name = (const char *) (xIter->name);
xIter = xIter->next;
// @TODO Why is this case-insensitive?
if (strncasecmp(prop_name, CRM_META, meta_len) == 0) {
xml_remove_prop(param_set, prop_name);
}
}
if ((interval_ms != 0) && (timeout != NULL)) {
// Add the timeout back, it's useful for recurring operation digests
crm_xml_add(param_set, key, timeout);
}
free(timeout);
free(key);
}
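/* Illustrative effect (a minimal sketch, not part of the original source):
 * for a recurring operation's parameter set, filtering strips CRM_meta_*
 * attributes but re-adds the timeout, which matters for the digest:
 *
 *     xmlNode *params = create_xml_node(NULL, "parameters");
 *     crm_xml_add(params, "CRM_meta_interval", "10000");
 *     crm_xml_add(params, "CRM_meta_timeout", "20000");
 *     crm_xml_add(params, "ip", "192.168.122.10"); // agent parameter
 *     pcmk__filter_op_for_digest(params);
 *     // Remaining: ip="192.168.122.10", CRM_meta_timeout="20000"
 *     free_xml(params);
 */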
int
rsc_op_expected_rc(lrmd_event_data_t * op)
{
int rc = 0;
if (op && op->user_data) {
decode_transition_key(op->user_data, NULL, NULL, NULL, &rc);
}
return rc;
}
gboolean
did_rsc_op_fail(lrmd_event_data_t * op, int target_rc)
{
switch (op->op_status) {
case PCMK_LRM_OP_CANCELLED:
case PCMK_LRM_OP_PENDING:
return FALSE;
case PCMK_LRM_OP_NOTSUPPORTED:
case PCMK_LRM_OP_TIMEOUT:
case PCMK_LRM_OP_ERROR:
case PCMK_LRM_OP_NOT_CONNECTED:
case PCMK_LRM_OP_INVALID:
return TRUE;
default:
if (target_rc != op->rc) {
return TRUE;
}
}
return FALSE;
}
/*!
* \brief Create a CIB XML element for an operation
*
* \param[in] parent If not NULL, make new XML node a child of this one
* \param[in] prefix Generate an ID using this prefix
* \param[in] task Operation task to set
* \param[in] interval_spec Operation interval to set
* \param[in] timeout If not NULL, operation timeout to set
*
* \return New XML object on success, NULL otherwise
*/
xmlNode *
crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task,
const char *interval_spec, const char *timeout)
{
xmlNode *xml_op;
CRM_CHECK(prefix && task && interval_spec, return NULL);
xml_op = create_xml_node(parent, XML_ATTR_OP);
crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval_spec);
crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval_spec);
crm_xml_add(xml_op, "name", task);
if (timeout) {
crm_xml_add(xml_op, XML_ATTR_TIMEOUT, timeout);
}
return xml_op;
}
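/* Illustrative usage (a minimal sketch, not part of the original source):
 *
 *     xmlNode *op = crm_create_op_xml(NULL, "myrsc", "monitor", "10s", "20s");
 *     // Produces: <op id="myrsc-monitor-10s" interval="10s" name="monitor"
 *     //           timeout="20s"/>
 *     free_xml(op);
 */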
/*!
* \brief Check whether an operation requires resource agent meta-data
*
* \param[in] rsc_class Resource agent class (or NULL to skip class check)
* \param[in] op Operation action (or NULL to skip op check)
*
* \return TRUE if operation needs meta-data, FALSE otherwise
* \note At least one of rsc_class and op must be specified.
*/
bool
crm_op_needs_metadata(const char *rsc_class, const char *op)
{
/* Agent meta-data is used to determine whether a reload is possible, and to
* evaluate versioned parameters -- so if this op is not relevant to those
* features, we don't need the meta-data.
*/
CRM_CHECK(rsc_class || op, return FALSE);
if (rsc_class
&& !pcmk_is_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) {
/* Meta-data is only needed for resource classes that use parameters */
return FALSE;
}
/* Meta-data is only needed for these actions */
if (!pcmk__str_eq(op, CRMD_ACTION_START, pcmk__str_null_matches)
&& strcmp(op, CRMD_ACTION_STATUS)
&& strcmp(op, CRMD_ACTION_PROMOTE)
&& strcmp(op, CRMD_ACTION_DEMOTE)
&& strcmp(op, CRMD_ACTION_RELOAD)
&& strcmp(op, CRMD_ACTION_MIGRATE)
&& strcmp(op, CRMD_ACTION_MIGRATED)
&& strcmp(op, CRMD_ACTION_NOTIFY)) {
return FALSE;
}
return TRUE;
}
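/* Illustrative results (a minimal sketch, not part of the original source),
 * following the checks above:
 *
 *     crm_op_needs_metadata("ocf", "start");  // TRUE (class uses parameters,
 *                                             // and start is a listed action)
 *     crm_op_needs_metadata(NULL, "monitor"); // TRUE (CRMD_ACTION_STATUS)
 *     crm_op_needs_metadata("ocf", "stop");   // FALSE (stop is not listed)
 */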
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index 5a97589cdd..501cf4701f 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,3060 +1,3059 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
CRM_TRACE_INIT_DATA(pacemaker);
void set_alloc_actions(pe_working_set_t * data_set);
extern void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
extern gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
static void apply_remote_node_ordering(pe_working_set_t *data_set);
static enum remote_connection_state get_remote_node_state(pe_node_t *node);
enum remote_connection_state {
remote_state_unknown = 0,
remote_state_alive = 1,
remote_state_resting = 2,
remote_state_failed = 3,
remote_state_stopped = 4
};
static const char *
state2text(enum remote_connection_state state)
{
switch (state) {
case remote_state_unknown:
return "unknown";
case remote_state_alive:
return "alive";
case remote_state_resting:
return "resting";
case remote_state_failed:
return "failed";
case remote_state_stopped:
return "stopped";
}
return "impossible";
}
resource_alloc_functions_t resource_class_alloc_functions[] = {
{
pcmk__native_merge_weights,
pcmk__native_allocate,
native_create_actions,
native_create_probe,
native_internal_constraints,
native_rsc_colocation_lh,
native_rsc_colocation_rh,
native_rsc_location,
native_action_flags,
native_update_actions,
native_expand,
native_append_meta,
},
{
pcmk__group_merge_weights,
pcmk__group_allocate,
group_create_actions,
native_create_probe,
group_internal_constraints,
group_rsc_colocation_lh,
group_rsc_colocation_rh,
group_rsc_location,
group_action_flags,
group_update_actions,
group_expand,
group_append_meta,
},
{
pcmk__native_merge_weights,
pcmk__clone_allocate,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
clone_rsc_colocation_lh,
clone_rsc_colocation_rh,
clone_rsc_location,
clone_action_flags,
pcmk__multi_update_actions,
clone_expand,
clone_append_meta,
},
{
pcmk__native_merge_weights,
pcmk__bundle_allocate,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_rsc_colocation_lh,
pcmk__bundle_rsc_colocation_rh,
pcmk__bundle_rsc_location,
pcmk__bundle_action_flags,
pcmk__multi_update_actions,
pcmk__bundle_expand,
pcmk__bundle_append_meta,
}
};
gboolean
update_action_flags(pe_action_t * action, enum pe_action_flags flags, const char *source, int line)
{
static unsigned long calls = 0;
gboolean changed = FALSE;
gboolean clear = pcmk_is_set(flags, pe_action_clear);
enum pe_action_flags last = action->flags;
if (clear) {
pe__clear_action_flags_as(source, line, action, flags);
} else {
pe__set_action_flags_as(source, line, action, flags);
}
if (last != action->flags) {
calls++;
changed = TRUE;
/* Useful for tracking down _who_ changed a specific flag */
/* CRM_ASSERT(calls != 534); */
pe__clear_raw_action_flags(flags, "action update", pe_action_clear);
crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
action->uuid, action->node ? action->node->details->uname : "[none]",
clear ? "un-" : "", flags, last, action->flags, calls, source);
}
return changed;
}
static gboolean
check_rsc_parameters(pe_resource_t * rsc, pe_node_t * node, xmlNode * rsc_entry,
gboolean active_here, pe_working_set_t * data_set)
{
int attr_lpc = 0;
gboolean force_restart = FALSE;
gboolean delete_resource = FALSE;
gboolean changed = FALSE;
const char *value = NULL;
const char *old_value = NULL;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
if (value == old_value /* i.e. NULL */
|| pcmk__str_eq(value, old_value, pcmk__str_none)) {
continue;
}
changed = TRUE;
trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
if (active_here) {
force_restart = TRUE;
crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
rsc->id, node->details->uname, attr_list[attr_lpc],
crm_str(old_value), crm_str(value));
}
}
if (force_restart) {
/* make sure the restart happens */
stop_action(rsc, node, FALSE);
pe__set_resource_flags(rsc, pe_rsc_start_pending);
delete_resource = TRUE;
} else if (changed) {
delete_resource = TRUE;
}
return delete_resource;
}
static void
CancelXmlOp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * active_node,
const char *reason, pe_working_set_t * data_set)
{
guint interval_ms = 0;
pe_action_t *cancel = NULL;
const char *task = NULL;
const char *call_id = NULL;
CRM_CHECK(xml_op != NULL, return);
CRM_CHECK(active_node != NULL, return);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
crm_info("Action " PCMK__OP_FMT " on %s will be stopped: %s",
rsc->id, task, interval_ms,
active_node->details->uname, (reason? reason : "unknown"));
cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
}
static gboolean
check_action_definition(pe_resource_t * rsc, pe_node_t * active_node, xmlNode * xml_op,
pe_working_set_t * data_set)
{
char *key = NULL;
guint interval_ms = 0;
const op_digest_cache_t *digest_data = NULL;
gboolean did_change = FALSE;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_secure = NULL;
CRM_CHECK(active_node != NULL, return FALSE);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if (interval_ms > 0) {
xmlNode *op_match = NULL;
/* we need to reconstruct the key because of the way we used to construct resource IDs */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Checking parameters for %s", key);
op_match = find_rsc_op_entry(rsc, key);
if ((op_match == NULL)
&& pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)) {
CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
free(key);
return TRUE;
} else if (op_match == NULL) {
pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
free(key);
return TRUE;
}
free(key);
key = NULL;
}
crm_trace("Testing " PCMK__OP_FMT " on %s",
rsc->id, task, interval_ms, active_node->details->uname);
if ((interval_ms == 0) && pcmk__str_eq(task, RSC_STATUS, pcmk__str_casei)) {
/* Reload based on the start action not a probe */
task = RSC_START;
} else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_MIGRATED, pcmk__str_casei)) {
/* Reload based on the start action not a migrate */
task = RSC_START;
} else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_PROMOTE, pcmk__str_casei)) {
/* Reload based on the start action not a promote */
task = RSC_START;
}
digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
if (pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
}
if(digest_data->rc != RSC_DIGEST_MATCH
&& digest_secure
&& digest_data->digest_secure_calc
&& strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
printf("Only 'private' parameters to " PCMK__OP_FMT
" on %s changed: %s\n",
rsc->id, task, interval_ms, active_node->details->uname,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
}
} else if (digest_data->rc == RSC_DIGEST_RESTART) {
/* Changes that force a restart */
pe_action_t *required = NULL;
did_change = TRUE;
key = pcmk__op_key(rsc->id, task, interval_ms);
crm_log_xml_info(digest_data->params_restart, "params:restart");
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
} else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
/* Changes that can potentially be handled by a reload */
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
did_change = TRUE;
trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
crm_log_xml_info(digest_data->params_all, "params:reload");
key = pcmk__op_key(rsc->id, task, interval_ms);
if (interval_ms > 0) {
pe_action_t *op = NULL;
#if 0
/* Always reload/restart the entire resource */
ReloadRsc(rsc, active_node, data_set);
#else
/* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_reschedule);
#endif
} else if (digest_restart) {
pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
/* Reload this resource */
ReloadRsc(rsc, active_node, data_set);
free(key);
} else {
pe_action_t *required = NULL;
pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
/* Re-send the start/demote/promote op
* Recurring ops will be detected independently
*/
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
}
}
return did_change;
}
/*!
 * \internal
 * \brief Do deferred action checks after allocation
 *
 * \param[in] rsc      Resource that the history entry is for
 * \param[in] node     Node that the history entry is for
 * \param[in] rsc_op   Action history entry to check
 * \param[in] check    Type of deferred check to perform
 * \param[in] data_set Working set for cluster
 */
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
enum pe_check_parameters check, pe_working_set_t *data_set)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
case pe_check_active:
if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
reason = "action definition changed";
}
break;
case pe_check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s has no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason) {
pe__clear_failcount(rsc, node, reason, data_set);
}
}
static void
check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int offset = -1;
int stop_index = 0;
int start_index = 0;
const char *task = NULL;
xmlNode *rsc_op = NULL;
GListPtr op_list = NULL;
GListPtr sorted_op_list = NULL;
CRM_CHECK(node != NULL, return);
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_resource_t *parent = uber_parent(rsc);
if(parent == NULL
|| pe_rsc_is_clone(parent) == FALSE
|| pcmk_is_set(parent->flags, pe_rsc_unique)) {
pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
DeleteRsc(rsc, node, FALSE, data_set);
} else {
pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
}
return;
} else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
rsc->id, node->details->uname);
return;
}
pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
rsc_op = __xml_next_element(rsc_op)) {
if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
op_list = g_list_prepend(op_list, rsc_op);
}
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
offset++;
if (start_index < stop_index) {
/* stopped */
continue;
} else if (offset < start_index) {
/* action occurred prior to a start */
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((interval_ms > 0) &&
(pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
// Maintenance mode cancels recurring operations
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
RSC_PROMOTE, RSC_MIGRATED, NULL)) {
/* If a resource operation failed, and the operation's definition
 * has changed, clear any fail count so it can be retried fresh.
 */
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pe__add_param_check(rsc_op, rsc, node, pe_check_active,
data_set);
} else if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe__clear_failcount(rsc, node, "action definition changed",
data_set);
}
}
}
g_list_free(sorted_op_list);
}
static GListPtr
find_rsc_list(GListPtr result, pe_resource_t * rsc, const char *id, gboolean renamed_clones,
gboolean partial, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
gboolean match = FALSE;
if (id == NULL) {
return NULL;
}
if (rsc == NULL) {
if (data_set == NULL) {
return NULL;
}
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial,
NULL);
}
return result;
}
if (partial) {
if (strstr(rsc->id, id)) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
match = TRUE;
}
} else {
if (strcmp(rsc->id, id) == 0) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
}
}
if (match) {
result = g_list_prepend(result, rsc);
}
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
}
return result;
}
static void
check_actions(pe_working_set_t * data_set)
{
const char *id = NULL;
pe_node_t *node = NULL;
xmlNode *lrm_rscs = NULL;
xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
xmlNode *node_state = NULL;
for (node_state = __xml_first_child_element(status); node_state != NULL;
node_state = __xml_next_element(node_state)) {
if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
id = crm_element_value(node_state, XML_ATTR_ID);
lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
node = pe_find_node_id(data_set->nodes, id);
if (node == NULL) {
continue;
/* Still need to check actions for a maintenance node to cancel existing monitor operations */
} else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: can't run resources",
node->details->uname);
continue;
}
crm_trace("Processing node %s", node->details->uname);
if (node->details->online
|| pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
xmlNode *rsc_entry = NULL;
for (rsc_entry = __xml_first_child_element(lrm_rscs);
rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
if (xml_has_children(rsc_entry)) {
GListPtr gIter = NULL;
GListPtr result = NULL;
const char *rsc_id = ID(rsc_entry);
CRM_CHECK(rsc_id != NULL, return);
result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
for (gIter = result; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->variant != pe_native) {
continue;
}
check_actions_for(rsc_entry, rsc, node, data_set);
}
g_list_free(result);
}
}
}
}
}
}
}
static void
apply_placement_constraints(pe_working_set_t * data_set)
{
for (GList *gIter = data_set->placement_constraints;
gIter != NULL; gIter = gIter->next) {
pe__location_t *cons = gIter->data;
cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
}
}
static gboolean
failcount_clear_action_exists(pe_node_t * node, pe_resource_t * rsc)
{
gboolean rc = FALSE;
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
if (list) {
rc = TRUE;
}
g_list_free(list);
return rc;
}
/*!
* \internal
* \brief Force resource away if failures hit migration threshold
*
* \param[in,out] rsc Resource to check for failures
* \param[in,out] node Node to check for failures
* \param[in,out] data_set Cluster working set to update
*/
static void
check_migration_threshold(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
pe_resource_t *failed;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
return;
}
/* If there are no failures, there's no need to force away */
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL,
data_set);
if (fail_count <= 0) {
return;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
/* If failed resource has a parent, we'll force the parent away */
failed = rsc;
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
failed = uber_parent(rsc);
}
if (countdown == 0) {
resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
failed->id, node->details->uname, fail_count,
rsc->migration_threshold);
} else {
crm_info("%s can fail %d more times on %s before being forced off",
failed->id, countdown, node->details->uname);
}
}
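/* Worked example (not part of the original source): with migration-threshold=3
 * and an effective fail count of 2, countdown is QB_MAX(3 - 2, 0) == 1, so the
 * resource may fail once more on this node; at fail count 3, countdown hits 0
 * and the node is banned via resource_location(..., -INFINITY,
 * "__fail_limit__", ...).
 */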
static void
common_apply_stickiness(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
common_apply_stickiness(child_rsc, node, data_set);
}
return;
}
if (pcmk_is_set(rsc->flags, pe_rsc_managed)
&& rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) {
pe_node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
pe_node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (current == NULL) {
} else if ((match != NULL)
|| pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
pe_resource_t *sticky_rsc = rsc;
resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
" (node=%s, weight=%d)", sticky_rsc->id,
node->details->uname, rsc->stickiness);
} else {
GHashTableIter iter;
pe_node_t *nIter = NULL;
pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
" and node %s is not explicitly allowed", rsc->id, node->details->uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
}
}
}
/* Check the migration threshold only if a failcount clear action
* has not already been placed for this resource on the node.
* There is no sense in potentially forcing the resource from this
* node if the failcount is being reset anyway.
*
* @TODO A clear_failcount operation can be scheduled in stage4() via
* check_actions_for(), or in stage5() via check_params(). This runs in
* stage2(), so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't -- worst case, we stop or move the resource,
* then move it back next transition.
*/
if (failcount_clear_action_exists(node, rsc) == FALSE) {
check_migration_threshold(rsc, node, data_set);
}
}
void
complex_set_cmds(pe_resource_t * rsc)
{
GListPtr gIter = rsc->children;
rsc->cmds = &resource_class_alloc_functions[rsc->variant];
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
complex_set_cmds(child_rsc);
}
}
void
set_alloc_actions(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
complex_set_cmds(rsc);
}
}
static void
calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
{
const char *key = (const char *)gKey;
const char *value = (const char *)gValue;
int *system_health = (int *)user_data;
if (!gKey || !gValue || !user_data) {
return;
}
if (pcmk__starts_with(key, "#health")) {
int score;
/* Convert the value into an integer */
score = char2score(value);
/* Add it to the running total */
*system_health = pe__add_scores(score, *system_health);
}
}
static gboolean
apply_system_health(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
int base_health = 0;
if (pcmk__str_eq(health_strategy, "none", pcmk__str_null_matches | pcmk__str_casei)) {
/* Prevent any accidental health -> score translation */
pcmk__score_red = 0;
pcmk__score_yellow = 0;
pcmk__score_green = 0;
return TRUE;
} else if (pcmk__str_eq(health_strategy, "migrate-on-red", pcmk__str_casei)) {
/* Resources on nodes which have health values of red are
* weighted away from that node.
*/
pcmk__score_red = -INFINITY;
pcmk__score_yellow = 0;
pcmk__score_green = 0;
} else if (pcmk__str_eq(health_strategy, "only-green", pcmk__str_casei)) {
/* Resources on nodes which have health values of red or yellow
* are forced away from that node.
*/
pcmk__score_red = -INFINITY;
pcmk__score_yellow = -INFINITY;
pcmk__score_green = 0;
} else if (pcmk__str_eq(health_strategy, "progressive", pcmk__str_casei)) {
/* Same as above, but use the red/yellow/green scores provided by the user.
 * Defaults are provided by the pe_prefs table. A custom health "base score"
 * can also be used.
 */
base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
} else if (pcmk__str_eq(health_strategy, "custom", pcmk__str_casei)) {
/* Requires the admin to configure the rsc_location constraints for
* processing the stored health scores
*/
/* TODO: Check for the existence of appropriate node health constraints */
return TRUE;
} else {
crm_err("Unknown node health strategy: %s", health_strategy);
return FALSE;
}
crm_info("Applying automated node health strategy: %s", health_strategy);
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
int system_health = base_health;
pe_node_t *node = (pe_node_t *) gIter->data;
/* Search through the node hash table for system health entries. */
g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
crm_info(" Node %s has an combined system health of %d",
node->details->uname, system_health);
/* If the health is non-zero, then create a new rsc2node so that the
* weight will be added later on.
*/
if (system_health != 0) {
GListPtr gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
}
}
}
return TRUE;
}
gboolean
stage0(pe_working_set_t * data_set)
{
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
if (data_set->input == NULL) {
return FALSE;
}
if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
crm_trace("Calculating status");
cluster_status(data_set);
}
set_alloc_actions(data_set);
apply_system_health(data_set);
unpack_constraints(cib_constraints, data_set);
return TRUE;
}
/*
* Check nodes for resources started outside of the LRM
*/
gboolean
probe_resources(pe_working_set_t * data_set)
{
pe_action_t *probe_node_complete = NULL;
for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
if (node->details->online == FALSE) {
if (pe__is_remote_node(node) && node->details->remote_rsc
&& (get_remote_node_state(node) == remote_state_failed)) {
pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE);
}
continue;
} else if (node->details->unclean) {
continue;
} else if (node->details->rsc_discovery_enabled == FALSE) {
/* resource discovery is disabled for this node */
continue;
}
if (probed != NULL && crm_is_true(probed) == FALSE) {
pe_action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
continue;
}
for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
}
}
return TRUE;
}
static void
rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
{
GListPtr gIter = rsc->children;
pe_resource_t *top = uber_parent(rsc);
pe_node_t *match;
if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_discover_filter(child_rsc, node);
}
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match && match->rsc_discover_mode != pe_discover_exclusive) {
match->weight = -INFINITY;
}
}
static time_t
shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
if (shutdown) {
errno = 0;
result = (time_t) crm_parse_ll(shutdown, NULL);
if (errno != 0) {
result = 0;
}
}
return result? result : get_effective_time(data_set);
}
static void
apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
{
const char *class;
// Only primitives and (uncloned) groups may be locked
if (rsc->variant == pe_group) {
for (GList *item = rsc->children; item != NULL;
item = item->next) {
apply_shutdown_lock((pe_resource_t *) item->data, data_set);
}
} else if (rsc->variant != pe_native) {
return;
}
// Fence devices and remote connections can't be locked
class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
|| pe__resource_is_remote_conn(rsc, data_set)) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node, data_set);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pe_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
rsc->id, node->details->uname);
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node, data_set);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (data_set->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, rsc->lock_node->details->uname,
(long long) lock_expiration);
pe__update_recheck_time(++lock_expiration, data_set);
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, rsc->lock_node->details->uname);
}
// If resource is locked to one node, ban it from all other nodes
for (GList *item = data_set->nodes; item != NULL; item = item->next) {
pe_node_t *node = item->data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
}
}
}
/*!
* \internal
* \brief Stage 2 of cluster status: apply node-specific criteria
*
* Count known nodes, and apply location constraints, stickiness, and exclusive
* resource discovery.
*/
gboolean
stage2(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
apply_shutdown_lock((pe_resource_t *) gIter->data, data_set);
}
}
if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
// @COMPAT API backward compatibility
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (node && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
data_set->max_valid_nodes++;
}
}
}
- crm_trace("Applying placement constraints");
apply_placement_constraints(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
GListPtr gIter2 = NULL;
pe_node_t *node = (pe_node_t *) gIter->data;
gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
common_apply_stickiness(rsc, node, data_set);
rsc_discover_filter(rsc, node);
}
}
return TRUE;
}
/*
* Create internal resource constraints before allocation
*/
gboolean
stage3(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->cmds->internal_constraints(rsc, data_set);
}
return TRUE;
}
/*
* Check for orphaned or redefined actions
*/
gboolean
stage4(pe_working_set_t * data_set)
{
check_actions(data_set);
return TRUE;
}
static void *
convert_const_pointer(const void *ptr)
{
/* Cast away const without a compiler warning, so the result can be passed
 * to APIs that take non-const pointers (such as pcmk__native_merge_weights()
 * below)
 */
return (void *)ptr;
}
static gint
sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
{
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
const char *reason = "existence";
const GListPtr nodes = (GListPtr) data;
const pe_resource_t *resource1 = a;
const pe_resource_t *resource2 = b;
pe_node_t *r1_node = NULL;
pe_node_t *r2_node = NULL;
GListPtr gIter = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "no node list";
if (nodes == NULL) {
goto done;
}
r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
resource1->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
pe__show_node_weights(true, NULL, resource1->id, r1_nodes);
r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
resource2->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
pe__show_node_weights(true, NULL, resource2->id, r2_nodes);
/* Current location score */
reason = "current location";
r1_weight = -INFINITY;
r2_weight = -INFINITY;
if (resource1->running_on) {
r1_node = pe__current_node(resource1);
r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
if (r1_node != NULL) {
r1_weight = r1_node->weight;
}
}
if (resource2->running_on) {
r2_node = pe__current_node(resource2);
r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
if (r2_node != NULL) {
r2_weight = r2_node->weight;
}
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "score";
for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
r1_node = NULL;
r2_node = NULL;
r1_weight = -INFINITY;
if (r1_nodes) {
r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
}
if (r1_node) {
r1_weight = r1_node->weight;
}
r2_weight = -INFINITY;
if (r2_nodes) {
r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
}
if (r2_node) {
r2_weight = r2_node->weight;
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
rc < 0 ? '>' : rc > 0 ? '<' : '=',
resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
if (r1_nodes) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
static void
allocate_resources(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Allocate remote connection resources first (which will also allocate
* any colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->is_remote_node == FALSE) {
continue;
}
pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
rsc->id);
rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
}
}
/* now do the rest of the resources */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->is_remote_node == TRUE) {
continue;
}
pe_rsc_trace(rsc, "Allocating %s resource '%s'",
crm_element_name(rsc->xml), rsc->id);
rsc->cmds->allocate(rsc, NULL, data_set);
}
}
/* We always use pe_order_preserve with these convenience functions to exempt
* internally generated constraints from the prohibition of user constraints
* involving remote connection resources.
*
* The start ordering additionally uses pe_order_runnable_left so that the
* specified action is not runnable if the start is not runnable.
*/
static inline void
order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_rsc && rh_action && data_set) {
custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
rh_action->rsc, NULL, rh_action,
pe_order_preserve | pe_order_runnable_left | extra,
data_set);
}
}
static inline void
order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_action && rh_rsc && data_set) {
custom_action_order(lh_action->rsc, NULL, lh_action,
rh_rsc, stop_key(rh_rsc), NULL,
pe_order_preserve | extra, data_set);
}
}
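/* Illustrative usage (a minimal sketch, not part of the original source):
 * order a remote connection's start before an action on its node, and an
 * action before the connection's stop:
 *
 *     order_start_then_action(remote_rsc, action, pe_order_none, data_set);
 *     order_action_then_stop(action, remote_rsc, pe_order_implies_first,
 *                            data_set);
 */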
// Clear fail counts for orphaned rsc on all online nodes
static void
cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (node->details->online
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe_action_t *clear_op = NULL;
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
data_set);
/* We can't use order_action_then_stop() here because its
* pe_order_preserve breaks things
*/
custom_action_order(clear_op->rsc, NULL, clear_op,
rsc, stop_key(rsc), NULL,
pe_order_optional, data_set);
}
}
}
gboolean
stage5(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
int log_prio = show_utilization? LOG_STDOUT : LOG_TRACE;
if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
GListPtr nodes = g_list_copy(data_set->nodes);
nodes = sort_nodes_by_weight(nodes, NULL, data_set);
data_set->resources =
g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
g_list_free(nodes);
}
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
dump_node_capacity(log_prio, "Original", node);
}
crm_trace("Allocating services");
/* Take (next) highest resource, assign it and create its actions */
allocate_resources(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
dump_node_capacity(log_prio, "Remaining", node);
}
// Process deferred action checks
pe__foreach_param_check(data_set, check_params);
pe__free_param_checks(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
crm_trace("Calculating needed probes");
/* This code probably needs optimization
* ptest -x with 100 nodes, 100 clones and clone-max=100:
With probes:
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
36s
ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
Without probes:
ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
*/
probe_resources(data_set);
}
crm_trace("Handle orphans");
if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* There's no need to recurse into rsc->children because those
* should just be unallocated clone instances.
*/
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
cleanup_orphans(rsc, data_set);
}
}
}
crm_trace("Creating actions");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->cmds->create_actions(rsc, data_set);
}
crm_trace("Creating done");
return TRUE;
}
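// Check whether a resource or any of its descendants is managed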
static gboolean
is_managed(const pe_resource_t * rsc)
{
GListPtr gIter = rsc->children;
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (is_managed(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
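// Check whether the cluster has any managed resource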
static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
GListPtr gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (is_managed(rsc)) {
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Create pseudo-op for guest node fence, and order relative to it
*
* \param[in] node Guest node to fence
* \param[in] data_set Working set of CIB state
*/
static void
fence_guest(pe_node_t *node, pe_working_set_t *data_set)
{
pe_resource_t *container = node->details->remote_rsc->container;
pe_action_t *stop = NULL;
pe_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than letting it default
* to the cluster's default action, because we are not _initiating_ fencing
* -- we are creating a pseudo-event to describe fencing that is already
* occurring by other means (container recovery).
*/
const char *fence_action = "off";
/* Check whether the guest's container resource has any explicit stop or
* start (the stop may be implied by fencing of the guest's host).
*/
if (container) {
stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
fence_action = "reboot";
}
}
/* Create a fence pseudo-event, so we have an event to order actions
* against, and the controller can always detect it.
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, data_set);
update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
__func__, __LINE__);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, data_set);
crm_info("Implying guest node %s is down (action %d) after %s fencing",
node->details->uname, stonith_op->id, stop->node->details->uname);
order_actions(parent_stonith_op, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
crm_info("Implying guest node %s is down (action %d) "
"after container %s is stopped (action %d)",
node->details->uname, stonith_op->id,
container->id, stop->id);
} else {
/* If we're fencing the guest node but there's no stop for the guest
* resource, we must think the guest is already stopped. However, we may
* think so because its resource history was just cleaned. To avoid
* unnecessarily considering the guest node down if it's really up,
* order the pseudo-fencing after any stop of the connection resource,
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
RSC_STOP, NULL);
if (stop) {
order_actions(stop, stonith_op, pe_order_optional);
crm_info("Implying guest node %s is down (action %d) "
"after connection is stopped (action %d)",
node->details->uname, stonith_op->id, stop->id);
} else {
/* Not sure why we're fencing, but everything must already be
* cleanly stopped.
*/
crm_info("Implying guest node %s is down (action %d) ",
node->details->uname, stonith_op->id);
}
}
/* Order/imply other actions relative to pseudo-fence as with real fence */
pcmk__order_vs_fence(stonith_op, data_set);
}
/*
* Create dependencies for stonith and shutdown operations
*/
gboolean
stage6(pe_working_set_t * data_set)
{
pe_action_t *dc_down = NULL;
pe_action_t *stonith_op = NULL;
gboolean integrity_lost = FALSE;
gboolean need_stonith = TRUE;
GListPtr gIter;
GListPtr stonith_ops = NULL;
GList *shutdown_ops = NULL;
/* Remote ordering constraints need to be applied prior to calculating
* fencing because they are one more place where a node may be marked as
* dirty.
*
* A nice side effect of doing them early is that apply_*_ordering() can be
* simpler because pe_fence_node() has already done some of the work.
*/
crm_trace("Creating remote ordering constraints");
apply_remote_node_ordering(data_set);
crm_trace("Processing fencing and shutdown cases");
if (any_managed_resources(data_set) == FALSE) {
crm_notice("Delaying fencing operations until there are resources to manage");
need_stonith = FALSE;
}
/* Check each node for stonith/shutdown */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && need_stonith
&& pe_can_fence(data_set, node)) {
fence_guest(node, data_set);
}
continue;
}
stonith_op = NULL;
if (node->details->unclean
&& need_stonith && pe_can_fence(data_set, node)) {
stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
pe_warn("Scheduling Node %s for STONITH", node->details->uname);
pcmk__order_vs_fence(stonith_op, data_set);
if (node->details->is_dc) {
// Remember if the DC is being fenced
dc_down = stonith_op;
} else {
if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
&& (stonith_ops != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pe_action_t *) stonith_ops->data,
stonith_op, pe_order_optional);
}
// Remember all non-DC fencing actions in a separate list
stonith_ops = g_list_prepend(stonith_ops, stonith_op);
}
} else if (node->details->online && node->details->shutdown &&
/* TODO define what a shutdown op means for a remote node.
* For now we do not send shutdown operations for remote nodes, but
* if we can come up with a good use for this in the future, we will. */
pe__is_guest_or_remote_node(node) == FALSE) {
pe_action_t *down_op = sched_shutdown_op(node, data_set);
if (node->details->is_dc) {
// Remember if the DC is being shut down
dc_down = down_op;
} else {
// Remember non-DC shutdowns for later ordering
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if (node->details->unclean && stonith_op == NULL) {
integrity_lost = TRUE;
pe_warn("Node %s is unclean!", node->details->uname);
}
}
if (integrity_lost) {
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
crm_notice("Cannot fence unclean nodes until quorum is"
" attained (or no-quorum-policy is set to ignore)");
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
pe_action_t *node_stop = (pe_action_t *) gIter->data;
crm_debug("Ordering shutdown on %s before %s on DC %s",
node_stop->node->details->uname,
dc_down->task, dc_down->node->details->uname);
order_actions(node_stop, dc_down, pe_order_optional);
}
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
order_actions((pe_action_t *) gIter->data, dc_down,
pe_order_optional);
}
} else if (stonith_ops) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pe_action_t *) stonith_ops->data, dc_down,
pe_order_optional);
}
}
g_list_free(stonith_ops);
g_list_free(shutdown_ops);
return TRUE;
}
/*
* Determine the sets of independent actions and the correct order for the
* actions in each set.
*
* Mark the dependencies of unrunnable actions as unrunnable.
*/
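/*!
* \internal
* \brief Find actions in a list matching an operation key
*
* If no actions match the key as given, retry with the key rewritten for
* \p rsc, to cover the case of searching a child of the original resource.
*
* \param[in] actions List of actions to search
* \param[in] rsc Resource whose ID should be used when rewriting the key
* \param[in] original_key Operation key to search for
*
* \return List of matching actions (NULL if none)
*/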
static GListPtr
find_actions_by_task(GListPtr actions, pe_resource_t * rsc, const char *original_key)
{
GListPtr list = NULL;
list = find_actions(actions, original_key, NULL);
if (list == NULL) {
/* we're potentially searching a child of the original resource */
char *key = NULL;
char *task = NULL;
guint interval_ms = 0;
if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
key = pcmk__op_key(rsc->id, task, interval_ms);
list = find_actions(actions, key, NULL);
} else {
crm_err("search key: %s", original_key);
}
free(key);
free(task);
}
return list;
}
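/*!
* \internal
* \brief Order a 'first' action before the matching actions of a 'then'
* resource, per an ordering constraint
*
* If there is no 'first' action and the constraint implies the 'then' side,
* mark the 'then' actions unrunnable instead.
*/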
static void
rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
pe__ordering_t *order)
{
GListPtr gIter = NULL;
GListPtr rh_actions = NULL;
pe_action_t *rh_action = NULL;
enum pe_ordering type;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(order != NULL, return);
type = order->type;
rh_action = order->rh_action;
crm_trace("Processing RH of ordering constraint %d", order->id);
if (rh_action != NULL) {
rh_actions = g_list_prepend(NULL, rh_action);
} else if (rsc != NULL) {
rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
}
if (rh_actions == NULL) {
pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
" ignoring", rsc->id, order->rh_action_task);
if (lh_action) {
pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
}
return;
}
if ((lh_action != NULL) && (lh_action->rsc == rsc)
&& pcmk_is_set(lh_action->flags, pe_action_dangle)) {
pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
order->rh_action_task);
pe__clear_order_flags(type, pe_order_implies_then);
}
gIter = rh_actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) gIter->data;
if (lh_action) {
order_actions(lh_action, rh_action_iter, type);
} else if (type & pe_order_implies_then) {
update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
} else {
crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
}
}
g_list_free(rh_actions);
}
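/*!
* \internal
* \brief Find (or create if appropriate) the 'first' actions for an ordering
* constraint, and order each of them relative to the 'then' side
*/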
static void
rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
GListPtr lh_actions = NULL;
pe_action_t *lh_action = order->lh_action;
pe_resource_t *rh_rsc = order->rh_rsc;
crm_trace("Processing LH of ordering constraint %d", order->id);
CRM_ASSERT(lh_rsc != NULL);
if (lh_action != NULL) {
lh_actions = g_list_prepend(NULL, lh_action);
} else {
lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
}
if (lh_actions == NULL && lh_rsc != rh_rsc) {
char *key = NULL;
char *op_type = NULL;
guint interval_ms = 0;
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = pcmk__op_key(lh_rsc->id, op_type, interval_ms);
if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else {
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
lh_actions = g_list_prepend(NULL, lh_action);
}
free(op_type);
}
gIter = lh_actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *lh_action_iter = (pe_action_t *) gIter->data;
if (rh_rsc == NULL && order->rh_action) {
rh_rsc = order->rh_action->rsc;
}
if (rh_rsc) {
rsc_order_then(lh_action_iter, rh_rsc, order);
} else if (order->rh_action) {
order_actions(lh_action_iter, order->rh_action, order->type);
}
}
g_list_free(lh_actions);
}
extern void update_colo_start_chain(pe_action_t *action,
pe_working_set_t *data_set);
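// Return 1 if an action has a nonzero interval (i.e. is recurring), else 0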
static int
is_recurring_action(pe_action_t *action)
{
guint interval_ms;
if (pcmk__guint_from_hash(action->meta,
XML_LRM_ATTR_INTERVAL_MS, 0,
&interval_ms) != pcmk_rc_ok) {
return 0;
}
return (interval_ms > 0);
}
static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
/* VMs are also classified as containers for these purposes, in
* that both involve a 'thing' running on a real or remote
* cluster node.
*
* This allows us to be smarter about the type and extent of
* recovery actions required in various scenarios.
*/
pe_resource_t *remote_rsc = NULL;
pe_resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
CRM_ASSERT(action->rsc);
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
container = remote_rsc->container;
CRM_ASSERT(container);
if (pcmk_is_set(container->flags, pe_rsc_failed)) {
pe_fence_node(data_set, action->node, "container failed", FALSE);
}
crm_trace("Order %s action %s relative to %s%s for %s%s",
action->task, action->uuid,
pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id,
pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
container->id);
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
/* Force resource recovery if the container is recovered */
order_start_then_action(container, action, pe_order_implies_then,
data_set);
/* Wait for the connection resource to be up too */
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
break;
case stop_rsc:
case action_demote:
if (pcmk_is_set(container->flags, pe_rsc_failed)) {
/* When the container representing a guest node fails, any stop
* or demote actions for resources running on the guest node
* are implied by the container stopping. This is similar to
* how fencing operations work for cluster nodes and remote
* nodes.
*/
} else {
/* Ensure the operation happens before the connection is brought
* down.
*
* If we really wanted to, we could order these after the
* connection start, IFF the container's current role was
* stopped (otherwise we re-introduce an ordering loop when the
* connection is restarting).
*/
order_action_then_stop(action, remote_rsc, pe_order_none,
data_set);
}
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
if(task != no_action) {
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
}
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
break;
}
}
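/*!
* \internal
* \brief Determine the ordering-relevant state of a remote node's connection
*
* \param[in] node Pacemaker Remote node whose connection to check
*
* \return State of the node's remote connection resource
*/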
static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
pe_resource_t *remote_rsc = NULL;
pe_node_t *cluster_node = NULL;
CRM_ASSERT(node);
remote_rsc = node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
cluster_node = pe__current_node(remote_rsc);
/* If the cluster node that the remote connection resource resides on
* is unclean or has gone offline, we can't process any operations
* on that remote node until after the connection starts elsewhere.
*/
if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
/* The connection resource is not going to run anywhere */
if (cluster_node && cluster_node->details->unclean) {
/* The remote connection is failed because its resource is on a
* failed node and can't be recovered elsewhere, so we must fence.
*/
return remote_state_failed;
}
if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
/* Connection resource is cleanly stopped */
return remote_state_stopped;
}
/* Connection resource is failed */
if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
&& remote_rsc->remote_reconnect_ms
&& node->details->remote_was_fenced
&& !pe__shutdown_requested(node)) {
/* We won't know whether the connection is recoverable until the
* reconnect interval expires and we reattempt connection.
*/
return remote_state_unknown;
}
/* The remote connection is in a failed state. If there are any
* resources known to be active on it (stop) or in an unknown state
* (probe), we must assume the worst and fence it.
*/
return remote_state_failed;
} else if (cluster_node == NULL) {
/* Connection is recoverable but not currently running anywhere; see if we can recover it first */
return remote_state_unknown;
} else if(cluster_node->details->unclean == TRUE
|| cluster_node->details->online == FALSE) {
/* Connection is running on a dead node; see if we can recover it first */
return remote_state_resting;
} else if (pcmk__list_of_multiple(remote_rsc->running_on)
&& remote_rsc->partial_migration_source
&& remote_rsc->partial_migration_target) {
/* We're in the middle of migrating a connection resource,
* wait until after the resource migrates before performing
* any actions.
*/
return remote_state_resting;
}
return remote_state_alive;
}
/*!
* \internal
* \brief Order actions on remote node relative to actions for the connection
*/
static void
apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
pe_resource_t *remote_rsc = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = get_remote_node_state(action->node);
enum pe_ordering order_opts = pe_order_none;
if (action->rsc == NULL) {
return;
}
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
crm_trace("Order %s action %s relative to %s%s (state: %s)",
action->task, action->uuid,
pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id, state2text(state));
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
order_opts = pe_order_none;
if (state == remote_state_failed) {
/* Force recovery, by making this action required */
pe__set_order_flags(order_opts, pe_order_implies_then);
}
/* Ensure connection is up before running this action */
order_start_then_action(remote_rsc, action, order_opts, data_set);
break;
case stop_rsc:
if(state == remote_state_alive) {
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else if(state == remote_state_failed) {
/* The resource is active on the node, but since we don't have a
* valid connection, the only way to stop the resource is by
* fencing the node. There is no need to order the stop relative
* to the remote connection, since the stop will become implied
* by the fencing.
*/
pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable", FALSE);
} else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
/* State must be remote_state_unknown or remote_state_stopped.
* Since the connection is not coming back up in this
* transition, stop this resource first.
*/
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else {
/* The connection is going to be started somewhere else, so
* stop this resource after that completes.
*/
order_start_then_action(remote_rsc, action, pe_order_none, data_set);
}
break;
case action_demote:
/* Only order this demote relative to the connection start if the
* connection isn't being torn down. Otherwise, the demote would be
* blocked because the connection start would not be allowed.
*/
if(state == remote_state_resting || state == remote_state_unknown) {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
} /* Otherwise we can rely on the stop ordering */
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
} else {
pe_node_t *cluster_node = pe__current_node(remote_rsc);
if(task == monitor_rsc && state == remote_state_failed) {
/* We would only be here if we do not know the
* state of the resource on the remote node.
* Since we have no way to find out, it is
* necessary to fence the node.
*/
pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable", FALSE);
}
if(cluster_node && state == remote_state_stopped) {
/* The connection is currently up, but is going
* down permanently.
*
* Make sure we check services are actually
* stopped _before_ we let the connection get
* closed
*/
order_action_then_stop(action, remote_rsc,
pe_order_runnable_left, data_set);
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
}
break;
}
}
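/*!
* \internal
* \brief Order all actions on Pacemaker Remote nodes relative to the
* relevant connection (or container) resources
*
* \param[in] data_set Working set of CIB state
*/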
static void
apply_remote_node_ordering(pe_working_set_t *data_set)
{
if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
return;
}
for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
pe_resource_t *remote = NULL;
// We are only interested in resource actions
if (action->rsc == NULL) {
continue;
}
/* Special case: If we are clearing the failcount of an actual
* remote connection resource, then make sure this happens before
* any start of the resource in this transition.
*/
if (action->rsc->is_remote_node &&
pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
custom_action_order(action->rsc,
NULL,
action,
action->rsc,
pcmk__op_key(action->rsc->id, RSC_START, 0),
NULL,
pe_order_optional,
data_set);
continue;
}
// We are only interested in actions allocated to a node
if (action->node == NULL) {
continue;
}
if (!pe__is_guest_or_remote_node(action->node)) {
continue;
}
/* We are only interested in real actions.
*
* @TODO This is probably wrong; pseudo-actions might be converted to
* real actions and vice versa later in update_actions() at the end of
* stage7().
*/
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
continue;
}
remote = action->node->details->remote_rsc;
if (remote == NULL) {
// Orphaned
continue;
}
/* Another special case: if a resource is moving to a Pacemaker Remote
* node, order the stop on the original node after any start of the
* remote connection. This ensures that if the connection fails to
* start, we leave the resource running on the original node.
*/
if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
for (GList *item = action->rsc->actions; item != NULL;
item = item->next) {
pe_action_t *rsc_action = item->data;
if ((rsc_action->node->details != action->node->details)
&& pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
custom_action_order(remote, start_key(remote), NULL,
action->rsc, NULL, rsc_action,
pe_order_optional, data_set);
}
}
}
/* The action occurs across a remote connection, so create
* ordering constraints that guarantee the action occurs while the node
* is active (after start, before stop ... things like that).
*
* This is somewhat brittle in that we need to make sure the results of
* this ordering are compatible with the result of get_router_node().
* It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
* of this logic rather than action2xml().
*/
if (remote->container) {
crm_trace("Container ordering for %s", action->uuid);
apply_container_ordering(action, data_set);
} else {
crm_trace("Remote ordering for %s", action->uuid);
apply_remote_ordering(action, data_set);
}
}
}
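// Check whether an ordering from a probe to another action can be skipped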
static gboolean
order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
{
/* No need to probe the resource on the node that is being
* unfenced. Otherwise it might introduce a transition loop,
* since the probe would be performed after the node is
* unfenced.
*/
if (pcmk__str_eq(rh_action->task, CRM_OP_FENCE, pcmk__str_casei)
&& probe->node && rh_action->node
&& probe->node->details == rh_action->node->details) {
const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
return TRUE;
}
}
// Shutdown waits for probe to complete only if it's on the same node
if ((pcmk__str_eq(rh_action->task, CRM_OP_SHUTDOWN, pcmk__str_casei))
&& probe->node && rh_action->node
&& probe->node->details != rh_action->node->details) {
return TRUE;
}
return FALSE;
}
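/*!
* \internal
* \brief For each ordering constraint whose 'first' action is a stop, order
* the stopped resource's probes before the constraint's 'then' actions
*
* \param[in] data_set Working set of CIB state
*/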
static void
order_first_probes_imply_stops(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
enum pe_ordering order_type = pe_order_optional;
pe_resource_t *lh_rsc = order->lh_rsc;
pe_resource_t *rh_rsc = order->rh_rsc;
pe_action_t *lh_action = order->lh_action;
pe_action_t *rh_action = order->rh_action;
const char *lh_action_task = order->lh_action_task;
const char *rh_action_task = order->rh_action_task;
GListPtr probes = NULL;
GListPtr rh_actions = NULL;
GListPtr pIter = NULL;
if (lh_rsc == NULL) {
continue;
} else if (rh_rsc && lh_rsc == rh_rsc) {
continue;
}
if (lh_action == NULL && lh_action_task == NULL) {
continue;
}
if (rh_action == NULL && rh_action_task == NULL) {
continue;
}
/* Technically probe is expected to return "not running", which could be
* the alternative of stop action if the status of the resource is
* unknown yet.
*/
if (lh_action && !pcmk__str_eq(lh_action->task, RSC_STOP, pcmk__str_casei)) {
continue;
} else if (lh_action == NULL
&& lh_action_task
&& !pcmk__ends_with(lh_action_task, "_" RSC_STOP "_0")) {
continue;
}
/* Do not probe the resource inside of a stopping container. Otherwise
* it might introduce a transition loop, since the probe would be
* performed after the container starts again.
*/
if (rh_rsc && lh_rsc->container == rh_rsc) {
if (rh_action && pcmk__str_eq(rh_action->task, RSC_STOP, pcmk__str_casei)) {
continue;
} else if (rh_action == NULL && rh_action_task
&& pcmk__ends_with(rh_action_task,"_" RSC_STOP "_0")) {
continue;
}
}
if (order->type == pe_order_none) {
continue;
}
// Preserve the order options for future filtering
if (pcmk_is_set(order->type, pe_order_apply_first_non_migratable)) {
pe__set_order_flags(order_type,
pe_order_apply_first_non_migratable);
}
if (pcmk_is_set(order->type, pe_order_same_node)) {
pe__set_order_flags(order_type, pe_order_same_node);
}
// Keep the order types for future filtering
if (order->type == pe_order_anti_colocation
|| order->type == pe_order_load) {
order_type = order->type;
}
probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
if (probes == NULL) {
continue;
}
if (rh_action) {
rh_actions = g_list_prepend(rh_actions, rh_action);
} else if (rh_rsc && rh_action_task) {
rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
}
if (rh_actions == NULL) {
g_list_free(probes);
continue;
}
crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
" (id=%d, type=%.6x)",
lh_action ? lh_action->uuid : lh_action_task,
rh_action ? rh_action->uuid : rh_action_task,
order->id, order->type);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
GListPtr rIter = NULL;
for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;
if (order_first_probe_unneeded(probe, rh_action_iter)) {
continue;
}
order_actions(probe, rh_action_iter, order_type);
}
}
g_list_free(rh_actions);
g_list_free(probes);
}
}
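/*!
* \internal
* \brief Order a probe before any restart or re-promotion it could trigger
*
* Recursively walk the ordering chain from \p after, ordering \p probe before
* the stops and demotes of any resource whose required start or promote the
* probed resource's start would enforce.
*
* \param[in] probe Probe action to order
* \param[in] after Action ordered after the probed resource
* \param[in] data_set Working set of CIB state
*/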
static void
order_first_probe_then_restart_repromote(pe_action_t * probe,
pe_action_t * after,
pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
bool interleave = FALSE;
pe_resource_t *compatible_rsc = NULL;
if (probe == NULL
|| probe->rsc == NULL
|| probe->rsc->variant != pe_native) {
return;
}
if (after == NULL
// Avoid running into any possible loop
|| pcmk_is_set(after->flags, pe_action_tracking)) {
return;
}
if (!pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
return;
}
pe__set_action_flags(after, pe_action_tracking);
crm_trace("Processing based on %s %s -> %s %s",
probe->uuid,
probe->node ? probe->node->details->uname: "",
after->uuid,
after->node ? after->node->details->uname : "");
if (after->rsc
/* It's better not to build a dependency directly on a clone/group.
* Instead, proceed through the ordering chain and build dependencies
* with its children.
*/
&& after->rsc->variant == pe_native
&& probe->rsc != after->rsc) {
GListPtr then_actions = NULL;
enum pe_ordering probe_order_type = pe_order_optional;
if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
} else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
}
for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *then = (pe_action_t *) gIter->data;
// Skip any pseudo-action (for example, one implied by fencing)
if (pcmk_is_set(then->flags, pe_action_pseudo)) {
continue;
}
order_actions(probe, then, probe_order_type);
}
g_list_free(then_actions);
}
if (after->rsc
&& after->rsc->variant > pe_group) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
XML_RSC_ATTR_INTERLEAVE);
interleave = crm_is_true(interleave_s);
if (interleave) {
/* For an interleaved clone, we should build a dependency only
* with the relevant clone child.
*/
compatible_rsc = find_compatible_child(probe->rsc,
after->rsc,
RSC_ROLE_UNKNOWN,
FALSE, data_set);
}
}
for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
/* pe_order_implies_then is the reason why a required A.start
* implies/enforces B.start to be required too, which is the cause of
* B.restart/re-promote.
*
* Not sure about pe_order_implies_then_on_node though. It's now only
* used for the unfencing case, which tends to introduce transition
* loops...
*/
if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
/* The order type between a group/clone and its child, such as
* B.start -> B_child.start, is:
* pe_order_implies_first_printed | pe_order_runnable_left
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if (after->rsc == NULL
|| after->rsc->variant < pe_group
|| probe->rsc->parent == after->rsc
|| after_wrapper->action->rsc == NULL
|| after_wrapper->action->rsc->variant > pe_group
|| after->rsc != after_wrapper->action->rsc->parent) {
continue;
}
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
if (after->rsc->variant > pe_group
&& interleave == TRUE
&& (compatible_rsc == NULL
|| compatible_rsc != after_wrapper->action->rsc)) {
continue;
}
}
crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
after->uuid,
after->node ? after->node->details->uname: "",
after_wrapper->action->uuid,
after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
after_wrapper->type);
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
}
}
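// Clear the pe_action_tracking flag (set while walking ordering chains)
// from all actions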
static void clear_actions_tracking_flag(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (pcmk_is_set(action->flags, pe_action_tracking)) {
pe__clear_action_flags(action, pe_action_tracking);
}
}
}
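// Order a resource's probes (and its children's, recursively) before any
// restarts or re-promotions they could trigger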
static void
order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
GListPtr probes = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t * child = (pe_resource_t *) gIter->data;
order_first_rsc_probes(child, data_set);
}
if (rsc->variant != pe_native) {
return;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (gIter = probes; gIter != NULL; gIter= gIter->next) {
pe_action_t *probe = (pe_action_t *) gIter->data;
GListPtr aIter = NULL;
for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
clear_actions_tracking_flag(data_set);
}
}
g_list_free(probes);
}
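// Order all resources' probes before the actions that depend on their results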
static void
order_first_probes(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
order_first_rsc_probes(rsc, data_set);
}
order_first_probes_imply_stops(data_set);
}
static void
order_then_probes(pe_working_set_t * data_set)
{
#if 0
GListPtr gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* Given "A then B", we would prefer to wait for A to be
* started before probing B.
*
* If A was a filesystem on which the binaries and data for B
* lived, it would have been useful if the author of B's agent
* could assume that A is running before B.monitor will be
* called.
*
* However we can't _only_ probe once A is running, otherwise
* we'd not detect the state of B if A could not be started
* for some reason.
*
* In practice however, we cannot even do an opportunistic
* version of this because B may be moving:
*
* B.probe -> B.start
* B.probe -> B.stop
* B.stop -> B.start
* A.stop -> A.start
* A.start -> B.probe
*
* So far so good, but if we add the result of this code:
*
* B.stop -> A.stop
*
* Then we get a loop:
*
* B.probe -> B.stop -> A.stop -> A.start -> B.probe
*
* We could kill the 'B.probe -> B.stop' dependency, but that
* could mean stopping B "too" soon, because B.start must wait
* for the probes to complete.
*
* Another option is to allow it only if A is a non-unique
* clone with clone-max == node-max (since we'll never be
* moving it). However, we could still be stopping one
* instance at the same time as starting another.
* The complexity of checking for allowed conditions, combined
* with the ever-narrowing use case, suggests that this code
* should remain disabled until someone gets smarter.
*/
pe_action_t *start = NULL;
GListPtr actions = NULL;
GListPtr probes = NULL;
actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
if (actions) {
start = actions->data;
g_list_free(actions);
}
if(start == NULL) {
crm_err("No start action for %s", rsc->id);
continue;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (actions = start->actions_before; actions != NULL; actions = actions->next) {
pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
GListPtr pIter = NULL;
pe_action_t *first = before->action;
pe_resource_t *first_rsc = first->rsc;
if(first->required_runnable_before) {
GListPtr clone_actions = NULL;
for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
before = (pe_action_wrapper_t *) clone_actions->data;
crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
CRM_ASSERT(before->action->rsc);
first_rsc = before->action->rsc;
break;
}
} else if(!pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
if(first_rsc == NULL) {
continue;
} else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
continue;
} else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
continue;
}
crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
order_actions(first, probe, pe_order_optional);
}
}
}
#endif
}
static void
order_probes(pe_working_set_t * data_set)
{
order_first_probes(data_set);
order_then_probes(data_set);
}
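/*!
* \internal
* \brief Apply all ordering constraints, order probes, and update the
* flags of all actions accordingly
*
* \param[in] data_set Working set of CIB state
*
* \return Always TRUE
*/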
gboolean
stage7(pe_working_set_t * data_set)
{
GList *gIter = NULL;
crm_trace("Applying ordering constraints");
/* Don't ask me why, but apparently they need to be processed in
* the order they were created in... go figure.
*
* Also, g_list_append() has horrendous performance characteristics,
* so we use g_list_prepend() and then reverse the list here.
*/
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
pe_resource_t *rsc = order->lh_rsc;
crm_trace("Applying ordering constraint: %d", order->id);
if (rsc != NULL) {
crm_trace("rsc_action-to-*");
rsc_order_first(rsc, order, data_set);
continue;
}
rsc = order->rh_rsc;
if (rsc != NULL) {
crm_trace("action-to-rsc_action");
rsc_order_then(order->lh_action, rsc, order);
} else {
crm_trace("action-to-action");
order_actions(order->lh_action, order->rh_action, order->type);
}
}
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
update_colo_start_chain(action, data_set);
}
crm_trace("Ordering probes");
order_probes(data_set);
crm_trace("Updating %d actions", g_list_length(data_set->actions));
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
update_action(action, data_set);
}
// Check for invalid orderings
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
pe_action_wrapper_t *input = NULL;
for (GList *input_iter = action->actions_before;
input_iter != NULL; input_iter = input_iter->next) {
input = (pe_action_wrapper_t *) input_iter->data;
if (pcmk__ordering_is_invalid(action, input)) {
input->type = pe_order_none;
}
}
}
LogNodeActions(data_set, FALSE);
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
LogActions(rsc, data_set, FALSE);
}
return TRUE;
}
static int transition_id = -1;
/*!
* \internal
* \brief Log a message after calculating a transition
*
* \param[in] filename Where transition input is stored
*/
void
pcmk__log_transition_summary(const char *filename)
{
if (was_processing_error) {
crm_err("Calculated transition %d (with errors), saving inputs in %s",
transition_id, filename);
} else if (was_processing_warning) {
crm_warn("Calculated transition %d (with warnings), saving inputs in %s",
transition_id, filename);
} else {
crm_notice("Calculated transition %d, saving inputs in %s",
transition_id, filename);
}
if (pcmk__config_error) {
crm_notice("Configuration errors found during scheduler processing,"
" please run \"crm_verify -L\" to identify issues");
}
}
/*
* Create a dependency graph to send to the transitioner (via the controller)
*/
gboolean
stage8(pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
const char *value = NULL;
transition_id++;
crm_trace("Creating transition graph %d.", transition_id);
data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
value = pe_pref(data_set->config_hash, "cluster-delay");
crm_xml_add(data_set->graph, "cluster-delay", value);
value = pe_pref(data_set->config_hash, "stonith-timeout");
crm_xml_add(data_set->graph, "stonith-timeout", value);
crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(data_set->graph, "failed-start-offset", "1");
}
value = pe_pref(data_set->config_hash, "batch-limit");
crm_xml_add(data_set->graph, "batch-limit", value);
crm_xml_add_int(data_set->graph, "transition_id", transition_id);
value = pe_pref(data_set->config_hash, "migration-limit");
if (crm_parse_ll(value, NULL) > 0) {
crm_xml_add(data_set->graph, "migration-limit", value);
}
if (data_set->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
(long long) data_set->recheck_by);
crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
/* errors...
slist_iter(action, pe_action_t, action_list, lpc,
if(action->optional == FALSE && action->runnable == FALSE) {
print_action("Ignoring", action, TRUE);
}
);
*/
/* The following code will de-duplicate action inputs, so nothing past this
* should rely on the action input type flags retaining their original
* values.
*/
gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
rsc->cmds->expand(rsc, data_set);
}
crm_log_xml_trace(data_set->graph, "created resource-driven action list");
/* Add a pseudo-action to distribute the list of nodes with maintenance state updates */
add_maintenance_update(data_set);
/* Catch any non-resource-specific actions */
crm_trace("processing non-resource actions");
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->rsc
&& action->node
&& action->node->details->shutdown
&& !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
pe_action_optional|pe_action_runnable)
&& pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)
) {
/* Eventually we should just ignore the 'fence' case,
* but for now it's the best way to detect (in CTS) when
* CIB resource updates are being lost.
*/
if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
|| data_set->no_quorum_policy == no_quorum_ignore) {
crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
action->node->details->unclean ? "fence" : "shut down",
action->node->details->uname, action->rsc->id,
pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
action->uuid);
}
}
graph_element_from_action(action, data_set);
}
crm_log_xml_trace(data_set->graph, "created generic action list");
crm_trace("Created transition graph %d.", transition_id);
return TRUE;
}
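/*!
* \internal
* \brief Log scheduled node-level actions (shutdown and fencing)
*
* \param[in] data_set Working set of CIB state
* \param[in] terminal If TRUE, print to stdout instead of the log
*/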
void
LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
{
GListPtr gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
char *node_name = NULL;
char *task = NULL;
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->rsc != NULL) {
continue;
} else if (pcmk_is_set(action->flags, pe_action_optional)) {
continue;
}
if (pe__is_guest_node(action->node)) {
node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
} else if(action->node) {
node_name = crm_strdup_printf("%s", action->node->details->uname);
}
if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
}
if(task == NULL) {
/* Nothing to report */
} else if(terminal && action->reason) {
printf(" * %s %s '%s'\n", task, node_name, action->reason);
} else if(terminal) {
printf(" * %s %s\n", task, node_name);
} else if(action->reason) {
crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
} else {
crm_notice(" * %s %s\n", task, node_name);
}
free(node_name);
free(task);
}
}
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 48ca783b3d..1a361bc089 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1086 +1,1084 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#define PE__VARIANT_BUNDLE 1
#include <lib/pengine/variant.h>
static bool
is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
{
for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (node->details == replica->node->details) {
return TRUE;
}
}
return FALSE;
}
gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
void distribute_children(pe_resource_t *rsc, GListPtr children, GListPtr nodes,
int max, int per_host_max, pe_working_set_t * data_set);
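// Return a new list of a bundle's containers (NULL if rsc is not a bundle)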
static GList *
get_container_list(pe_resource_t *rsc)
{
GList *containers = NULL;
if (rsc->variant == pe_container) {
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
for (GList *gIter = data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
containers = g_list_append(containers, replica->container);
}
}
return containers;
}
static inline GList *
get_containers_or_children(pe_resource_t *rsc)
{
return (rsc->variant == pe_container)?
get_container_list(rsc) : rsc->children;
}
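/*!
* \internal
* \brief Check whether a resource's failures on a node have reached its
* migration threshold
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
* \param[in] data_set Working set of CIB state
*
* \return TRUE if the resource must be forced away from the node
*/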
static bool
migration_threshold_reached(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return FALSE;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
return FALSE;
}
/* If there are no failures, there's no need to force away */
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL,
data_set);
if (fail_count <= 0) {
return FALSE;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
if (countdown == 0) {
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
rsc->id, node->details->uname, fail_count,
rsc->migration_threshold);
return TRUE;
}
crm_info("%s can fail %d more times on %s before being forced off",
rsc->id, countdown, node->details->uname);
return FALSE;
}
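/*!
* \internal
* \brief Allocate a bundle's replicas (and its child, if any) to nodes
*
* \param[in] rsc Bundle resource to allocate
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] data_set Working set of CIB state
*
* \return NULL (bundles are not allocated to a single node)
*/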
pe_node_t *
pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GListPtr containers = NULL;
GListPtr nodes = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return NULL);
get_bundle_variant_data(bundle_data, rsc);
pe__set_resource_flags(rsc, pe_rsc_allocating);
containers = get_container_list(rsc);
pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes);
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = sort_nodes_by_weight(nodes, NULL, data_set);
containers = g_list_sort_with_data(containers, sort_clone_instance, data_set);
distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
bundle_data->nreplicas_per_host, data_set);
g_list_free(nodes);
g_list_free(containers);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
pe_node_t *container_host = NULL;
CRM_ASSERT(replica);
if (replica->ip) {
pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
rsc->id, replica->ip->id);
replica->ip->cmds->allocate(replica->ip, prefer, data_set);
}
container_host = replica->container->allocated_to;
if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
/* We need 'nested' connection resources to be on the same
* host because pacemaker-remoted only supports a single
* active connection
*/
rsc_colocation_new("child-remote-with-docker-remote", NULL,
INFINITY, replica->remote,
container_host->details->remote_rsc, NULL, NULL,
data_set);
}
if (replica->remote) {
pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
rsc->id, replica->remote->id);
replica->remote->cmds->allocate(replica->remote, prefer,
data_set);
}
// Explicitly allocate replicas' children before bundle child
if (replica->child) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
if (node->details != replica->node->details) {
node->weight = -INFINITY;
} else if (!migration_threshold_reached(replica->child, node,
data_set)) {
node->weight = INFINITY;
}
}
pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
rsc->id, replica->child->id);
replica->child->cmds->allocate(replica->child, replica->node,
data_set);
pe__clear_resource_flags(replica->child->parent,
pe_rsc_allocating);
}
}
if (bundle_data->child) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
if (is_bundle_node(bundle_data, node)) {
node->weight = 0;
} else {
node->weight = -INFINITY;
}
}
pe_rsc_trace(rsc, "Allocating bundle %s child %s",
rsc->id, bundle_data->child->id);
bundle_data->child->cmds->allocate(bundle_data->child, prefer, data_set);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
return NULL;
}
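// Create all actions needed for a bundle's replicas and its child resource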
void
pcmk__bundle_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
GListPtr containers = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
containers = get_container_list(rsc);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip) {
replica->ip->cmds->create_actions(replica->ip, data_set);
}
if (replica->container) {
replica->container->cmds->create_actions(replica->container,
data_set);
}
if (replica->remote) {
replica->remote->cmds->create_actions(replica->remote, data_set);
}
}
clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set);
if (bundle_data->child) {
bundle_data->child->cmds->create_actions(bundle_data->child, data_set);
if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
/* promote */
action = create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set);
action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set);
action->priority = INFINITY;
/* demote */
action = create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set);
action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set);
action->priority = INFINITY;
}
}
g_list_free(containers);
}
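// Create a bundle's implicit internal ordering and colocation constraints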
void
pcmk__bundle_internal_constraints(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
if (bundle_data->child) {
new_rsc_order(rsc, RSC_START, bundle_data->child, RSC_START,
pe_order_implies_first_printed, data_set);
new_rsc_order(rsc, RSC_STOP, bundle_data->child, RSC_STOP,
pe_order_implies_first_printed, data_set);
if (bundle_data->child->children) {
new_rsc_order(bundle_data->child, RSC_STARTED, rsc, RSC_STARTED,
pe_order_implies_then_printed, data_set);
new_rsc_order(bundle_data->child, RSC_STOPPED, rsc, RSC_STOPPED,
pe_order_implies_then_printed, data_set);
} else {
new_rsc_order(bundle_data->child, RSC_START, rsc, RSC_STARTED,
pe_order_implies_then_printed, data_set);
new_rsc_order(bundle_data->child, RSC_STOP, rsc, RSC_STOPPED,
pe_order_implies_then_printed, data_set);
}
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
CRM_ASSERT(replica->container);
replica->container->cmds->internal_constraints(replica->container,
data_set);
order_start_start(rsc, replica->container,
pe_order_runnable_left|pe_order_implies_first_printed);
if (replica->child) {
order_stop_stop(rsc, replica->child,
pe_order_implies_first_printed);
}
order_stop_stop(rsc, replica->container,
pe_order_implies_first_printed);
new_rsc_order(replica->container, RSC_START, rsc, RSC_STARTED,
pe_order_implies_then_printed, data_set);
new_rsc_order(replica->container, RSC_STOP, rsc, RSC_STOPPED,
pe_order_implies_then_printed, data_set);
if (replica->ip) {
replica->ip->cmds->internal_constraints(replica->ip, data_set);
// Start ip then container
new_rsc_order(replica->ip, RSC_START, replica->container, RSC_START,
pe_order_runnable_left|pe_order_preserve, data_set);
new_rsc_order(replica->container, RSC_STOP, replica->ip, RSC_STOP,
pe_order_implies_first|pe_order_preserve, data_set);
rsc_colocation_new("ip-with-docker", NULL, INFINITY, replica->ip,
replica->container, NULL, NULL, data_set);
}
if (replica->remote) {
/* This handles ordering and colocating remote relative to container
* (via "resource-with-container"). Since IP is also ordered and
* colocated relative to the container, we don't need to do anything
* explicit here with IP.
*/
replica->remote->cmds->internal_constraints(replica->remote,
data_set);
}
if (replica->child) {
CRM_ASSERT(replica->remote);
// "Start remote then child" is implicit in scheduler's remote logic
}
}
if (bundle_data->child) {
bundle_data->child->cmds->internal_constraints(bundle_data->child, data_set);
if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
promote_demote_constraints(rsc, data_set);
/* child demoted before global demoted */
new_rsc_order(bundle_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED,
pe_order_implies_then_printed, data_set);
/* global demote before child demote */
new_rsc_order(rsc, RSC_DEMOTE, bundle_data->child, RSC_DEMOTE,
pe_order_implies_first_printed, data_set);
/* child promoted before global promoted */
new_rsc_order(bundle_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED,
pe_order_implies_then_printed, data_set);
/* global promote before child promote */
new_rsc_order(rsc, RSC_PROMOTE, bundle_data->child, RSC_PROMOTE,
pe_order_implies_first_printed, data_set);
}
} else {
// int type = pe_order_optional | pe_order_implies_then | pe_order_restart;
// custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
// rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set);
}
}
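/* Effect sketch (illustration only, not part of this diff): for each replica,
 * the function above builds ordering and colocation along the lines of
 *
 *   bundle start   -> container start -> bundle "started"
 *   ip start       -> container start   (pe_order_runnable_left)
 *   container stop -> ip stop           (pe_order_implies_first)
 *   "ip-with-docker" colocation at INFINITY
 *
 * so a replica's IP and container are always scheduled as a unit, and the
 * bundle's own pseudo-actions bracket its members.
 */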
static pe_resource_t *
compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate,
pe_resource_t *rsc, enum rsc_role_e filter,
gboolean current)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(candidate != NULL, return NULL);
get_bundle_variant_data(bundle_data, rsc);
crm_trace("Looking for compatible child from %s for %s on %s",
rsc_lh->id, rsc->id, candidate->details->uname);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (is_child_compatible(replica->container, candidate, filter, current)) {
crm_trace("Pairing %s with %s on %s",
rsc_lh->id, replica->container->id,
candidate->details->uname);
return replica->container;
}
}
crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
return NULL;
}
static pe_resource_t *
compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc,
enum rsc_role_e filter, gboolean current,
pe_working_set_t *data_set)
{
GListPtr scratch = NULL;
pe_resource_t *pair = NULL;
pe_node_t *active_node_lh = NULL;
active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
if (active_node_lh) {
return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
current);
}
scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
scratch = sort_nodes_by_weight(scratch, NULL, data_set);
for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
if (pair) {
goto done;
}
}
pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
done:
g_list_free(scratch);
return pair;
}
void
pcmk__bundle_rsc_colocation_lh(pe_resource_t *rsc, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
/* -- Never called --
*
* Instead we add the colocation constraints to the child and call from there
*/
CRM_ASSERT(FALSE);
}
int copies_per_node(pe_resource_t * rsc)
{
/* Strictly speaking, there should be a 'copies_per_node' addition
 * to the resource function table, with each case below becoming its
 * own function. However, that would be serious overkill just to
 * return an int. In fact, both function tables could arguably be
 * replaced by resources.{c,h} full of rsc_{some_operation} functions,
 * each containing a switch like the one below that calls out to
 * {variant}_{some_operation} functions as needed.
 */
switch(rsc->variant) {
case pe_unknown:
return 0;
case pe_native:
case pe_group:
return 1;
case pe_clone:
{
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
return crm_parse_int(max_clones_node, "1");
}
case pe_container:
{
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
return data->nreplicas_per_host;
}
}
return 0;
}
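/* Illustrative values (sketch): a primitive or group always yields 1; a clone
 * with clone-node-max=2 yields 2; a bundle yields its configured
 * replicas-per-host; an unknown variant yields 0.
 */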
void
pcmk__bundle_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
GListPtr allocated_rhs = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(constraint != NULL, return);
CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return);
CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return);
CRM_ASSERT(rsc_lh->variant == pe_native);
if (constraint->score == 0) {
return;
}
if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
pe_rsc_trace(rsc, "%s is still provisional", rsc->id);
return;
} else if(constraint->rsc_lh->variant > pe_group) {
pe_resource_t *rh_child = compatible_replica(rsc_lh, rsc,
RSC_ROLE_UNKNOWN, FALSE,
data_set);
if (rh_child) {
pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id);
rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint,
data_set);
} else if (constraint->score >= INFINITY) {
crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
assign_node(rsc_lh, NULL, TRUE);
} else {
pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id);
}
return;
}
get_bundle_variant_data(bundle_data, rsc);
pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d",
constraint->id, rsc_lh->id, rsc->id, constraint->score);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (constraint->score < INFINITY) {
replica->container->cmds->rsc_colocation_rh(rsc_lh,
replica->container,
constraint, data_set);
} else {
pe_node_t *chosen = replica->container->fns->location(replica->container,
NULL, FALSE);
if ((chosen == NULL)
|| is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
continue;
}
if ((constraint->role_rh >= RSC_ROLE_MASTER)
&& (replica->child == NULL)) {
continue;
}
if ((constraint->role_rh >= RSC_ROLE_MASTER)
&& (replica->child->next_role < RSC_ROLE_MASTER)) {
continue;
}
pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight);
allocated_rhs = g_list_prepend(allocated_rhs, chosen);
}
}
if (constraint->score >= INFINITY) {
node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE);
}
g_list_free(allocated_rhs);
}
enum pe_action_flags
pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node)
{
GListPtr containers = NULL;
enum pe_action_flags flags = 0;
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, action->rsc);
if(data->child) {
enum action_tasks task = get_complex_task(data->child, action->task, TRUE);
switch(task) {
case no_action:
case action_notify:
case action_notified:
case action_promote:
case action_promoted:
case action_demote:
case action_demoted:
return summary_action_flags(action, data->child->children, node);
default:
break;
}
}
containers = get_container_list(action->rsc);
flags = summary_action_flags(action, containers, node);
g_list_free(containers);
return flags;
}
pe_resource_t *
find_compatible_child_by_node(pe_resource_t * local_child, pe_node_t * local_node, pe_resource_t * rsc,
enum rsc_role_e filter, gboolean current)
{
GListPtr gIter = NULL;
GListPtr children = NULL;
if (local_node == NULL) {
crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
return NULL;
}
crm_trace("Looking for compatible child from %s for %s on %s",
local_child->id, rsc->id, local_node->details->uname);
children = get_containers_or_children(rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if(is_child_compatible(child_rsc, local_node, filter, current)) {
crm_trace("Pairing %s with %s on %s",
local_child->id, child_rsc->id, local_node->details->uname);
return child_rsc;
}
}
crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
if(children != rsc->children) {
g_list_free(children);
}
return NULL;
}
static pe__bundle_replica_t *
replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
pe_node_t *node)
{
if (rsc->variant == pe_container) {
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
for (GList *gIter = data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->child
&& (container == replica->container)
&& (node->details == replica->node->details)) {
return replica;
}
}
}
return NULL;
}
static enum pe_graph_flags
multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node, enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type,
pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
GListPtr children = NULL;
gboolean current = FALSE;
enum pe_graph_flags changed = pe_graph_none;
/* @TODO Lazy heuristic: infer "current" from the action name for now */
if (pcmk__ends_with(first->uuid, "_stopped_0")
|| pcmk__ends_with(first->uuid, "_demoted_0")) {
current = TRUE;
}
children = get_containers_or_children(then->rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *then_child = gIter->data;
pe_resource_t *first_child = find_compatible_child(then_child,
first->rsc,
RSC_ROLE_UNKNOWN,
current, data_set);
if (first_child == NULL && current) {
crm_trace("Ignore");
} else if (first_child == NULL) {
crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);
/* An ugly hack, but what else can we do?
 *
 * If no instance is active or about to be active
 * on the same node as then_child, then then_child
 * must not be allowed to start
 */
if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
if(assign_node(then_child, NULL, TRUE)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
}
} else {
pe_action_t *first_action = NULL;
pe_action_t *then_action = NULL;
enum action_tasks task = clone_child_action(first);
const char *first_task = task2text(task);
pe__bundle_replica_t *first_replica = NULL;
pe__bundle_replica_t *then_replica = NULL;
first_replica = replica_for_container(first->rsc, first_child,
node);
if (strstr(first->task, "stop") && first_replica && first_replica->child) {
/* Except for 'stopped', we should be looking at the
 * in-container resource: actions for the child will
 * happen later and are therefore more likely to align
 * with the user's intent.
 */
first_action = find_first_action(first_replica->child->actions,
NULL, task2text(task), node);
} else {
first_action = find_first_action(first_child->actions, NULL, task2text(task), node);
}
then_replica = replica_for_container(then->rsc, then_child, node);
if (strstr(then->task, "mote")
&& then_replica && then_replica->child) {
/* Promote/demote actions will never be found for the
 * container resource, so look in the child instead.
 *
 * Alternatively, treat:
 * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
 * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
 */
then_action = find_first_action(then_replica->child->actions,
NULL, then->task, node);
} else {
then_action = find_first_action(then_child->actions, NULL, then->task, node);
}
if (first_action == NULL) {
if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
&& !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_err("Internal error: No action found for %s in %s (first)",
first_task, first_child->id);
} else {
crm_trace("No action found for %s in %s%s (first)",
first_task, first_child->id,
pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
}
continue;
}
/* We're only interested if 'then' is neither stopping nor being demoted */
if (then_action == NULL) {
if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
&& !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_err("Internal error: No action found for %s in %s (then)",
then->task, then_child->id);
} else {
crm_trace("No action found for %s in %s%s (then)",
then->task, then_child->id,
pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
}
continue;
}
if (order_actions(first_action, then_action, type)) {
crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
first_action->uuid,
pcmk_is_set(first_action->flags, pe_action_optional),
then_action->uuid,
pcmk_is_set(then_action->flags, pe_action_optional),
type);
pe__set_graph_flags(changed, first,
pe_graph_updated_first|pe_graph_updated_then);
}
if(first_action && then_action) {
changed |= then_child->cmds->update_actions(first_action,
then_action, node,
first_child->cmds->action_flags(first_action, node),
filter, type, data_set);
} else {
crm_err("Nothing found either for %s (%p) or %s (%p) %s",
first_child->id, first_action,
then_child->id, then_action, task2text(task));
}
}
}
if(children != then->rsc->children) {
g_list_free(children);
}
return changed;
}
static bool
can_interleave_actions(pe_action_t *first, pe_action_t *then)
{
bool interleave = FALSE;
pe_resource_t *rsc = NULL;
const char *interleave_s = NULL;
if(first->rsc == NULL || then->rsc == NULL) {
crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
return FALSE;
} else if(first->rsc == then->rsc) {
crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
return FALSE;
} else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid);
return FALSE;
}
if (pcmk__ends_with(then->uuid, "_stop_0")
|| pcmk__ends_with(then->uuid, "_demote_0")) {
rsc = first->rsc;
} else {
rsc = then->rsc;
}
interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
interleave = crm_is_true(interleave_s);
crm_trace("Interleave %s -> %s: %s (based on %s)",
first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
return interleave;
}
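/* For reference (assumed CIB fragment, not part of this diff), interleaving
 * is enabled via the clone/bundle meta-attribute read above:
 *
 *   <clone id="myclone">
 *     <meta_attributes id="myclone-meta">
 *       <nvpair id="myclone-interleave" name="interleave" value="true"/>
 *     </meta_attributes>
 *     ...
 *   </clone>
 *
 * Note that the attribute is taken from the "then" side, except for stop and
 * demote, where the "first" side's setting governs.
 */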
enum pe_graph_flags
pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node, enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type,
pe_working_set_t *data_set)
{
enum pe_graph_flags changed = pe_graph_none;
crm_trace("%s -> %s", first->uuid, then->uuid);
if(can_interleave_actions(first, then)) {
changed = multi_update_interleave_actions(first, then, node, flags,
filter, type, data_set);
} else if(then->rsc) {
GListPtr gIter = NULL;
GListPtr children = NULL;
// Handle the 'primitive' ordering case
changed |= native_update_actions(first, then, node, flags, filter,
type, data_set);
// Now any children (or containers in the case of a bundle)
children = get_containers_or_children(then->rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *then_child = (pe_resource_t *) gIter->data;
enum pe_graph_flags then_child_changed = pe_graph_none;
pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
if (then_child_action) {
enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node);
if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
then_child_changed |= then_child->cmds->update_actions(first,
then_child_action, node, flags, filter, type, data_set);
}
changed |= then_child_changed;
if (then_child_changed & pe_graph_updated_then) {
for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
update_action(next->action, data_set);
}
}
}
}
if(children != then->rsc->children) {
g_list_free(children);
}
}
return changed;
}
void
pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
- pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
-
native_rsc_location(rsc, constraint);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->container) {
replica->container->cmds->rsc_location(replica->container,
constraint);
}
if (replica->ip) {
replica->ip->cmds->rsc_location(replica->ip, constraint);
}
}
if (bundle_data->child
&& ((constraint->role_filter == RSC_ROLE_SLAVE)
|| (constraint->role_filter == RSC_ROLE_MASTER))) {
bundle_data->child->cmds->rsc_location(bundle_data->child, constraint);
bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
constraint);
}
}
void
pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
if (bundle_data->child) {
bundle_data->child->cmds->expand(bundle_data->child, data_set);
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->remote && replica->container
&& pe__bundle_needs_remote_name(replica->remote)) {
/* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
* run pacemaker-remoted inside, without needing a separate IP for
* the container. This is done by configuring the inner remote's
* connection host as the magic string "#uname", then
* replacing it with the underlying host when needed.
*/
xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
replica->remote->xml, LOG_ERR);
const char *calculated_addr = NULL;
calculated_addr = pe__add_bundle_remote_name(replica->remote,
nvpair, "value");
if (calculated_addr) {
crm_trace("Set address for bundle connection %s to bundle host %s",
replica->remote->id, calculated_addr);
g_hash_table_replace(replica->remote->parameters,
strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
strdup(calculated_addr));
} else {
/* The only way to get here is if the remote connection is
* neither currently running nor scheduled to run. That means we
* won't be doing any operations that require addr (only start
* requires it; we additionally use it to compare digests when
* unpacking status, promote, and migrate_from history, but
* that's already happened by this point).
*/
crm_info("Unable to determine address for bundle %s remote connection",
rsc->id);
}
}
if (replica->ip) {
replica->ip->cmds->expand(replica->ip, data_set);
}
if (replica->container) {
replica->container->cmds->expand(replica->container, data_set);
}
if (replica->remote) {
replica->remote->cmds->expand(replica->remote, data_set);
}
}
}
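/* For reference (assumed CIB fragment, not part of this diff): the inner
 * remote connection used by REMOTE_CONTAINER_HACK carries
 *
 *   <nvpair id="..." name="addr" value="#uname"/>
 *
 * and pe__add_bundle_remote_name() resolves the "#uname" placeholder to the
 * host the container was allocated to, which is what gets substituted into
 * the connection parameters above.
 */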
gboolean
pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *complete, gboolean force,
pe_working_set_t * data_set)
{
bool any_created = FALSE;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return FALSE);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip) {
any_created |= replica->ip->cmds->create_probe(replica->ip, node,
complete, force,
data_set);
}
if (replica->child && (node->details == replica->node->details)) {
any_created |= replica->child->cmds->create_probe(replica->child,
node, complete,
force, data_set);
}
if (replica->container) {
bool created = replica->container->cmds->create_probe(replica->container,
node, complete,
force, data_set);
if(created) {
any_created = TRUE;
/* If we're limited to one replica per host (probably
 * due to the lack of an IP range), then we don't
 * want any of our peer containers starting until
 * we've established that no other copies are already
 * running.
 *
 * Partly this is to ensure that nreplicas_per_host is
 * observed, but also to ensure that the containers
 * don't fail to start because the necessary port
 * mappings (which won't include an IP for uniqueness)
 * are already taken.
 */
for (GList *tIter = bundle_data->replicas;
tIter && (bundle_data->nreplicas_per_host == 1);
tIter = tIter->next) {
pe__bundle_replica_t *other = tIter->data;
if ((other != replica) && (other != NULL)
&& (other->container != NULL)) {
custom_action_order(replica->container,
pcmk__op_key(replica->container->id, RSC_STATUS, 0),
NULL, other->container,
pcmk__op_key(other->container->id, RSC_START, 0),
NULL,
pe_order_optional|pe_order_same_node,
data_set);
}
}
}
}
if (replica->container && replica->remote
&& replica->remote->cmds->create_probe(replica->remote, node,
complete, force,
data_set)) {
/* Do not probe the remote resource until we know where the
* container is running. This is required for REMOTE_CONTAINER_HACK
* to correctly probe remote resources.
*/
char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
0);
pe_action_t *probe = find_first_action(replica->remote->actions,
probe_uuid, NULL, node);
free(probe_uuid);
if (probe) {
any_created = TRUE;
crm_trace("Ordering %s probe on %s",
replica->remote->id, node->details->uname);
custom_action_order(replica->container,
pcmk__op_key(replica->container->id, RSC_START, 0),
NULL, replica->remote, NULL, probe,
pe_order_probe, data_set);
}
}
}
return any_created;
}
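/* Effect sketch (illustration only): with replicas-per-host=1 and containers
 * c0/c1, the loop above emits optional same-node orderings like
 *
 *   c0 monitor_0 (probe) -> c1 start
 *
 * so no peer container starts until probes confirm that its host slot and
 * port mappings are actually free.
 */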
void
pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
{
}
void
pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set,
gboolean terminal)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip) {
LogActions(replica->ip, data_set, terminal);
}
if (replica->container) {
LogActions(replica->container, data_set, terminal);
}
if (replica->remote) {
LogActions(replica->remote, data_set, terminal);
}
if (replica->child) {
LogActions(replica->child, data_set, terminal);
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_messages.c b/lib/pacemaker/pcmk_sched_messages.c
index 32d4aff78a..63c1dc4276 100644
--- a/lib/pacemaker/pcmk_sched_messages.c
+++ b/lib/pacemaker/pcmk_sched_messages.c
@@ -1,149 +1,149 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include <crm/common/ipc_internal.h>
gboolean show_scores = FALSE;
gboolean show_utilization = FALSE;
static void
log_resource_details(pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
pcmk__output_t *out = NULL;
const char* argv[] = { "", NULL };
GListPtr all = NULL;
pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_LOG,
{ NULL, NULL, NULL }
};
/* We need a list of nodes that we are allowed to output information for.
* This is necessary because out->message for all the resource-related
* messages expects such a list, due to the `crm_mon --node=` feature. Here,
* we just make it a list of all the nodes.
*/
all = g_list_prepend(all, strdup("*"));
pcmk__register_formats(NULL, formats);
rc = pcmk__output_new(&out, "log", NULL, (char**)argv);
if ((rc != pcmk_rc_ok) || (out == NULL)) {
crm_err("Can't log resource details due to internal error: %s\n",
pcmk_rc_str(rc));
return;
}
pe__register_messages(out);
for (GList *item = data_set->resources; item != NULL; item = item->next) {
pe_resource_t *rsc = (pe_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role != RSC_ROLE_STOPPED)) {
out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
pcmk__output_free(out);
g_list_free_full(all, free);
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in,out] data_set Cluster working set
* \param[in] xml_input CIB XML to use as scheduler input
* \param[in] now Time to use for rule evaluation (or NULL to use the current time)
*/
xmlNode *
pcmk__schedule_actions(pe_working_set_t *data_set, xmlNode *xml_input,
crm_time_t *now)
{
GListPtr gIter = NULL;
/* pe_debug_on(); */
CRM_ASSERT(xml_input || pcmk_is_set(data_set->flags, pe_flag_have_status));
if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
set_working_set_defaults(data_set);
data_set->input = xml_input;
data_set->now = now;
} else {
crm_trace("Already have status - reusing");
}
if (data_set->now == NULL) {
data_set->now = crm_time_new(NULL);
}
crm_trace("Calculate cluster status");
stage0(data_set);
if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
log_resource_details(data_set);
}
- crm_trace("Applying placement constraints");
+ crm_trace("Applying location constraints");
stage2(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
return NULL;
}
crm_trace("Create internal constraints");
stage3(data_set);
crm_trace("Check actions");
stage4(data_set);
crm_trace("Allocate resources");
stage5(data_set);
crm_trace("Processing fencing and shutdown cases");
stage6(data_set);
crm_trace("Applying ordering constraints");
stage7(data_set);
crm_trace("Create transition graph");
stage8(data_set);
crm_trace("=#=#=#=#= Summary =#=#=#=#=");
crm_trace("\t========= Set %d (Un-runnable) =========", -1);
if (get_crm_log_level() == LOG_TRACE) {
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk_any_flags_set(action->flags,
pe_action_optional
|pe_action_runnable
|pe_action_pseudo)) {
log_action(LOG_TRACE, "\t", action, TRUE);
}
}
}
return data_set->graph;
}
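/* Usage sketch (hypothetical caller, roughly what crm_simulate does; the
 * helper names are from libpe_status and assumed here):
 *
 *   pe_working_set_t *data_set = pe_new_working_set();
 *   xmlNode *cib = filename2xml("cib-snapshot.xml");
 *   xmlNode *graph = pcmk__schedule_actions(data_set, cib, NULL);
 *   // ... inspect or dump the transition graph ...
 *   pe_free_working_set(data_set);
 */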
diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index 57e22ef8e9..833cc66ea3 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1,3491 +1,3494 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include <crm/services.h>
// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1
#define INFINITY_HACK (INFINITY * -100)
#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>
static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
pe_working_set_t * data_set);
gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
/* This array says what the *next* role should be when transitioning from one
 * role to another. For example, when going from Stopped to Master, the next
 * role is RSC_ROLE_SLAVE, because the resource must be started before being
 * promoted. The current role then becomes Slave (started), which is fed into
 * this array again, giving a next role of RSC_ROLE_MASTER.
 */
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state*/
/* Unknown Stopped Started Slave Master */
/* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, },
/* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, },
/* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
/* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
};
typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
gboolean optional,
pe_working_set_t *data_set);
// This array picks the function needed to transition from one role to another
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state */
/* Unknown Stopped Started Slave Master */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , },
};
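/* Worked example (sketch): taking a resource from Stopped to Master walks the
 * two matrices above iteratively:
 *
 *   rsc_state_matrix[Stopped][Master]  -> RSC_ROLE_SLAVE
 *   rsc_action_matrix[Stopped][Slave]  -> StartRsc
 *   rsc_state_matrix[Slave][Master]    -> RSC_ROLE_MASTER
 *   rsc_action_matrix[Slave][Master]   -> PromoteRsc
 *
 * i.e. a start action followed by a promote action.
 */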
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \
(nw_flags) = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Node weight", (nw_rsc)->id, (nw_flags), \
(flags_to_clear), #flags_to_clear); \
} while (0)
static gboolean
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
GListPtr nodes = NULL;
pe_node_t *chosen = NULL;
pe_node_t *best = NULL;
int multiple = 1;
int length = 0;
gboolean result = FALSE;
process_utilization(rsc, &prefer, data_set);
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to ? TRUE : FALSE;
}
// Sort allowed nodes by weight
if (rsc->allowed_nodes) {
length = g_hash_table_size(rsc->allowed_nodes);
}
if (length > 0) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
// First node in sorted list has the best score
best = g_list_nth_data(nodes, 0);
}
if (prefer && nodes) {
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
prefer->details->uname, rsc->id);
/* Favor the preferred node as long as its weight is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's weight is less than INFINITY.
*/
} else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else if (!can_run_resources(chosen)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s (ignoring %d candidates)",
chosen->details->uname, rsc->id, length);
}
}
if ((chosen == NULL) && nodes) {
/* Either there is no preferred node, or the preferred node is not
* available, but there are other nodes allowed to run the resource.
*/
chosen = best;
pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
chosen ? chosen->details->uname : "<none>", rsc->id, length);
if (!pe_rsc_is_unique_clone(rsc->parent)
&& chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* distribute_children() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unallocated instances to prefer a node that's already
* running another instance.
*/
pe_node_t *running = pe__current_node(rsc);
if (running && (can_run_resources(running) == FALSE)) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, running->details->uname);
} else if (running) {
for (GList *iter = nodes->next; iter; iter = iter->next) {
pe_node_t *tmp = (pe_node_t *) iter->data;
if (tmp->weight != chosen->weight) {
// The nodes are sorted by weight, so no more are equal
break;
}
if (tmp->details == running->details) {
// Scores are equal, so prefer the current node
chosen = tmp;
}
multiple++;
}
}
}
}
if (multiple > 1) {
static char score[33];
int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
score2char_stack(chosen->weight, score, sizeof(score));
do_crm_log(log_level,
"Chose node %s for %s from %d nodes with score %s",
chosen->details->uname, rsc->id, multiple, score);
}
result = native_assign_node(rsc, nodes, chosen, FALSE);
g_list_free(nodes);
return result;
}
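/* Example (sketch): with allowed nodes scored {n1: 100, n2: 100, n3: 50} and
 * the resource currently active on n2, the sorted list puts n1 first, but the
 * tie-breaking loop above sees n2's equal score and keeps the resource on n2,
 * avoiding a gratuitous move.
 */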
/*!
* \internal
* \brief Find score of highest-scored node that matches colocation attribute
*
* \param[in] rsc Resource whose allowed nodes should be searched
* \param[in] attr Colocation attribute name (must not be NULL)
* \param[in] value Colocation attribute value to require
*/
static int
best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
pe_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
// Find best allowed node with matching attribute
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight > best_score) && can_run_resources(node)
&& pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
} else {
crm_info("Allowed node %s for %s had best score (%d) "
"of those matching node attribute %s=%s",
best_node, rsc->id, best_score, attr, value);
}
}
return best_score;
}
/*!
* \internal
* \brief Add resource's colocation matches to current node allocation scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
* \param[in,out] nodes Hash table of nodes with allocation scores so far
* \param[in] rsc Resource whose allowed nodes should be compared
* \param[in] attr Colocation attribute that must match (NULL for default)
* \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
const char *attr, float factor,
bool only_positive)
{
GHashTableIter iter;
pe_node_t *node = NULL;
if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float weight_f = 0;
int weight = 0;
int score = 0;
int new_score = 0;
score = best_node_score_matching_attr(rsc, attr,
pe_node_attribute_raw(node, attr));
if ((factor < 0) && (score < 0)) {
/* Negative preference for a node with a negative score
* should not become a positive preference.
*
* @TODO Consider filtering only if weight is -INFINITY
*/
crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
node->details->uname, node->weight, factor, score);
continue;
}
if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
node->details->uname, node->weight, factor, score);
continue;
}
weight_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
if ((weight == 0) && (score != 0)) {
if (factor > 0.0) {
weight = 1;
} else if (factor < 0.0) {
weight = -1;
}
}
new_score = pe__add_scores(weight, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
"(negative disallowed, marking node unusable)",
node->details->uname, node->weight, factor, score,
new_score);
node->weight = INFINITY_HACK;
continue;
}
if (only_positive && (new_score < 0) && (node->weight == 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
node->details->uname, node->weight, factor, score,
new_score);
continue;
}
crm_trace("%s: %d + %f * %d = %d", node->details->uname,
node->weight, factor, score, new_score);
node->weight = new_score;
}
}
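/* Worked arithmetic (sketch): factor 0.2 with a matching score of 1 gives
 * weight_f = 0.2, which rounds to 0; the nonzero guard then bumps it to 1 so
 * the small preference survives. Factor 0.5 with score -3 gives -1.5, which
 * rounds away from zero to -2 via (int)(weight_f - 0.5).
 */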
static inline bool
is_nonempty_group(pe_resource_t *rsc)
{
return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
}
/*!
* \internal
* \brief Incorporate colocation constraint scores into node weights
*
* \param[in,out] rsc Resource being placed
* \param[in] rhs ID of 'with' resource
* \param[in,out] nodes Nodes, with scores as of this point
* \param[in] attr Colocation attribute (ID by default)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pe_weights values
*
* \return Nodes, with scores modified by this constraint
* \note This function assumes ownership of the nodes argument. The caller
* should free the returned copy rather than the original.
*/
GHashTable *
pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,
GHashTable *nodes, const char *attr, float factor,
uint32_t flags)
{
GHashTable *work = NULL;
// Avoid infinite recursion
if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
return nodes;
}
pe__set_resource_flags(rsc, pe_rsc_merging);
if (pcmk_is_set(flags, pe_weights_init)) {
if (is_nonempty_group(rsc)) {
GList *last = g_list_last(rsc->children);
pe_resource_t *last_rsc = last->data;
pe_rsc_trace(rsc, "%s: Merging scores from group %s "
"using last member %s (at %.6f)",
rhs, rsc->id, last_rsc->id, factor);
work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
flags);
} else {
work = pcmk__copy_node_table(rsc->allowed_nodes);
}
clear_node_weights_flags(flags, rsc, pe_weights_init);
} else if (is_nonempty_group(rsc)) {
/* The first member of the group will recursively incorporate any
* constraints involving other members (including the group internal
* colocation).
*
* @TODO The indirect colocations from the dependent group's other
* members will be incorporated at full strength rather than by
* factor, so the group's combined stickiness will be treated as
* (factor + (#members - 1)) * stickiness. It is questionable what
* the right approach should be.
*/
pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
"(at %.6f)", rhs, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
factor, flags);
} else {
pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
rhs, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
add_node_scores_matching_attr(work, rsc, attr, factor,
pcmk_is_set(flags, pe_weights_positive));
}
if (can_run_any(work)) {
GListPtr gIter = NULL;
int multiplier = (factor < 0)? -1 : 1;
if (pcmk_is_set(flags, pe_weights_forward)) {
gIter = rsc->rsc_cons;
pe_rsc_trace(rsc,
"Checking additional %d optional '%s with' constraints",
g_list_length(gIter), rsc->id);
} else if (is_nonempty_group(rsc)) {
pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
gIter = last_rsc->rsc_cons_lhs;
pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
"constraints using last member %s",
g_list_length(gIter), rsc->id, last_rsc->id);
} else {
gIter = rsc->rsc_cons_lhs;
pe_rsc_trace(rsc,
"Checking additional %d optional 'with %s' constraints",
g_list_length(gIter), rsc->id);
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *other = NULL;
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (constraint->score == 0) {
continue;
}
if (pcmk_is_set(flags, pe_weights_forward)) {
other = constraint->rsc_rh;
} else {
other = constraint->rsc_lh;
}
pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->rsc_lh->id,
constraint->rsc_rh->id);
work = pcmk__native_merge_weights(other, rhs, work,
constraint->node_attribute,
multiplier * constraint->score / (float) INFINITY,
flags|pe_weights_rollback);
pe__show_node_weights(true, NULL, rhs, work);
}
} else if (pcmk_is_set(flags, pe_weights_rollback)) {
pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
rhs, rsc->id);
g_hash_table_destroy(work);
pe__clear_resource_flags(rsc, pe_rsc_merging);
return nodes;
}
if (pcmk_is_set(flags, pe_weights_positive)) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (nodes) {
g_hash_table_destroy(nodes);
}
pe__clear_resource_flags(rsc, pe_rsc_merging);
return work;
}
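/* Usage sketch (hypothetical caller, consistent with the ownership note in
 * the doxygen block above):
 *
 *   GHashTable *work = pcmk__copy_node_table(rsc->allowed_nodes);
 *   work = pcmk__native_merge_weights(other_rsc, rsc->id, work, NULL,
 *                                     constraint_score / (float) INFINITY,
 *                                     pe_weights_rollback);
 *   // 'work' now belongs to the caller; the table passed in must not be
 *   // freed separately
 *   g_hash_table_destroy(work);
 */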
static inline bool
node_has_been_unfenced(pe_node_t *node)
{
const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
}
static inline bool
is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
{
return pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing);
}
pe_node_t *
pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
/* never allocate children on their own */
pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
rsc->parent->id);
rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
GHashTable *archive = NULL;
pe_resource_t *rsc_rh = constraint->rsc_rh;
if (constraint->score == 0) {
continue;
}
if (constraint->role_lh >= RSC_ROLE_MASTER
|| (constraint->score < 0 && constraint->score > -INFINITY)) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
pe_rsc_trace(rsc,
"%s: Allocating %s first (constraint=%s score=%d role=%s)",
rsc->id, rsc_rh->id, constraint->id,
constraint->score, role2text(constraint->role_lh));
rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive) {
g_hash_table_destroy(archive);
}
}
pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes);
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (constraint->score == 0) {
continue;
}
pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->rsc_lh->id,
constraint->rsc_rh->id);
rsc->allowed_nodes =
constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY,
pe_weights_rollback);
}
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
/* make sure it doesn't come up again */
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
} else if(rsc->next_role > rsc->role
&& !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
&& data_set->no_quorum_policy == no_quorum_freeze) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
rsc->next_role = rsc->role;
}
pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes);
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
const char *reason = NULL;
pe_node_t *assign_to = NULL;
rsc->next_role = rsc->role;
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_MASTER) {
reason = "master";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
native_assign_node(rsc, NULL, assign_to, TRUE);
} else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
native_assign_node(rsc, NULL, NULL, TRUE);
} else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
&& native_choose_node(rsc, prefer, data_set)) {
pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
} else if (rsc->allocated_to == NULL) {
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating);
if (rsc->is_remote_node) {
pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
CRM_ASSERT(remote_node != NULL);
if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
crm_trace("Setting Pacemaker Remote node %s to ONLINE",
remote_node->details->id);
remote_node->details->online = TRUE;
/* We shouldn't consider an unseen remote node unclean if we are going
 * to try to connect to it. Otherwise we get an unnecessary fence. */
if (remote_node->details->unseen == TRUE) {
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
remote_node->details->id, role2text(rsc->next_role),
(rsc->allocated_to? "" : "un"));
remote_node->details->shutdown = TRUE;
}
}
return rsc->allocated_to;
}
static gboolean
is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
{
gboolean dup = FALSE;
const char *id = NULL;
const char *value = NULL;
xmlNode *operation = NULL;
guint interval2_ms = 0;
CRM_ASSERT(rsc);
for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
value = crm_element_value(operation, "name");
if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
continue;
}
value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval2_ms = crm_parse_interval_spec(value);
if (interval_ms != interval2_ms) {
continue;
}
if (id == NULL) {
id = ID(operation);
} else {
pcmk__config_err("Operation %s is duplicate of %s (do not use "
"same name and interval combination more "
"than once per resource)", ID(operation), id);
dup = TRUE;
}
}
}
return dup;
}
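/* Example of a configuration this flags (assumed CIB fragment): two ops on
 * one resource with the same name and interval are duplicates, regardless of
 * their ids:
 *
 *   <op id="mon-a" name="monitor" interval="10s"/>
 *   <op id="mon-b" name="monitor" interval="10s"/>
 */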
static bool
op_cannot_recur(const char *name)
{
return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
}
static void
RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
pe_action_t *mon = NULL;
gboolean is_optional = TRUE;
GListPtr possible_matches = NULL;
CRM_ASSERT(rsc);
/* Only process for the operations without role="Stopped" */
role = crm_element_value(operation, "role");
if (role && text2role(role) == RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
if (start != NULL) {
pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
start->uuid);
is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
} else {
pe_rsc_trace(rsc, "Marking %s optional", key);
is_optional = TRUE;
}
/* start a monitor for an already active resource */
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
is_optional = FALSE;
pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
} else {
GListPtr gIter = NULL;
for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
pe_action_t *op = (pe_action_t *) gIter->data;
if (pcmk_is_set(op->flags, pe_action_reschedule)) {
is_optional = FALSE;
break;
}
}
g_list_free(possible_matches);
}
if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
|| (role != NULL && text2role(role) != rsc->next_role)) {
int log_level = LOG_TRACE;
const char *result = "Ignoring";
if (is_optional) {
char *after_key = NULL;
pe_action_t *cancel_op = NULL;
// It's running, so cancel it
log_level = LOG_INFO;
result = "Cancelling";
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
switch (rsc->role) {
case RSC_ROLE_SLAVE:
case RSC_ROLE_STARTED:
if (rsc->next_role == RSC_ROLE_MASTER) {
after_key = promote_key(rsc);
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
after_key = stop_key(rsc);
}
break;
case RSC_ROLE_MASTER:
after_key = demote_key(rsc);
break;
default:
break;
}
if (after_key) {
custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
pe_order_runnable_left, data_set);
}
}
do_crm_log(log_level, "%s action %s (%s vs. %s)",
result, key, role ? role : role2text(RSC_ROLE_SLAVE),
role2text(rsc->next_role));
free(key);
return;
}
mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
key = mon->uuid;
if (is_optional) {
pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
}
if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
} else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
} else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
mon->task, interval_ms / 1000, rsc->id, node_uname);
}
if (rsc->next_role == RSC_ROLE_MASTER) {
char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
free(running_master);
}
if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, start_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, reload_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
if (rsc->next_role == RSC_ROLE_MASTER) {
custom_action_order(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
} else if (rsc->role == RSC_ROLE_MASTER) {
custom_action_order(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
}
}
}
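/* Example (sketch): given <op name="monitor" interval="11s" role="Master"/>
 * on a resource about to be demoted, the role check above cancels the Master
 * monitor and orders the cancellation before the demote; a role="Slave"
 * monitor, if configured, is handled in its own pass through this function.
 */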
static void
Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child_element(rsc->ops_xml);
operation != NULL;
operation = __xml_next_element(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp(rsc, start, node, operation, data_set);
}
}
}
}
static void
RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
GListPtr possible_matches = NULL;
GListPtr gIter = NULL;
/* Only process for the operations with role="Stopped" */
role = crm_element_value(operation, "role");
if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
// @TODO add support
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
crm_notice("Ignoring %s (recurring monitors for Stopped role are "
"not supported for anonymous clones)",
ID(operation));
return;
}
pe_rsc_trace(rsc,
"Creating recurring action %s for %s in role %s on nodes where it should not be running",
ID(operation), rsc->id, role2text(rsc->next_role));
/* if the monitor exists on the node where the resource will be running, cancel it */
if (node != NULL) {
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches) {
pe_action_t *cancel_op = NULL;
g_list_free(possible_matches);
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
/* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
/* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
pe_order_runnable_left, data_set);
}
pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
key, role, role2text(rsc->next_role), node_uname);
}
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *stop_node = (pe_node_t *) gIter->data;
const char *stop_node_uname = stop_node->details->uname;
gboolean is_optional = TRUE;
gboolean probe_is_optional = TRUE;
gboolean stop_is_optional = TRUE;
pe_action_t *stopped_mon = NULL;
char *rc_inactive = NULL;
GListPtr probe_complete_ops = NULL;
GListPtr stop_ops = NULL;
GListPtr local_gIter = NULL;
if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
continue;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
ID(operation), rsc->id, crm_str(stop_node_uname));
/* start a monitor for an already stopped resource */
possible_matches = find_actions_exact(rsc->actions, key, stop_node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
crm_str(stop_node_uname));
is_optional = FALSE;
} else {
pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
crm_str(stop_node_uname));
is_optional = TRUE;
g_list_free(possible_matches);
}
stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
free(rc_inactive);
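/* For managed resources, order any probes on this node before the
 * Stopped-role monitor */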
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
FALSE);
GListPtr pIter = NULL;
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
order_actions(probe, stopped_mon, pe_order_runnable_left);
crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
}
g_list_free(probes);
}
if (probe_complete_ops) {
g_list_free(probe_complete_ops);
}
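/* Order any stop of the resource on this node before the monitor, and
 * make the monitor unrunnable if a stop is unrunnable */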
stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
pe_action_t *stop = (pe_action_t *) local_gIter->data;
if (!pcmk_is_set(stop->flags, pe_action_optional)) {
stop_is_optional = FALSE;
}
if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
crm_debug("%s\t %s (cancelled : stop un-runnable)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, stop_key(rsc), stop,
NULL, strdup(key), stopped_mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
}
}
if (stop_ops) {
g_list_free(stop_ops);
}
if (is_optional == FALSE && probe_is_optional && stop_is_optional
&& !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
key, crm_str(stop_node_uname));
update_action_flags(stopped_mon, pe_action_optional, __func__,
__LINE__);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
}
if (stop_node->details->online == FALSE || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
&& !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
}
}
free(key);
}
static void
Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = __xml_first_child_element(rsc->ops_xml);
operation != NULL;
operation = __xml_next_element(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp_Stopped(rsc, start, node, operation, data_set);
}
}
}
}
static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
{
pe_action_t *migrate_to = NULL;
pe_action_t *migrate_from = NULL;
pe_action_t *start = NULL;
pe_action_t *stop = NULL;
gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
start = start_action(rsc, chosen, TRUE);
stop = stop_action(rsc, current, TRUE);
if (partial == FALSE) {
migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
RSC_MIGRATE, current, TRUE, TRUE, data_set);
}
migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
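/* Migration is in play if both migrate_to and migrate_from were created,
 * or just migrate_from when resuming a partial migration */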
if ((migrate_to && migrate_from) || (migrate_from && partial)) {
pe__set_action_flags(start, pe_action_migrate_runnable);
pe__set_action_flags(stop, pe_action_migrate_runnable);
// This is easier than trying to delete it from the graph
update_action_flags(start, pe_action_pseudo, __func__, __LINE__);
/* order probes before migrations */
if (partial) {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
migrate_from->needs = start->needs;
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
NULL, pe_order_optional, data_set);
} else {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
migrate_to->needs = start->needs;
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, pe_order_optional, data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, rsc,
pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
}
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
data_set);
}
if (migrate_to) {
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
/* Pacemaker Remote connections don't require pending to be recorded in
* the CIB. We can reduce CIB writes by not setting PENDING for them.
*/
if (rsc->is_remote_node == FALSE) {
/* migrate_to takes place on the source node, but can
* have an effect on the target node depending on how
* the agent is written. Because of this, we have to maintain
* a record that the migrate_to occurred, in case the source node
* loses membership while the migrate_to action is still in-flight.
*/
add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
}
}
if (migrate_from) {
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
}
}
void
native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
pe_node_t *chosen = NULL;
pe_node_t *current = NULL;
gboolean need_stop = FALSE;
bool need_promote = FALSE;
gboolean is_moving = FALSE;
gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
GListPtr gIter = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
bool multiply_active = FALSE;
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
CRM_ASSERT(rsc);
chosen = rsc->allocated_to;
- if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
- rsc->next_role = RSC_ROLE_STARTED;
- pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
-
- } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
- rsc->next_role = RSC_ROLE_STOPPED;
- pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
+ next_role = rsc->next_role;
+ if (next_role == RSC_ROLE_UNKNOWN) {
+ rsc->next_role = (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED;
}
-
- pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
- role2text(rsc->role), role2text(rsc->next_role));
+ pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
+ rsc->id, role2text(rsc->role), role2text(rsc->next_role),
+ ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
+ ((chosen == NULL)? "no node" : chosen->details->uname));
current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
pe_node_t *dangling_source = (pe_node_t *) gIter->data;
- pe_action_t *stop = stop_action(rsc, dangling_source, FALSE);
+ pe_action_t *stop = NULL;
- pe__set_action_flags(stop, pe_action_dangle);
- pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
+ pe_rsc_trace(rsc, "Creating stop action %s cleanup for %s on %s due to dangling migration",
+ (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and" : "without"),
rsc->id, dangling_source->details->uname);
-
+ stop = stop_action(rsc, dangling_source, FALSE);
+ pe__set_action_flags(stop, pe_action_dangle);
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, dangling_source, FALSE, data_set);
}
}
if ((num_all_active == 2) && (num_clean_active == 2) && chosen
&& rsc->partial_migration_source && rsc->partial_migration_target
&& (current->details == rsc->partial_migration_source->details)
&& (chosen->details == rsc->partial_migration_target->details)) {
/* The chosen node is still the migration target from a partial
* migration. Attempt to continue the migration instead of recovering
* by stopping the resource everywhere and starting it on a single node.
*/
- pe_rsc_trace(rsc,
- "Will attempt to continue with a partial migration to target %s from %s",
+ pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
+ "to target %s from %s",
rsc->partial_migration_target->details->id,
rsc->partial_migration_source->details->id);
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
} else {
multiply_active = (num_all_active > 1);
}
if (multiply_active) {
if (rsc->partial_migration_target && rsc->partial_migration_source) {
// Migration was in progress, but we've chosen a different target
- crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
- rsc->id, rsc->partial_migration_target->details->uname,
- rsc->partial_migration_source->details->uname);
+ crm_notice("Resource %s can no longer migrate from %s to %s "
+ "(will stop on both nodes)",
+ rsc->id, rsc->partial_migration_source->details->uname,
+ rsc->partial_migration_target->details->uname);
} else {
// Resource was incorrectly multiply active
pe_proc_err("Resource %s is active on %u nodes (%s)",
rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
}
if (rsc->recovery_type == recovery_stop_start) {
need_stop = TRUE;
}
/* If a partial migration is in progress but the migration target is no
* longer the chosen node, clear all partial migration data.
*/
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
+ rsc->id);
start = start_action(rsc, chosen, TRUE);
pe__set_action_flags(start, pe_action_print_always);
}
if (current && chosen && current->details != chosen->details) {
- pe_rsc_trace(rsc, "Moving %s", rsc->id);
+ pe_rsc_trace(rsc, "Moving %s from %s to %s",
+ rsc->id, crm_str(current->details->uname),
+ crm_str(chosen->details->uname));
is_moving = TRUE;
need_stop = TRUE;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
need_stop = TRUE;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == RSC_ROLE_MASTER) {
need_promote = TRUE;
}
}
} else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
- pe_rsc_trace(rsc, "Block %s", rsc->id);
+ pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = TRUE;
} else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
- /* Recovery of a promoted resource */
+ pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
+ rsc->id);
start = start_action(rsc, chosen, TRUE);
if (!pcmk_is_set(start->flags, pe_action_optional)) {
- pe_rsc_trace(rsc, "Forced start %s", rsc->id);
+ // Recovery of a promoted resource
+ pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = TRUE;
}
}
- pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
- role2text(rsc->role), role2text(rsc->next_role));
-
/* Create any additional actions required when bringing resource down and
* back up to same level.
*/
role = rsc->role;
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
- pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
- rsc->id, need_stop ? " required" : "");
+ pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
+ (need_stop? "required" : "optional"), rsc->id,
+ role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
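/* If the resource was taken down above, create the actions needed to
 * bring it back up to its current role */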
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pe_rsc_block)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == RSC_ROLE_MASTER) && need_promote) {
required = true;
}
- pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
- rsc->id, (required? " required" : ""));
+ pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
+ (required? "required" : "optional"), rsc->id,
+ role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
data_set) == FALSE) {
break;
}
role = next_role;
}
role = rsc->role;
/* Required steps from this role to the next */
while (role != rsc->next_role) {
next_role = rsc_state_matrix[role][rsc->next_role];
- pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
+ pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
+ rsc->id, role2text(role), role2text(next_role),
+ role2text(rsc->next_role));
if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
break;
}
role = next_role;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
- pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
+ pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
+ rsc->id);
} else if ((rsc->next_role != RSC_ROLE_STOPPED)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe_rsc_trace(rsc, "Monitor ops for active resource");
+ pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
+ ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
+ rsc->id);
start = start_action(rsc, chosen, TRUE);
Recurring(rsc, start, chosen, data_set);
Recurring_Stopped(rsc, start, chosen, data_set);
+
} else {
- pe_rsc_trace(rsc, "Monitor ops for inactive resource");
+ pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
+ rsc->id);
Recurring_Stopped(rsc, NULL, NULL, data_set);
}
/* If we are stuck in a partial migration where the target of the
* partial migration no longer matches the chosen node, a full
* stop/start is required */
if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
- pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
+ pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
+ rsc->id);
allow_migrate = FALSE;
} else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
|| pcmk_any_flags_set(rsc->flags,
pe_rsc_failed|pe_rsc_start_pending)
|| (current && current->details->unclean)
|| rsc->next_role < RSC_ROLE_STARTED) {
allow_migrate = FALSE;
}
if (allow_migrate) {
handle_migration_actions(rsc, current, chosen, data_set);
}
}
static void
rsc_avoids_remote_nodes(pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
* \param[in] data_set Cluster working set
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
}
return allowed_nodes;
}
void
native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
/* This function is on the critical path and worth optimizing as much as possible */
pe_resource_t *top = NULL;
GList *allowed_nodes = NULL;
bool check_unfencing = FALSE;
bool check_utilization = FALSE;
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping native constraints for unmanaged resource: %s",
rsc->id);
return;
}
top = uber_parent(rsc);
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)
&& pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(data_set->placement_strategy,
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_then|pe_order_restart,
data_set);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(top->flags, pe_rsc_promotable)
|| (rsc->role > RSC_ROLE_SLAVE)) {
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_implies_first_master, data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, data_set);
}
// Don't clear resource history if probing on same node
custom_action_order(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
NULL, pe_order_same_node|pe_order_then_cancels_first,
data_set);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || rsc->container) {
allowed_nodes = allowed_nodes_as_list(rsc, data_set);
}
if (check_unfencing) {
/* Check if the node needs to be unfenced first */
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pe_order_optional|pe_order_same_node, data_set);
custom_action_order(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pe_order_implies_then_on_node|pe_order_same_node,
data_set);
}
}
if (check_utilization) {
GListPtr gIter = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, data_set->placement_strategy);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
current->details->uname);
pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(current);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
}
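/* On each allowed node, order the load_stopped pseudo-op before any
 * start or migrate_to of this resource there */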
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *next = item->data;
char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
next->details->uname);
pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(next);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, start_key(rsc), NULL, pe_order_load, data_set);
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, pe_order_load, data_set);
free(load_stopped_task);
}
}
if (rsc->container) {
pe_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Do not allow a guest resource to live on a Pacemaker Remote node,
* to avoid nesting remotes. However, allow bundles to run on remote
* nodes.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP,
pe_order_optional, data_set);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(data_set,
rsc->container);
}
if (remote_rsc) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
custom_action_order(rsc->container,
pcmk__op_key(rsc->container->id, RSC_START, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
NULL,
pe_order_implies_then|pe_order_runnable_left,
data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container,
pcmk__op_key(rsc->container->id, RSC_STOP, 0),
NULL, pe_order_implies_first, data_set);
if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
rsc_colocation_new("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, data_set);
}
}
if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
/* Don't allow remote nodes to run stonith devices
 * or remote connection resources */
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
void
native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", constraint->id);
return;
} else if (constraint->rsc_rh == NULL) {
pe_err("rsc_rh was NULL for %s", constraint->id);
return;
}
if (constraint->score == 0) {
return;
}
pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
rsc_rh->id);
rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
}
enum filter_colocation_res
filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh,
rsc_colocation_t * constraint, gboolean preview)
{
if (constraint->score == 0) {
return influence_nothing;
}
/* The RH side must be allocated before we can process the constraint */
if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) {
return influence_nothing;
}
if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable)
&& !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
/* LH and RH resources have already been allocated, place the correct
* priority on LH rsc for the given promotable clone resource role */
return influence_rsc_priority;
}
if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
// Log an error if we violated a mandatory colocation constraint
const pe_node_t *rh_node = rsc_rh->allocated_to;
if (rsc_lh->allocated_to == NULL) {
// Dependent resource isn't allocated, so constraint doesn't matter
return influence_nothing;
}
if (constraint->score >= INFINITY) {
// Dependent resource must colocate with rh_node
if ((rh_node == NULL)
|| (rh_node->details != rsc_lh->allocated_to->details)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
rsc_lh->id, rsc_rh->id,
rsc_lh->allocated_to->details->uname,
(rh_node? rh_node->details->uname : "unallocated"));
}
} else if (constraint->score <= -INFINITY) {
// Dependent resource must anti-colocate with rh_node
if ((rh_node != NULL)
&& (rsc_lh->allocated_to->details == rh_node->details)) {
crm_err("%s and %s must be anti-colocated but are allocated "
"to the same node (%s)",
rsc_lh->id, rsc_rh->id, rh_node->details->uname);
}
}
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
role2text(constraint->role_lh), role2text(rsc_lh->next_role));
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_lh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_rh));
return influence_nothing;
}
return influence_rsc_location;
}
static void
influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *rh_value = NULL;
const char *lh_value = NULL;
const char *attribute = CRM_ATTR_ID;
int score_multiplier = 1;
if (constraint->score == 0) {
return;
}
if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
return;
}
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) {
if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
rsc_lh->priority = -INFINITY;
}
return;
}
if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
return;
}
if (constraint->role_lh == RSC_ROLE_SLAVE) {
score_multiplier = -1;
}
rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
rsc_lh->priority);
}
static void
colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
{
const char *attribute = CRM_ATTR_ID;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
pe_node_t *node = NULL;
if (constraint->score == 0) {
return;
}
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (rsc_rh->allocated_to) {
value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
} else if (constraint->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
return;
}
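/* Apply the scores to a copy of the allowed node table, so the change
 * can be discarded if it would leave the dependent resource with no
 * runnable node */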
work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (rsc_rh->allocated_to == NULL) {
pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
constraint->id, rsc_lh->id, node->details->uname,
constraint->score, rsc_rh->id);
node->weight = pe__add_scores(-constraint->score, node->weight);
} else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) {
if (constraint->score < CRM_SCORE_INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
constraint->id, rsc_lh->id,
node->details->uname, constraint->score);
node->weight = pe__add_scores(constraint->score, node->weight);
}
} else if (constraint->score >= CRM_SCORE_INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
constraint->id, rsc_lh->id, node->details->uname,
constraint->score, attribute);
node->weight = pe__add_scores(-constraint->score, node->weight);
}
}
if (can_run_any(work)
|| constraint->score <= -INFINITY || constraint->score >= INFINITY) {
g_hash_table_destroy(rsc_lh->allowed_nodes);
rsc_lh->allowed_nodes = work;
work = NULL;
} else {
pe_rsc_info(rsc_lh,
"%s: Rolling back scores from %s (no available nodes)",
rsc_lh->id, rsc_rh->id);
}
if (work) {
g_hash_table_destroy(work);
}
}
void
native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
enum filter_colocation_res filter_results;
CRM_ASSERT(rsc_lh);
CRM_ASSERT(rsc_rh);
filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
((constraint->score >= 0)? "Colocating" : "Anti-colocating"),
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
switch (filter_results) {
case influence_rsc_priority:
influence_priority(rsc_lh, rsc_rh, constraint);
break;
case influence_rsc_location:
colocation_match(rsc_lh, rsc_rh, constraint);
break;
case influence_nothing:
default:
return;
}
}
static gboolean
filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
{
if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
role2text(rsc_ticket->role_lh));
return FALSE;
}
return TRUE;
}
void
rsc_ticket_constraint(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
{
if (rsc_ticket == NULL) {
pe_err("rsc_ticket was NULL");
return;
}
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
return;
}
if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
return;
}
if (rsc_lh->children) {
GListPtr gIter = rsc_lh->children;
pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
}
return;
}
pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
role2text(rsc_ticket->role_lh));
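/* The ticket has been revoked while the resource is active, so apply
 * the configured loss policy */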
if ((rsc_ticket->ticket->granted == FALSE)
&& (rsc_lh->running_on != NULL)) {
GListPtr gIter = NULL;
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
break;
case loss_ticket_demote:
// Promotion score will be set to -INFINITY in promotion_order()
if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
}
break;
case loss_ticket_fence:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
}
break;
case loss_ticket_freeze:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
if (rsc_lh->running_on != NULL) {
pe__clear_resource_flags(rsc_lh, pe_rsc_managed);
pe__set_resource_flags(rsc_lh, pe_rsc_block);
}
break;
}
} else if (rsc_ticket->ticket->granted == FALSE) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
}
} else if (rsc_ticket->ticket->standby) {
if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
}
}
}
enum pe_action_flags
native_action_flags(pe_action_t * action, pe_node_t * node)
{
return action->flags;
}
static inline bool
is_primitive_action(pe_action_t *action)
{
return action && action->rsc && (action->rsc->variant == pe_native);
}
/*!
* \internal
* \brief Set action bits appropriately when pe_restart_order is used
*
* \param[in] first 'First' action in an ordering with pe_restart_order
* \param[in] then 'Then' action in an ordering with pe_restart_order
* \param[in] filter What ordering flags to care about
*
* \note pe_restart_order is set for "stop resource before starting it" and
* "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pe_action_t *first, pe_action_t *then,
enum pe_action_flags filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(then->flags, pe_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable start of managed resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pe_action_runnable)
&& !pcmk_is_set(then->flags, pe_action_runnable)
&& pcmk_is_set(then->rsc->flags, pe_rsc_managed)
&& pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pe_action_runnable)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pe_action_optional)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
pe_action_implies(first, then, pe_action_migrate_runnable);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pe_action_optional)
&& !pcmk_is_set(first->flags, pe_action_runnable)) {
pe_action_implies(then, first, pe_action_runnable);
}
}
enum pe_graph_flags
native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
enum pe_action_flags flags, enum pe_action_flags filter,
enum pe_ordering type, pe_working_set_t *data_set)
{
/* flags == get_action_flags(first, then_node) called from update_action() */
enum pe_graph_flags changed = pe_graph_none;
enum pe_action_flags then_flags = then->flags;
enum pe_action_flags first_flags = first->flags;
crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, then->uuid, then->flags);
if (type & pe_order_asymmetrical) {
pe_resource_t *then_rsc = then->rsc;
enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
if (!then_rsc) {
/* ignore */
} else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
/* Ignore. If 'then' is supposed to be stopped after 'first', but
* 'then' is already stopped, there is nothing to be done when asymmetrical. */
} else if ((then_rsc_role >= RSC_ROLE_STARTED)
&& pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
&& pcmk_is_set(then->flags, pe_action_optional)
&& then->node
&& pcmk__list_of_1(then_rsc->running_on)
&& then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
/* Ignore. If 'then' is supposed to be started after 'first', but
* 'then' is already started, there is nothing to be done when
* asymmetrical -- unless the start is mandatory, which indicates
* the resource is restarting, and the ordering is still needed.
*/
} else if (!(first->flags & pe_action_runnable)) {
/* prevent 'then' action from happening if 'first' is not runnable and
* 'then' has not yet occurred. */
pe_action_implies(then, first, pe_action_optional);
pe_action_implies(then, first, pe_action_runnable);
pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
} else {
/* ignore... then is allowed to start/stop if it wants to. */
}
}
if (type & pe_order_implies_first) {
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(flags /* Should be then_flags? */, pe_action_optional)) {
// Needs pcmk_is_set(first_flags, pe_action_optional) too?
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
if (pcmk_is_set(flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_optional)) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
}
if (type & pe_order_implies_first_master) {
if ((filter & pe_action_optional) &&
((then->flags & pe_action_optional) == FALSE) &&
then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
pe_action_implies(first, then, pe_action_optional);
if (pcmk_is_set(first->flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
}
}
if ((type & pe_order_implies_first_migratable)
&& pcmk_is_set(filter, pe_action_optional)) {
if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
((then->flags & pe_action_runnable) == FALSE)) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_runnable);
}
if ((then->flags & pe_action_optional) == 0) {
pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
}
if ((type & pe_order_pseudo_left)
&& pcmk_is_set(filter, pe_action_optional)) {
if ((first->flags & pe_action_runnable) == FALSE) {
pe_action_implies(then, first, pe_action_migrate_runnable);
pe__clear_action_flags(then, pe_action_pseudo);
pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
}
}
if (pcmk_is_set(type, pe_order_runnable_left)
&& pcmk_is_set(filter, pe_action_runnable)
&& pcmk_is_set(then->flags, pe_action_runnable)
&& !pcmk_is_set(flags, pe_action_runnable)) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_runnable);
pe_action_implies(then, first, pe_action_migrate_runnable);
}
if (pcmk_is_set(type, pe_order_implies_then)
&& pcmk_is_set(filter, pe_action_optional)
&& pcmk_is_set(then->flags, pe_action_optional)
&& !pcmk_is_set(flags, pe_action_optional)) {
/* in this case, treat migrate_runnable as if first is optional */
if (!pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_optional);
}
}
if (pcmk_is_set(type, pe_order_restart)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
pe_rsc_trace(then->rsc,
"Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
then_flags, first->uuid, first->flags);
if(then->rsc && then->rsc->parent) {
/* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
update_action(then, data_set);
}
}
if (first_flags != first->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_first);
pe_rsc_trace(first->rsc,
"First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
void
native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
GListPtr gIter = NULL;
- GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (constraint == NULL) {
- pe_err("Constraint is NULL");
- return;
-
- } else if (rsc == NULL) {
- pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
- return;
- }
+ bool need_role = false;
- pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
- role2text(constraint->role_filter), rsc->id);
+ CRM_CHECK((constraint != NULL) && (rsc != NULL), return);
- /* take "lifetime" into account */
- if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
- pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
- constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
+ // If a role was specified, ensure constraint is applicable
+ need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN);
+ if (need_role && (constraint->role_filter != rsc->next_role)) {
+ pe_rsc_trace(rsc,
+ "Not applying %s to %s because role will be %s not %s",
+ constraint->id, rsc->id, role2text(rsc->next_role),
+ role2text(constraint->role_filter));
return;
}
if (constraint->node_list_rh == NULL) {
- pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
+ pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
+ constraint->id, rsc->id);
return;
}
+ pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id,
+ (need_role? " for role " : ""),
+ (need_role? role2text(constraint->role_filter) : ""), rsc->id);
+
for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pe_node_t *other_node = NULL;
other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (other_node != NULL) {
- pe_rsc_trace(rsc, "%s + %s: %d + %d",
- node->details->uname,
- other_node->details->uname, node->weight, other_node->weight);
+ pe_rsc_trace(rsc, "* + %d on %s",
+ node->weight, node->details->uname);
other_node->weight = pe__add_scores(other_node->weight,
node->weight);
} else {
+ pe_rsc_trace(rsc, "* = %d on %s",
+ node->weight, node->details->uname);
other_node = pe__copy_node(node);
-
- pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
}
if (other_node->rsc_discover_mode < constraint->discover_mode) {
if (constraint->discover_mode == pe_discover_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
other_node->rsc_discover_mode = constraint->discover_mode;
}
}
-
- g_hash_table_iter_init(&iter, rsc->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
- pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
- }
}
void
native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
graph_element_from_action(action, data_set);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
#define log_change(a, fmt, args...) do { \
if(a && a->reason && terminal) { \
printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
} else if(a && a->reason) { \
crm_notice(fmt" \tdue to %s", ##args, a->reason); \
} else if(terminal) { \
printf(" * "fmt"\n", ##args); \
} else { \
crm_notice(fmt, ##args); \
} \
} while(0)
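/* Assert that a required stop action exists; tolerated when the node is
 * unclean, because the stop then becomes a pseudo-op */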
#define STOP_SANITY_ASSERT(lineno) do { \
if(current && current->details->unclean) { \
/* It will be a pseudo op */ \
} else if(stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", \
__func__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
} else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", \
__func__, lineno, stop->uuid); \
CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
} \
} while(0)
static void
LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
{
int len = 0;
char *reason = NULL;
char *details = NULL;
bool same_host = FALSE;
bool same_role = FALSE;
bool need_role = FALSE;
static int rsc_width = 5;
static int detail_width = 5;
CRM_ASSERT(action);
CRM_ASSERT(destination != NULL || origin != NULL);
if(source == NULL) {
source = action;
}
len = strlen(rsc->id);
if(len > rsc_width) {
rsc_width = len + 2;
}
if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
need_role = TRUE;
}
if(origin != NULL && destination != NULL && origin->details == destination->details) {
same_host = TRUE;
}
if(rsc->role == rsc->next_role) {
same_role = TRUE;
}
if (need_role && (origin == NULL)) {
/* Starting and promoting a promotable clone instance */
details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
} else if (origin == NULL) {
/* Starting a resource */
details = crm_strdup_printf("%s", destination->details->uname);
} else if (need_role && (destination == NULL)) {
/* Stopping a promotable clone instance */
details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
} else if (destination == NULL) {
/* Stopping a resource */
details = crm_strdup_printf("%s", origin->details->uname);
} else if (need_role && same_role && same_host) {
/* Recovering, restarting or re-promoting a promotable clone instance */
details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
} else if (same_role && same_host) {
/* Recovering or Restarting a normal resource */
details = crm_strdup_printf("%s", origin->details->uname);
} else if (need_role && same_role) {
/* Moving a promotable clone instance */
details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
} else if (same_role) {
/* Moving a normal resource */
details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
} else if (same_host) {
/* Promoting or demoting a promotable clone instance */
details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
} else {
/* Moving and promoting/demoting */
details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
}
len = strlen(details);
if(len > detail_width) {
detail_width = len;
}
if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
} else if(source->reason) {
reason = crm_strdup_printf(" due to %s", source->reason);
} else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
reason = strdup(" blocked");
} else {
reason = strdup("");
}
if(terminal) {
printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
} else {
crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
}
free(details);
free(reason);
}
void
LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
pe_node_t *next = NULL;
pe_node_t *current = NULL;
pe_node_t *start_node = NULL;
pe_action_t *stop = NULL;
pe_action_t *start = NULL;
pe_action_t *demote = NULL;
pe_action_t *promote = NULL;
char *key = NULL;
gboolean moving = FALSE;
GListPtr possible_matches = NULL;
if(rsc->variant == pe_container) {
pcmk__bundle_log_actions(rsc, data_set, terminal);
return;
}
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
LogActions(child_rsc, data_set, terminal);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
if (rsc->role == RSC_ROLE_STOPPED) {
/*
* This can occur when resources are being recovered.
* We fiddle with the current role in native_create_actions().
*/
rsc->role = RSC_ROLE_STARTED;
}
}
if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
|| (current == NULL && next == NULL)) {
pe_rsc_info(rsc, "Leave %s\t(%s%s)",
rsc->id, role2text(rsc->role),
!pcmk_is_set(rsc->flags, pe_rsc_managed)? " unmanaged" : "");
return;
}
if (current != NULL && next != NULL && !pcmk__str_eq(current->details->id, next->details->id, pcmk__str_casei)) {
moving = TRUE;
}
possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
if (possible_matches) {
start = possible_matches->data;
g_list_free(possible_matches);
}
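/* If the start is unrunnable, match a stop on any node (NULL);
 * otherwise only a stop on the current node is relevant */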
if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
start_node = NULL;
} else {
start_node = current;
}
possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
if (possible_matches) {
stop = possible_matches->data;
g_list_free(possible_matches);
}
possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
if (possible_matches) {
promote = possible_matches->data;
g_list_free(possible_matches);
}
possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
if (possible_matches) {
demote = possible_matches->data;
g_list_free(possible_matches);
}
if (rsc->role == rsc->next_role) {
pe_action_t *migrate_op = NULL;
possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
if (possible_matches) {
migrate_op = possible_matches->data;
}
CRM_CHECK(next != NULL,);
if (next == NULL) {
} else if ((migrate_op != NULL) && (current != NULL)
&& pcmk_is_set(migrate_op->flags, pe_action_runnable)) {
LogAction("Migrate", rsc, current, next, start, NULL, terminal);
} else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
LogAction("Reload", rsc, current, next, start, NULL, terminal);
} else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) {
if ((demote != NULL) && (promote != NULL)
&& !pcmk_is_set(demote->flags, pe_action_optional)
&& !pcmk_is_set(promote->flags, pe_action_optional)) {
LogAction("Re-promote", rsc, current, next, promote, demote,
terminal);
} else {
pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id,
role2text(rsc->role), next->details->uname);
}
} else if (!pcmk_is_set(start->flags, pe_action_runnable)) {
LogAction("Stop", rsc, current, NULL, stop,
(stop && stop->reason)? stop : start, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving && current) {
LogAction(pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move",
rsc, current, next, stop, NULL, terminal);
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else {
LogAction("Restart", rsc, current, next, start, NULL, terminal);
/* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
}
g_list_free(possible_matches);
return;
}
if(stop
&& (rsc->next_role == RSC_ROLE_STOPPED
|| (start && !pcmk_is_set(start->flags, pe_action_runnable)))) {
GListPtr gIter = NULL;
key = stop_key(rsc);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pe_action_t *stop_op = NULL;
possible_matches = find_actions(rsc->actions, key, node);
if (possible_matches) {
stop_op = possible_matches->data;
g_list_free(possible_matches);
}
if (stop_op && (stop_op->flags & pe_action_runnable)) {
STOP_SANITY_ASSERT(__LINE__);
}
LogAction("Stop", rsc, node, NULL, stop_op,
(stop_op && stop_op->reason)? stop_op : start, terminal);
}
free(key);
} else if ((stop != NULL)
&& pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) {
/* 'stop' may be NULL if the failure was ignored */
LogAction("Recover", rsc, current, next, stop, start, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving) {
LogAction("Move", rsc, current, next, stop, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
LogAction("Reload", rsc, current, next, start, NULL, terminal);
} else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) {
LogAction("Restart", rsc, current, next, start, NULL, terminal);
STOP_SANITY_ASSERT(__LINE__);
} else if (rsc->role == RSC_ROLE_MASTER) {
CRM_LOG_ASSERT(current != NULL);
LogAction("Demote", rsc, current, next, demote, NULL, terminal);
} else if(rsc->next_role == RSC_ROLE_MASTER) {
CRM_LOG_ASSERT(next);
LogAction("Promote", rsc, current, next, promote, NULL, terminal);
} else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
LogAction("Start", rsc, current, next, start, NULL, terminal);
}
}
gboolean
StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
pe_action_t *stop;
if (rsc->partial_migration_target) {
if (rsc->partial_migration_target->details == current->details) {
pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
next->details->uname, rsc->id);
continue;
} else {
pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
optional = FALSE;
}
}
pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
stop = stop_action(rsc, current, optional);
if(rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", TRUE);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
update_action_flags(stop, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, optional, data_set);
}
if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
order_actions(stop, unfence, pe_order_implies_first);
if (!node_has_been_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
}
}
}
return TRUE;
}
static void
order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
enum pe_ordering order, pe_working_set_t *data_set)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
*
* This might seem to violate the principle that fence devices require
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
if (is_unfence_device(rsc, data_set)
|| pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
order_actions(unfence, action, order);
if (!node_has_been_unfenced(node)) {
// But unfencing is required if it has never been done
char *reason = crm_strdup_printf("required by %s %s",
rsc->id, action->task);
trigger_unfencing(NULL, node, reason, NULL, data_set);
free(reason);
}
}
}
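/*!
 * \internal
 * \brief Schedule a start of a resource on its chosen node
 *
 * The start is created as optional, then made mandatory if it is runnable and
 * the caller did not request an optional start.
 *
 * \param[in] rsc Resource to start
 * \param[in] next Node where the resource should be started
 * \param[in] optional Whether the start may be optional
 * \param[in] data_set Cluster working set
 *
 * \return TRUE (this function always succeeds)
 */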
gboolean
StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
start = start_action(rsc, next, TRUE);
order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
update_action_flags(start, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
return TRUE;
}
gboolean
PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
gboolean runnable = TRUE;
GListPtr action_list = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(next != NULL, return FALSE);
pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *start = (pe_action_t *) gIter->data;
if (!pcmk_is_set(start->flags, pe_action_runnable)) {
runnable = FALSE;
}
}
g_list_free(action_list);
if (runnable) {
promote_action(rsc, next, optional);
return TRUE;
}
pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *promote = (pe_action_t *) gIter->data;
update_action_flags(promote, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
g_list_free(action_list);
return TRUE;
}
gboolean
DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
/* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
demote_action(rsc, current, optional);
}
return TRUE;
}
gboolean
RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
CRM_CHECK(FALSE, return FALSE);
return FALSE;
}
gboolean
NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
return FALSE;
}
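/*!
 * \internal
 * \brief Schedule removal of a resource's history from a node
 *
 * The delete is ordered after the resource's stop and before its next start,
 * and is skipped if the resource has failed or the node is unavailable.
 *
 * \param[in] rsc Resource whose history should be removed
 * \param[in] node Node to remove the history from
 * \param[in] optional Whether the delete may be optional
 * \param[in] data_set Cluster working set
 *
 * \return TRUE if a delete action was scheduled, FALSE otherwise
 */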
gboolean
DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
return FALSE;
} else if (node == NULL) {
pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
return FALSE;
} else if (node->details->unclean || node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
node->details->uname);
return FALSE;
}
crm_notice("Removing %s from %s", rsc->id, node->details->uname);
delete_action(rsc, node, optional);
new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
optional ? pe_order_implies_then : pe_order_optional, data_set);
new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
optional ? pe_order_implies_then : pe_order_optional, data_set);
return TRUE;
}
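/*!
 * \internal
 * \brief Schedule a probe (one-time monitor) of a resource on a node
 *
 * The probe is skipped when it cannot provide useful information: startup
 * probes are disabled, the node can never run the resource (Pacemaker Remote
 * restrictions or discovery settings), the resource is an orphan or already
 * known on the node, or its state can be inferred from its container.
 *
 * \param[in] rsc Resource to probe
 * \param[in] node Node to probe the resource on
 * \param[in] complete Currently unused (always NULL)
 * \param[in] force If TRUE, schedule the probe even if normally skippable
 * \param[in] data_set Cluster working set
 *
 * \return TRUE if any probe was created, FALSE otherwise
 */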
gboolean
native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
enum pe_ordering flags = pe_order_optional;
char *key = NULL;
pe_action_t *probe = NULL;
pe_node_t *running = NULL;
pe_node_t *allowed = NULL;
pe_resource_t *top = uber_parent(rsc);
static const char *rc_master = NULL;
static const char *rc_inactive = NULL;
if (rc_inactive == NULL) {
rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
}
CRM_CHECK(node != NULL, return FALSE);
if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
return FALSE;
}
if (pe__is_guest_or_remote_node(node)) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
rsc->id, node->details->id);
return FALSE;
} else if (pe__is_guest_node(node)
&& pe__resource_contains_guest_node(data_set, rsc)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
rsc->id, node->details->id);
return FALSE;
} else if (rsc->is_remote_node) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
rsc->id, node->details->id);
return FALSE;
}
}
if (rsc->children) {
GListPtr gIter = NULL;
gboolean any_created = FALSE;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
|| any_created;
}
return any_created;
} else if ((rsc->container) && (!rsc->is_remote_node)) {
pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
return FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
return FALSE;
}
// Check whether resource is already known on node
if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
return FALSE;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
if (allowed == NULL) {
/* exclusive discover is enabled and this node is not in the allowed list. */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
return FALSE;
} else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
/* exclusive discover is enabled and this node is not marked
* as a node this resource should be discovered on */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
return FALSE;
}
}
if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
/* If this node was allowed to host this resource it would
* have been explicitly added to the 'allowed_nodes' list.
* However it wasn't and the node has discovery disabled, so
* no need to probe for this resource.
*/
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
return FALSE;
}
if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
/* this resource is marked as not needing to be discovered on this node */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
return FALSE;
}
if (pe__is_guest_node(node)) {
pe_resource_t *remote = node->details->remote_rsc->container;
if(remote->role == RSC_ROLE_STOPPED) {
/* If the container is stopped, then we know anything that
* might have been inside it is also stopped and there is
* no need to probe.
*
* If we don't know the container's state on the target
* either:
*
* - the container is running, the transition will abort
* and we'll end up in a different case next time, or
*
* - the container is stopped
*
* Either way there is no need to probe.
*
*/
if(remote->allocated_to
&& g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
/* For safety, we order the 'rsc' start after 'remote'
* has been probed.
*
* Using 'top' helps for groups, but we may need to
* follow the start's ordering chain backwards.
*/
custom_action_order(remote,
pcmk__op_key(remote->id, RSC_STATUS, 0),
NULL, top,
pcmk__op_key(top->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
}
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
rsc->id, node->details->id, remote->id);
return FALSE;
            /* Here we really want to check if remote->stop is required,
* but that information doesn't exist yet
*/
} else if(node->details->remote_requires_reset
|| node->details->unclean
|| pcmk_is_set(remote->flags, pe_rsc_failed)
|| remote->next_role == RSC_ROLE_STOPPED
|| (remote->allocated_to
&& pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
) {
/* The container is stopping or restarting, don't start
* 'rsc' until 'remote' stops as this also implies that
* 'rsc' is stopped - avoiding the need to probe
*/
custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
NULL, top, pcmk__op_key(top->id, RSC_START, 0),
NULL, pe_order_optional, data_set);
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
rsc->id, node->details->id, remote->id);
return FALSE;
/* } else {
* The container is running so there is no problem probing it
*/
}
}
key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
update_action_flags(probe, pe_action_optional | pe_action_clear, __func__,
__LINE__);
order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
/*
* We need to know if it's running_on (not just known_on) this node
* to correctly determine the target rc.
*/
running = pe_find_node_id(rsc->running_on, node->details->id);
if (running == NULL) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
} else if (rsc->role == RSC_ROLE_MASTER) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
}
crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on);
if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
top = rsc;
} else {
crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
}
if (!pcmk_is_set(probe->flags, pe_action_runnable)
&& (rsc->running_on == NULL)) {
/* Prevent the start from occurring if rsc isn't active, but
* don't cause it to stop if it was active already
*/
pe__set_order_flags(flags, pe_order_runnable_left);
}
custom_action_order(rsc, NULL, probe,
top, pcmk__op_key(top->id, RSC_START, 0), NULL,
flags, data_set);
/* Before any reloads, if they exist */
custom_action_order(rsc, NULL, probe,
top, reload_key(rsc), NULL,
pe_order_optional, data_set);
#if 0
// complete is always null currently
if (!is_unfence_device(rsc, data_set)) {
/* Normally rsc.start depends on probe complete which depends
* on rsc.probe. But this can't be the case for fence devices
* with unfencing, as it would create graph loops.
*
* So instead we explicitly order 'rsc.probe then rsc.start'
*/
order_actions(probe, complete, pe_order_implies_then);
}
#endif
return TRUE;
}
/*!
* \internal
* \brief Check whether a resource is known on a particular node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
{
if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
return TRUE;
} else if ((rsc->variant == pe_native)
&& pe_rsc_is_anon_clone(rsc->parent)
&& pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
/* We check only the parent, not the uber-parent, because we cannot
* assume that the resource is known if it is in an anonymously cloned
* group (which may be only partially known).
*/
return TRUE;
}
return FALSE;
}
/*!
* \internal
* \brief Order a resource's start and promote actions relative to fencing
*
* \param[in] rsc Resource to be ordered
* \param[in] stonith_op Fence action
* \param[in] data_set Cluster information
*/
static void
native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
pe_node_t *target;
GListPtr gIter = NULL;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
switch (action->needs) {
case rsc_req_nothing:
// Anything other than start or promote requires nothing
break;
case rsc_req_stonith:
order_actions(stonith_op, action, pe_order_optional);
break;
case rsc_req_quorum:
if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
&& pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
* we're about to shoot, we have to assume it may be active
* there. Order the resource start after the fencing. This
* is analogous to waiting for all the probes for a resource
* to complete before starting it.
*
* The most likely explanation is that the DC died and took
* its status with it.
*/
pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
target->details->uname);
order_actions(stonith_op, action,
pe_order_optional | pe_order_runnable_left);
}
break;
}
}
}
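/*!
 * \internal
 * \brief Order a resource's stop and demote actions relative to fencing
 *
 * Stops and demotes implied by fencing of the node they run on can never
 * actually execute, so they are converted to pseudo-actions and, where
 * required, ordered after the fencing itself.
 *
 * \param[in] rsc Resource to be ordered
 * \param[in] stonith_op Fence action
 * \param[in] data_set Cluster information
 */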
static void
native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
GListPtr action_list = NULL;
bool order_implicit = false;
pe_resource_t *top = uber_parent(rsc);
pe_action_t *parent_stop = NULL;
pe_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
/* If resource requires fencing, implicit actions must occur after fencing.
*
* Implied stops and demotes of resources running on guest nodes are always
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
|| pe__is_guest_node(target)) {
order_implicit = true;
}
if (action_list && order_implicit) {
parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
}
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
// The stop would never complete, so convert it into a pseudo-action.
update_action_flags(action, pe_action_pseudo|pe_action_runnable,
__func__, __LINE__);
if (order_implicit) {
update_action_flags(action, pe_action_implied_by_stonith,
__func__, __LINE__);
/* Order the stonith before the parent stop (if any).
*
* Also order the stonith before the resource stop, unless the
* resource is inside a bundle -- that would cause a graph loop.
* We can rely on the parent stop's ordering instead.
*
* User constraints must not order a resource in a guest node
* relative to the guest node container resource. The
* pe_order_preserve flag marks constraints as generated by the
* cluster and thus immune to that check (and is irrelevant if
* target is not a guest).
*/
if (!pe_rsc_is_bundled(rsc)) {
order_actions(stonith_op, action, pe_order_preserve);
}
order_actions(stonith_op, parent_stop, pe_order_preserve);
}
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
rsc->id, (order_implicit? "after" : "because"),
target->details->uname);
} else {
crm_info("%s is implicit %s %s is fenced",
action->uuid, (order_implicit? "after" : "because"),
target->details->uname);
}
if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
/* Create a second notification that will be delivered
* immediately after the node is fenced
*
* Basic problem:
* - C is a clone active on the node to be shot and stopping on another
* - R is a resource that depends on C
*
* + C.stop depends on R.stop
* + C.stopped depends on STONITH
* + C.notify depends on C.stopped
* + C.healthy depends on C.notify
* + R.stop depends on C.healthy
*
* The extra notification here changes
* + C.healthy depends on C.notify
* into:
* + C.healthy depends on C.notify'
* + C.notify' depends on STONITH'
* thus breaking the loop
*/
create_secondary_notification(action, rsc, stonith_op, data_set);
}
        /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
           However, given group(rA, rB) running on nodeX where B.stop has failed,
A := stop healthy resource (rA.stop)
B := stop failed resource (pseudo operation B.stop)
C := stonith nodeX
A requires B, B requires C, C requires A
This loop would prevent the cluster from making progress.
This block creates the "C requires A" dependency and therefore must (at least
for now) be disabled.
Instead, run the block above and treat all resources on nodeX as B would be
(marked as a pseudo op depending on the STONITH).
TODO: Break the "A requires B" dependency in update_action() and re-enable this block
} else if(is_stonith == FALSE) {
crm_info("Moving healthy resource %s"
" off %s before fencing",
rsc->id, node->details->uname);
* stop healthy resources before the
* stonith op
*
custom_action_order(
rsc, stop_key(rsc), NULL,
NULL,strdup(CRM_OP_FENCE),stonith_op,
pe_order_optional, data_set);
*/
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
|| pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_info(rsc,
"Demote of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
update_action_flags(action, pe_action_pseudo|pe_action_runnable,
__func__, __LINE__);
if (pe_rsc_is_bundled(rsc)) {
/* Do nothing, let the recovery be ordered after the parent's implied stop */
} else if (order_implicit) {
order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
}
}
}
g_list_free(action_list);
}
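/*!
 * \internal
 * \brief Order a resource's actions relative to a fencing operation
 *
 * Collective resources are handled by recursing into their children;
 * unmanaged resources are skipped entirely.
 *
 * \param[in] rsc Resource to be ordered
 * \param[in] stonith_op Fence action
 * \param[in] data_set Cluster information
 */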
void
rsc_stonith_ordering(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
if (rsc->children) {
GListPtr gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_stonith_ordering(child_rsc, stonith_op, data_set);
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
} else {
native_start_constraints(rsc, stonith_op, data_set);
native_stop_constraints(rsc, stonith_op, data_set);
}
}
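/*!
 * \internal
 * \brief Schedule a reload of a resource whose definition changed in place
 *
 * The reload is skipped (or replaced by a full restart) if the resource is
 * collective, unmanaged, failed, inactive, or still has a start pending.
 *
 * \param[in] rsc Resource to reload
 * \param[in] node Node where the resource is active
 * \param[in] data_set Cluster working set
 */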
void
ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set)
{
GListPtr gIter = NULL;
pe_action_t *reload = NULL;
if (rsc->children) {
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
ReloadRsc(child_rsc, node, data_set);
}
return;
} else if (rsc->variant > pe_native) {
/* Complex resource with no children */
return;
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
return;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        /* We don't need to specify any particular actions here; normal failure
         * recovery will apply.
         */
pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
return;
} else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
/* If a resource's configuration changed while a start was pending,
* force a full restart.
*/
pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
stop_action(rsc, node, FALSE);
return;
} else if (node == NULL) {
pe_rsc_trace(rsc, "%s: not active", rsc->id);
return;
}
pe_rsc_trace(rsc, "Processing %s", rsc->id);
pe__set_resource_flags(rsc, pe_rsc_reload);
reload = custom_action(
rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
pe_action_set_reason(reload, "resource definition change", FALSE);
custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
}
void
native_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
pe_resource_t *parent;
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container) {
crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index 0dbeed271f..b976344c53 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -1,1025 +1,1042 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#define VARIANT_CLONE 1
#include <lib/pengine/variant.h>
extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
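/*!
 * \internal
 * \brief Create promote orderings between a clone and one of its instances
 *
 * This is called once per instance (with last tracking the previously
 * processed instance), then a final time with a NULL child to close out
 * the ordered case.
 *
 * \param[in] clone_data Variant data for the clone
 * \param[in] type Ordering flags to use
 * \param[in] rsc Clone resource
 * \param[in] child Instance being processed (NULL for the final call)
 * \param[in] last Previously processed instance
 * \param[in] data_set Cluster working set
 */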
static void
child_promoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
pe_resource_t * rsc, pe_resource_t * child, pe_resource_t * last,
pe_working_set_t * data_set)
{
if (child == NULL) {
if (clone_data->ordered && last != NULL) {
pe_rsc_trace(rsc, "Ordered version (last node)");
/* last child promote before promoted started */
new_rsc_order(last, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);
}
return;
}
/* child promote before global promoted */
new_rsc_order(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);
/* global promote before child promote */
new_rsc_order(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);
if (clone_data->ordered) {
pe_rsc_trace(rsc, "Ordered version");
if (last == NULL) {
/* global promote before first child promote */
last = rsc;
}
/* else: child/child relative promote */
order_start_start(last, child, type);
new_rsc_order(last, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);
} else {
pe_rsc_trace(rsc, "Un-ordered version");
}
}
static void
child_demoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
pe_resource_t * rsc, pe_resource_t * child, pe_resource_t * last,
pe_working_set_t * data_set)
{
if (child == NULL) {
if (clone_data->ordered && last != NULL) {
pe_rsc_trace(rsc, "Ordered version (last node)");
/* global demote before first child demote */
new_rsc_order(rsc, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, data_set);
}
return;
}
/* child demote before global demoted */
new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set);
/* global demote before child demote */
new_rsc_order(rsc, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed, data_set);
if (clone_data->ordered && last != NULL) {
pe_rsc_trace(rsc, "Ordered version");
/* child/child relative demote */
new_rsc_order(child, RSC_DEMOTE, last, RSC_DEMOTE, type, data_set);
} else if (clone_data->ordered) {
pe_rsc_trace(rsc, "Ordered version (1st node)");
        /* first child demote before global demoted */
new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type, data_set);
} else {
pe_rsc_trace(rsc, "Un-ordered version");
}
}
static void
check_promotable_actions(pe_resource_t *rsc, gboolean *demoting,
gboolean *promoting)
{
GListPtr gIter = NULL;
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
check_promotable_actions(child, demoting, promoting);
}
return;
}
CRM_ASSERT(demoting != NULL);
CRM_ASSERT(promoting != NULL);
gIter = rsc->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (*promoting && *demoting) {
return;
} else if (pcmk_is_set(action->flags, pe_action_optional)) {
continue;
} else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_casei)) {
*demoting = TRUE;
} else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_casei)) {
*promoting = TRUE;
}
}
}
static void
apply_master_location(pe_resource_t *child, GListPtr location_constraints,
                      pe_node_t *chosen)
{
CRM_CHECK(child && chosen, return);
for (GListPtr gIter = location_constraints; gIter; gIter = gIter->next) {
pe_node_t *cons_node = NULL;
pe__location_t *cons = gIter->data;
if (cons->role_filter == RSC_ROLE_MASTER) {
pe_rsc_trace(child, "Applying %s to %s", cons->id, child->id);
cons_node = pe_find_node_id(cons->node_list_rh, chosen->details->id);
}
if (cons_node != NULL) {
int new_priority = pe__add_scores(child->priority,
cons_node->weight);
pe_rsc_trace(child, "\t%s[%s]: %d -> %d (%d)",
child->id, cons_node->details->uname, child->priority,
new_priority, cons_node->weight);
child->priority = new_priority;
}
}
}
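+/* Convenience helper for the guest node check below: returns the node to
+ * which a guest node's container has been allocated, or NULL if the
+ * container has not been allocated anywhere
+ */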
+static pe_node_t *
+guest_location(pe_node_t *guest_node)
+{
+ pe_resource_t *guest = guest_node->details->remote_rsc->container;
+
+ return guest->fns->location(guest, NULL, FALSE);
+}
+
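/*!
 * \internal
 * \brief Check whether a clone instance is eligible for promotion
 *
 * \param[in] rsc Clone instance to check
 *
 * \return Node on which the instance can be promoted, or NULL if it cannot
 *         be promoted anywhere
 */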
static pe_node_t *
can_be_master(pe_resource_t * rsc)
{
pe_node_t *node = NULL;
pe_node_t *local_node = NULL;
pe_resource_t *parent = uber_parent(rsc);
clone_variant_data_t *clone_data = NULL;
#if 0
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
role = rsc->fns->state(rsc, FALSE);
crm_info("%s role: %s", rsc->id, role2text(role));
#endif
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
if (can_be_master(child) == NULL) {
pe_rsc_trace(rsc, "Child %s of %s can't be promoted", child->id, rsc->id);
return NULL;
}
}
}
node = rsc->fns->location(rsc, NULL, FALSE);
if (node == NULL) {
pe_rsc_trace(rsc, "%s cannot be master: not allocated", rsc->id);
return NULL;
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
crm_notice("Forcing unmanaged master %s to remain promoted on %s",
rsc->id, node->details->uname);
} else {
return NULL;
}
} else if (rsc->priority < 0) {
pe_rsc_trace(rsc, "%s cannot be master: preference: %d", rsc->id, rsc->priority);
return NULL;
} else if (can_run_resources(node) == FALSE) {
crm_trace("Node can't run any resources: %s", node->details->uname);
return NULL;
+
+ /* @TODO It's possible this check should be done in can_run_resources()
+ * instead. We should investigate all its callers to figure out whether that
+ * would be a good idea.
+ */
+ } else if (pe__is_guest_node(node) && (guest_location(node) == NULL)) {
+ pe_rsc_trace(rsc, "%s cannot be promoted: guest %s not allocated",
+ rsc->id, node->details->remote_rsc->container->id);
+ return NULL;
}
get_clone_variant_data(clone_data, parent);
local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
if (local_node == NULL) {
crm_err("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
return NULL;
} else if ((local_node->count < clone_data->promoted_node_max)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
return local_node;
} else {
pe_rsc_trace(rsc, "%s cannot be master on %s: node full", rsc->id, node->details->uname);
}
return NULL;
}
static gint
sort_promotable_instance(gconstpointer a, gconstpointer b, gpointer data_set)
{
int rc;
enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
CRM_ASSERT(resource1 != NULL);
CRM_ASSERT(resource2 != NULL);
role1 = resource1->fns->state(resource1, TRUE);
role2 = resource2->fns->state(resource2, TRUE);
rc = sort_rsc_index(a, b);
if (rc != 0) {
crm_trace("%s %c %s (index)", resource1->id, rc < 0 ? '<' : '>', resource2->id);
return rc;
}
if (role1 > role2) {
crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id);
return -1;
} else if (role1 < role2) {
crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id);
return 1;
}
return sort_clone_instance(a, b, data_set);
}
static void
promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
pe_node_t *node = NULL;
pe_node_t *chosen = NULL;
clone_variant_data_t *clone_data = NULL;
char score[33];
size_t len = sizeof(score);
get_clone_variant_data(clone_data, rsc);
if (clone_data->merged_master_weights) {
return;
}
clone_data->merged_master_weights = TRUE;
pe_rsc_trace(rsc, "Merging weights for %s", rsc->id);
pe__set_resource_flags(rsc, pe_rsc_merging);
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "Sort index: %s = %d", child->id, child->sort_index);
}
pe__show_node_weights(true, rsc, "Before", rsc->allowed_nodes);
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
chosen = child->fns->location(child, NULL, FALSE);
if (chosen == NULL || child->sort_index < 0) {
pe_rsc_trace(rsc, "Skipping %s", child->id);
continue;
}
node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
/* adds in master preferences and rsc_location.role=Master */
score2char_stack(child->sort_index, score, len);
pe_rsc_trace(rsc, "Adding %s to %s from %s", score,
node->details->uname, child->id);
node->weight = pe__add_scores(child->sort_index, node->weight);
}
pe__show_node_weights(true, rsc, "Middle", rsc->allowed_nodes);
gIter = rsc->rsc_cons;
for (; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (constraint->score == 0) {
continue;
}
/* (re-)adds location preferences of resources that the
* master instance should/must be colocated with
*/
if (constraint->role_lh == RSC_ROLE_MASTER) {
enum pe_weights flags = constraint->score == INFINITY ? 0 : pe_weights_rollback;
pe_rsc_trace(rsc, "RHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id,
constraint->score);
rsc->allowed_nodes =
constraint->rsc_rh->cmds->merge_weights(constraint->rsc_rh, rsc->id,
rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY, flags);
}
}
gIter = rsc->rsc_cons_lhs;
for (; gIter != NULL; gIter = gIter->next) {
rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
if (constraint->score == 0) {
continue;
}
/* (re-)adds location preferences of resource that wish to be
* colocated with the master instance
*/
if (constraint->role_rh == RSC_ROLE_MASTER) {
pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id,
constraint->score);
rsc->allowed_nodes =
constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id,
rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY,
(pe_weights_rollback |
pe_weights_positive));
}
}
gIter = rsc->rsc_tickets;
for (; gIter != NULL; gIter = gIter->next) {
rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data;
if (rsc_ticket->role_lh == RSC_ROLE_MASTER
&& (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby)) {
resource_location(rsc, NULL, -INFINITY, "__stateful_without_ticket__", data_set);
}
}
pe__show_node_weights(true, rsc, "After", rsc->allowed_nodes);
/* write them back and sort */
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
chosen = child->fns->location(child, NULL, FALSE);
if (!pcmk_is_set(child->flags, pe_rsc_managed)
&& (child->next_role == RSC_ROLE_MASTER)) {
child->sort_index = INFINITY;
} else if (chosen == NULL || child->sort_index < 0) {
pe_rsc_trace(rsc, "%s: %d", child->id, child->sort_index);
} else {
node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
child->sort_index = node->weight;
}
pe_rsc_trace(rsc, "Set sort index: %s = %d", child->id, child->sort_index);
}
rsc->children = g_list_sort_with_data(rsc->children,
sort_promotable_instance, data_set);
pe__clear_resource_flags(rsc, pe_rsc_merging);
}
static gboolean
filter_anonymous_instance(pe_resource_t *rsc, const pe_node_t *node)
{
GListPtr rIter = NULL;
char *key = clone_strip(rsc->id);
pe_resource_t *parent = uber_parent(rsc);
for (rIter = parent->children; rIter; rIter = rIter->next) {
/* If there is an active instance on the node, only it receives the
* promotion score. Use ->find_rsc() in case this is a cloned group.
*/
pe_resource_t *child = rIter->data;
pe_resource_t *active = parent->fns->find_rsc(child, key, node, pe_find_clone|pe_find_current);
if(rsc == active) {
pe_rsc_trace(rsc, "Found %s for %s active on %s: done", active->id, key, node->details->uname);
free(key);
return TRUE;
} else if(active) {
pe_rsc_trace(rsc, "Found %s for %s on %s: not %s", active->id, key, node->details->uname, rsc->id);
free(key);
return FALSE;
} else {
pe_rsc_trace(rsc, "%s on %s: not active", key, node->details->uname);
}
}
for (rIter = parent->children; rIter; rIter = rIter->next) {
pe_resource_t *child = rIter->data;
/*
* We know it's not running, but any score will still count if
* the instance has been probed on $node
*
* Again use ->find_rsc() because we might be a cloned group
* and knowing that other members of the group are known here
* implies nothing
*/
rsc = parent->fns->find_rsc(child, key, NULL, pe_find_clone);
CRM_LOG_ASSERT(rsc);
if(rsc) {
pe_rsc_trace(rsc, "Checking %s for %s on %s", rsc->id, key, node->details->uname);
if (g_hash_table_lookup(rsc->known_on, node->details->id)) {
free(key);
return TRUE;
}
}
}
free(key);
return FALSE;
}
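/*!
 * \internal
 * \brief Look up a resource's promotion score on a node
 *
 * The score is read from the node attribute "master-<name>", which is what
 * the crm_master helper sets on behalf of resource agents (using the agent's
 * OCF_RESOURCE_INSTANCE as the name).
 *
 * \param[in] rsc Resource whose promotion score should be looked up
 * \param[in] node Node to look up the score on
 * \param[in] name Resource name to use in the attribute name
 *
 * \return Promotion score as a string, or NULL if none is set
 */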
static const char *
lookup_promotion_score(pe_resource_t *rsc, const pe_node_t *node, const char *name)
{
const char *attr_value = NULL;
if (node && name) {
char *attr_name = crm_strdup_printf("master-%s", name);
attr_value = pe_node_attribute_calculated(node, attr_name, rsc);
free(attr_name);
}
return attr_value;
}
static int
promotion_score(pe_resource_t *rsc, const pe_node_t *node, int not_set_value)
{
char *name = rsc->id;
const char *attr_value = NULL;
int score = not_set_value;
pe_node_t *match = NULL;
CRM_CHECK(node != NULL, return not_set_value);
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
int c_score = promotion_score(child, node, not_set_value);
if (score == not_set_value) {
score = c_score;
} else {
score += c_score;
}
}
return score;
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
&& filter_anonymous_instance(rsc, node)) {
pe_rsc_trace(rsc, "Anonymous clone %s is allowed on %s", rsc->id, node->details->uname);
} else if (rsc->running_on || g_hash_table_size(rsc->known_on)) {
/* If we've probed and/or started the resource anywhere, consider
* promotion scores only from nodes where we know the status. However,
* if the status of all nodes is unknown (e.g. cluster startup),
* skip this code, to make sure we take into account any permanent
* promotion scores set previously.
*/
pe_node_t *known = pe_hash_table_lookup(rsc->known_on, node->details->id);
match = pe_find_node_id(rsc->running_on, node->details->id);
if ((match == NULL) && (known == NULL)) {
pe_rsc_trace(rsc, "skipping %s (aka. %s) promotion score on %s because inactive",
rsc->id, rsc->clone_name, node->details->uname);
return score;
}
}
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
return score;
} else if (match->weight < 0) {
pe_rsc_trace(rsc, "%s on %s has score: %d - ignoring",
rsc->id, match->details->uname, match->weight);
return score;
}
if (rsc->clone_name) {
/* Use the name the lrm knows this resource as,
* since that's what crm_master would have used too
*/
name = rsc->clone_name;
}
attr_value = lookup_promotion_score(rsc, node, name);
pe_rsc_trace(rsc, "promotion score for %s on %s = %s",
name, node->details->uname, crm_str(attr_value));
if ((attr_value == NULL) && !pcmk_is_set(rsc->flags, pe_rsc_unique)) {
/* If we don't have any LRM history yet, we won't have clone_name -- in
* that case, for anonymous clones, try the resource name without any
* instance number.
*/
name = clone_strip(rsc->id);
if (strcmp(rsc->id, name)) {
attr_value = lookup_promotion_score(rsc, node, name);
pe_rsc_trace(rsc, "stripped promotion score for %s on %s = %s",
name, node->details->uname, crm_str(attr_value));
}
free(name);
}
if (attr_value != NULL) {
score = char2score(attr_value);
}
return score;
}
void
apply_master_prefs(pe_resource_t *rsc)
{
int score, new_score;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->applied_master_prefs) {
/* Make sure we only do this once */
return;
}
clone_data->applied_master_prefs = TRUE;
for (; gIter != NULL; gIter = gIter->next) {
GHashTableIter iter;
pe_node_t *node = NULL;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (can_run_resources(node) == FALSE) {
/* This node will never be promoted to master,
* so don't apply the promotion score as that may
* lead to clone shuffling
*/
continue;
}
score = promotion_score(child_rsc, node, 0);
if (score > 0) {
new_score = pe__add_scores(node->weight, score);
if (new_score != node->weight) {
pe_rsc_trace(rsc, "\t%s: Updating preference for %s (%d->%d)",
child_rsc->id, node->details->uname, node->weight, new_score);
node->weight = new_score;
}
}
new_score = QB_MAX(child_rsc->priority, score);
if (new_score != child_rsc->priority) {
pe_rsc_trace(rsc, "\t%s: Updating priority (%d->%d)",
child_rsc->id, child_rsc->priority, new_score);
child_rsc->priority = new_score;
}
}
}
}
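/*!
 * \internal
 * \brief Set the role of a promotable instance (and its children) to slave
 *
 * \param[in] rsc Resource to modify
 * \param[in] current If TRUE, set the known role; otherwise set the next
 *                    role (stopped rather than slave if unallocated)
 */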
static void
set_role_slave(pe_resource_t * rsc, gboolean current)
{
GListPtr gIter = rsc->children;
if (current) {
if (rsc->role == RSC_ROLE_STARTED) {
rsc->role = RSC_ROLE_SLAVE;
}
} else {
GListPtr allocated = NULL;
rsc->fns->location(rsc, &allocated, FALSE);
if (allocated) {
rsc->next_role = RSC_ROLE_SLAVE;
} else {
rsc->next_role = RSC_ROLE_STOPPED;
}
g_list_free(allocated);
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
set_role_slave(child_rsc, current);
}
}
static void
set_role_master(pe_resource_t * rsc)
{
GListPtr gIter = rsc->children;
if (rsc->next_role == RSC_ROLE_UNKNOWN) {
rsc->next_role = RSC_ROLE_MASTER;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
set_role_master(child_rsc);
}
}
pe_node_t *
pcmk__set_instance_roles(pe_resource_t *rsc, pe_working_set_t *data_set)
{
int promoted = 0;
GListPtr gIter = NULL;
GListPtr gIter2 = NULL;
GHashTableIter iter;
pe_node_t *node = NULL;
pe_node_t *chosen = NULL;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
char score[33];
size_t len = sizeof(score);
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
/* count now tracks the number of masters allocated */
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
node->count = 0;
}
/*
* assign priority
*/
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
GListPtr list = NULL;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "Assigning priority for %s: %s", child_rsc->id,
role2text(child_rsc->next_role));
if (child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) {
set_role_slave(child_rsc, TRUE);
}
chosen = child_rsc->fns->location(child_rsc, &list, FALSE);
if (pcmk__list_of_multiple(list)) {
pcmk__config_err("Cannot promote non-colocated child %s",
child_rsc->id);
}
g_list_free(list);
if (chosen == NULL) {
continue;
}
next_role = child_rsc->fns->state(child_rsc, FALSE);
switch (next_role) {
case RSC_ROLE_STARTED:
case RSC_ROLE_UNKNOWN:
/*
* Default to -1 if no value is set
*
* This allows master locations to be specified
* based solely on rsc_location constraints,
* but prevents anyone from being promoted if
* neither a constraint nor a promotion score is present
*/
child_rsc->priority = promotion_score(child_rsc, chosen, -1);
break;
case RSC_ROLE_SLAVE:
case RSC_ROLE_STOPPED:
child_rsc->priority = -INFINITY;
break;
case RSC_ROLE_MASTER:
/* We will arrive here if we're re-creating actions after a stonith
*/
break;
default:
CRM_CHECK(FALSE /* unhandled */ ,
crm_err("Unknown resource role: %d for %s", next_role, child_rsc->id));
}
apply_master_location(child_rsc, child_rsc->rsc_location, chosen);
apply_master_location(child_rsc, rsc->rsc_location, chosen);
for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) {
rsc_colocation_t *cons = (rsc_colocation_t *) gIter2->data;
if (cons->score == 0) {
continue;
}
child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons,
data_set);
}
child_rsc->sort_index = child_rsc->priority;
pe_rsc_trace(rsc, "Assigning priority for %s: %d", child_rsc->id, child_rsc->priority);
if (next_role == RSC_ROLE_MASTER) {
child_rsc->sort_index = INFINITY;
}
}
pe__show_node_weights(true, rsc, "Pre merge", rsc->allowed_nodes);
promotion_order(rsc, data_set);
/* mark the first N as masters */
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
score2char_stack(child_rsc->sort_index, score, len);
chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
if (show_scores) {
if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
printf("%s promotion score on %s: %s\n",
child_rsc->id,
(chosen? chosen->details->uname : "none"), score);
}
} else {
pe_rsc_trace(rsc, "%s promotion score on %s: %s", child_rsc->id,
(chosen? chosen->details->uname : "none"), score);
}
chosen = NULL; /* nuke 'chosen' so that we don't promote more than the
* required number of instances
*/
if (child_rsc->sort_index < 0) {
pe_rsc_trace(rsc, "Not supposed to promote child: %s", child_rsc->id);
} else if ((promoted < clone_data->promoted_max)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
chosen = can_be_master(child_rsc);
}
pe_rsc_debug(rsc, "%s promotion score: %d", child_rsc->id, child_rsc->priority);
if (chosen == NULL) {
set_role_slave(child_rsc, FALSE);
continue;
} else if(child_rsc->role < RSC_ROLE_MASTER
&& !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
&& data_set->no_quorum_policy == no_quorum_freeze) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
child_rsc->id, role2text(child_rsc->role), role2text(child_rsc->next_role));
set_role_slave(child_rsc, FALSE);
continue;
}
chosen->count++;
pe_rsc_info(rsc, "Promoting %s (%s %s)",
child_rsc->id, role2text(child_rsc->role), chosen->details->uname);
set_role_master(child_rsc);
promoted++;
}
pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d to master",
rsc->id, promoted, clone_data->promoted_max);
return NULL;
}
void
create_promotable_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *action = NULL;
GListPtr gIter = rsc->children;
pe_action_t *action_complete = NULL;
gboolean any_promoting = FALSE;
gboolean any_demoting = FALSE;
pe_resource_t *last_promote_rsc = NULL;
pe_resource_t *last_demote_rsc = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_debug(rsc, "Creating actions for %s", rsc->id);
for (; gIter != NULL; gIter = gIter->next) {
gboolean child_promoting = FALSE;
gboolean child_demoting = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id);
child_rsc->cmds->create_actions(child_rsc, data_set);
check_promotable_actions(child_rsc, &child_demoting, &child_promoting);
any_demoting = any_demoting || child_demoting;
any_promoting = any_promoting || child_promoting;
pe_rsc_trace(rsc, "Created actions for %s: %d %d", child_rsc->id, child_promoting,
child_demoting);
}
/* promote */
action = create_pseudo_resource_op(rsc, RSC_PROMOTE, !any_promoting, TRUE, data_set);
action_complete = create_pseudo_resource_op(rsc, RSC_PROMOTED, !any_promoting, TRUE, data_set);
action_complete->priority = INFINITY;
child_promoting_constraints(clone_data, pe_order_optional,
rsc, NULL, last_promote_rsc, data_set);
if (clone_data->promote_notify == NULL) {
clone_data->promote_notify =
create_notification_boundaries(rsc, RSC_PROMOTE, action, action_complete, data_set);
}
/* demote */
action = create_pseudo_resource_op(rsc, RSC_DEMOTE, !any_demoting, TRUE, data_set);
action_complete = create_pseudo_resource_op(rsc, RSC_DEMOTED, !any_demoting, TRUE, data_set);
action_complete->priority = INFINITY;
child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_demote_rsc, data_set);
if (clone_data->demote_notify == NULL) {
clone_data->demote_notify =
create_notification_boundaries(rsc, RSC_DEMOTE, action, action_complete, data_set);
if (clone_data->promote_notify) {
/* If we ever wanted groups to have notifications we'd need to move this to native_internal_constraints() one day
* Requires exposing *_notify
*/
order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre,
pe_order_optional);
order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre,
pe_order_optional);
order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre,
pe_order_optional);
order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre,
pe_order_optional);
order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre,
pe_order_optional);
}
}
/* restore the correct priority */
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->priority = rsc->priority;
}
}
void
promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
{
/* global stopped before start */
new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set);
/* global stopped before promote */
new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
/* global demoted before start */
new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set);
/* global started before promote */
new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
/* global demoted before stop */
new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set);
/* global demote before demoted */
new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set);
/* global demoted before promote */
new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
}
void
promotable_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GListPtr gIter = rsc->children;
pe_resource_t *last_rsc = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
promote_demote_constraints(rsc, data_set);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
/* child demote before promote */
new_rsc_order(child_rsc, RSC_DEMOTE, child_rsc, RSC_PROMOTE, pe_order_optional, data_set);
child_promoting_constraints(clone_data, pe_order_optional,
rsc, child_rsc, last_rsc, data_set);
child_demoting_constraints(clone_data, pe_order_optional,
rsc, child_rsc, last_rsc, data_set);
last_rsc = child_rsc;
}
}
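/*!
 * \internal
 * \brief Add a score to each node in a table that matches a given node
 *
 * Two nodes match when they have the same value for the given node attribute
 * (which defaults to the node name).
 *
 * \param[in] hash Table of allowed nodes to update
 * \param[in] other Node to compare against
 * \param[in] attr Node attribute to compare (NULL for node name)
 * \param[in] score Score to add to the weight of matching nodes
 */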
static void
node_hash_update_one(GHashTable * hash, pe_node_t * other, const char *attr, int score)
{
GHashTableIter iter;
pe_node_t *node = NULL;
const char *value = NULL;
if (other == NULL) {
return;
} else if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
value = pe_node_attribute_raw(other, attr);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
const char *tmp = pe_node_attribute_raw(node, attr);
if (pcmk__str_eq(value, tmp, pcmk__str_casei)) {
crm_trace("%s: %d + %d", node->details->uname, node->weight, other->weight);
node->weight = pe__add_scores(node->weight, score);
}
}
}
void
promotable_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
rsc_colocation_t *constraint,
pe_working_set_t *data_set)
{
GListPtr gIter = NULL;
if (constraint->score == 0) {
return;
}
if (pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
GListPtr rhs = NULL;
for (gIter = rsc_rh->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE);
pe_rsc_trace(rsc_rh, "Processing: %s", child_rsc->id);
if (chosen != NULL && next_role == constraint->role_rh) {
pe_rsc_trace(rsc_rh, "Applying: %s %s %s %d", child_rsc->id,
role2text(next_role), chosen->details->uname, constraint->score);
if (constraint->score < INFINITY) {
node_hash_update_one(rsc_lh->allowed_nodes, chosen,
constraint->node_attribute, constraint->score);
}
rhs = g_list_prepend(rhs, chosen);
}
}
/* Only do this if it's not a master-master colocation
* Doing this unconditionally would prevent the slaves from being started
*/
if (constraint->role_lh != RSC_ROLE_MASTER || constraint->role_rh != RSC_ROLE_MASTER) {
if (constraint->score >= INFINITY) {
node_list_exclude(rsc_lh->allowed_nodes, rhs, TRUE);
}
}
g_list_free(rhs);
} else if (constraint->role_lh == RSC_ROLE_MASTER) {
pe_resource_t *rh_child = find_compatible_child(rsc_lh, rsc_rh,
constraint->role_rh,
FALSE, data_set);
if (rh_child == NULL && constraint->score >= INFINITY) {
pe_rsc_trace(rsc_lh, "%s can't be promoted %s", rsc_lh->id, constraint->id);
rsc_lh->priority = -INFINITY;
} else if (rh_child != NULL) {
int new_priority = pe__add_scores(rsc_lh->priority,
constraint->score);
pe_rsc_debug(rsc_lh, "Applying %s to %s", constraint->id, rsc_lh->id);
pe_rsc_debug(rsc_lh, "\t%s: %d->%d", rsc_lh->id, rsc_lh->priority, new_priority);
rsc_lh->priority = new_priority;
}
}
return;
}
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 9f78dacd91..551e90148f 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,271 +1,267 @@
/*
* Copyright 2013-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/pengine/internal.h>
#include <glib.h>
-gboolean
-pe__resource_is_remote_conn(pe_resource_t *rsc, pe_working_set_t *data_set)
+bool
+pe__resource_is_remote_conn(const pe_resource_t *rsc,
+ const pe_working_set_t *data_set)
{
- pe_node_t *node;
-
- if (rsc == NULL) {
- return FALSE;
- } else if (rsc->is_remote_node == FALSE) {
- return FALSE;
- }
-
- node = pe_find_node(data_set->nodes, rsc->id);
- if (node == NULL) {
- return FALSE;
- }
-
- return pe__is_remote_node(node);
+ return (rsc != NULL) && rsc->is_remote_node
+ && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
}
-gboolean
-pe__is_remote_node(pe_node_t *node)
+bool
+pe__is_remote_node(const pe_node_t *node)
{
- if (pe__is_guest_or_remote_node(node)
- && ((node->details->remote_rsc == NULL)
- || (node->details->remote_rsc->container == NULL))) {
- return TRUE;
- }
- return FALSE;
+ return (node != NULL) && (node->details->type == node_remote)
+ && ((node->details->remote_rsc == NULL)
+ || (node->details->remote_rsc->container == NULL));
}
-gboolean
-pe__is_guest_node(pe_node_t *node)
+bool
+pe__is_guest_node(const pe_node_t *node)
{
- if (pe__is_guest_or_remote_node(node)
- && node->details->remote_rsc
- && node->details->remote_rsc->container) {
- return TRUE;
- }
- return FALSE;
+ return (node != NULL) && (node->details->type == node_remote)
+ && (node->details->remote_rsc != NULL)
+ && (node->details->remote_rsc->container != NULL);
}
-gboolean
-pe__is_guest_or_remote_node(pe_node_t *node)
+bool
+pe__is_guest_or_remote_node(const pe_node_t *node)
{
return (node != NULL) && (node->details->type == node_remote);
}
bool
-pe__is_bundle_node(pe_node_t *node)
+pe__is_bundle_node(const pe_node_t *node)
{
return pe__is_guest_node(node)
&& pe_rsc_is_bundled(node->details->remote_rsc);
}
/*!
* \internal
* \brief Check whether a resource creates a guest node
*
* If a given resource contains a filler resource that is a remote connection,
* return that filler resource (or NULL if none is found).
*
* \param[in] data_set Working set of cluster
* \param[in] rsc Resource to check
*
* \return Filler resource with remote connection, or NULL if none found
*/
pe_resource_t *
pe__resource_contains_guest_node(const pe_working_set_t *data_set,
const pe_resource_t *rsc)
{
if ((rsc != NULL) && (data_set != NULL)
&& pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
pe_resource_t *filler = gIter->data;
if (filler->is_remote_node) {
return filler;
}
}
}
return NULL;
}
-gboolean
+bool
xml_contains_remote_node(xmlNode *xml)
{
- const char *class = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
- const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
- const char *agent = crm_element_value(xml, XML_ATTR_TYPE);
+ const char *value = NULL;
- if (pcmk__str_eq(agent, "remote", pcmk__str_casei) && pcmk__str_eq(provider, "pacemaker", pcmk__str_casei)
- && pcmk__str_eq(class, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
- return TRUE;
+ if (xml == NULL) {
+ return false;
}
- return FALSE;
+
+ value = crm_element_value(xml, XML_ATTR_TYPE);
+ if (!pcmk__str_eq(value, "remote", pcmk__str_casei)) {
+ return false;
+ }
+
+ value = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
+ if (!pcmk__str_eq(value, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
+ return false;
+ }
+
+ value = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
+ if (!pcmk__str_eq(value, "pacemaker", pcmk__str_casei)) {
+ return false;
+ }
+
+ return true;
}
/*!
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
* \param[in] data_set Working set for cluster
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
void (*helper)(const pe_node_t*, void*), void *user_data)
{
GListPtr iter;
CRM_CHECK(data_set && host && host->details && helper, return);
if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
if (rsc->is_remote_node && (rsc->container != NULL)) {
pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
}
}
}
}
/*!
* \internal
* \brief Create CIB XML for an implicit remote connection
*
* \param[in] parent If not NULL, use as parent XML element
* \param[in] uname Name of Pacemaker Remote node
 * \param[in] container_id If not NULL, use this as connection container ID
* \param[in] migrateable If not NULL, use as allow-migrate value
* \param[in] is_managed If not NULL, use as is-managed value
* \param[in] start_timeout If not NULL, use as remote connect timeout
* \param[in] server If not NULL, use as remote server value
* \param[in] port If not NULL, use as remote port value
*/
xmlNode *
pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
const char *server, const char *port)
{
xmlNode *remote;
xmlNode *xml_sub;
remote = create_xml_node(parent, XML_CIB_TAG_RESOURCE);
// Add identity
crm_xml_add(remote, XML_ATTR_ID, uname);
crm_xml_add(remote, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(remote, XML_AGENT_ATTR_PROVIDER, "pacemaker");
crm_xml_add(remote, XML_ATTR_TYPE, "remote");
// Add meta-attributes
xml_sub = create_xml_node(remote, XML_TAG_META_SETS);
crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_META_SETS);
crm_create_nvpair_xml(xml_sub, NULL,
XML_RSC_ATTR_INTERNAL_RSC, XML_BOOLEAN_TRUE);
if (container_id) {
crm_create_nvpair_xml(xml_sub, NULL,
XML_RSC_ATTR_CONTAINER, container_id);
}
if (migrateable) {
crm_create_nvpair_xml(xml_sub, NULL,
XML_OP_ATTR_ALLOW_MIGRATE, migrateable);
}
if (is_managed) {
crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_MANAGED, is_managed);
}
// Add instance attributes
if (port || server) {
xml_sub = create_xml_node(remote, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_ATTR_SETS);
if (server) {
crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_REMOTE_RA_ADDR,
server);
}
if (port) {
crm_create_nvpair_xml(xml_sub, NULL, "port", port);
}
}
// Add operations
xml_sub = create_xml_node(remote, "operations");
crm_create_op_xml(xml_sub, uname, "monitor", "30s", "30s");
if (start_timeout) {
crm_create_op_xml(xml_sub, uname, "start", "0", start_timeout);
}
return remote;
}
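/* For reference, a call such as the following (all values hypothetical)
 *
 *     pe_create_remote_xml(NULL, "remote1", "vm1", NULL, NULL, "60s",
 *                          "192.168.122.10", NULL);
 *
 * produces CIB XML along these lines (nvpair names are shown by their
 * msg_xml.h macro names):
 *
 *     <primitive id="remote1" class="ocf" provider="pacemaker"
 *                type="remote">
 *       <meta_attributes id="remote1-meta_attributes">
 *         <!-- XML_RSC_ATTR_INTERNAL_RSC=true, XML_RSC_ATTR_CONTAINER=vm1 -->
 *       </meta_attributes>
 *       <instance_attributes id="remote1-instance_attributes">
 *         <!-- XML_RSC_ATTR_REMOTE_RA_ADDR=192.168.122.10 -->
 *       </instance_attributes>
 *       <operations>
 *         <!-- monitor every 30s (timeout 30s), start with timeout 60s -->
 *       </operations>
 *     </primitive>
 */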
// History entry to be checked for fail count clearing
struct check_op {
xmlNode *rsc_op; // History entry XML
pe_resource_t *rsc; // Known resource corresponding to history entry
pe_node_t *node; // Known node corresponding to history entry
enum pe_check_parameters check_type; // What needs checking
};
void
pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
enum pe_check_parameters flag, pe_working_set_t *data_set)
{
struct check_op *check_op = NULL;
CRM_CHECK(data_set && rsc_op && rsc && node, return);
check_op = calloc(1, sizeof(struct check_op));
CRM_ASSERT(check_op != NULL);
crm_trace("Deferring checks of %s until after allocation", ID(rsc_op));
check_op->rsc_op = rsc_op;
check_op->rsc = rsc;
check_op->node = node;
check_op->check_type = flag;
data_set->param_check = g_list_prepend(data_set->param_check, check_op);
}
/*!
* \internal
* \brief Call a function for each action to be checked for addr substitution
*
* \param[in] data_set Working set for cluster
* \param[in] cb Function to be called
*/
void
pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
enum pe_check_parameters, pe_working_set_t*))
{
CRM_CHECK(data_set && cb, return);
for (GList *item = data_set->param_check; item != NULL; item = item->next) {
struct check_op *check_op = item->data;
cb(check_op->rsc, check_op->node, check_op->rsc_op,
check_op->check_type, data_set);
}
}
void
pe__free_param_checks(pe_working_set_t *data_set)
{
if (data_set && data_set->param_check) {
g_list_free_full(data_set->param_check, free);
data_set->param_check = NULL;
}
}
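/* Illustrative lifecycle sketch for the three functions above (not part
 * of this patch; the callback name is hypothetical): checks are queued
 * while resource history is unpacked, run after allocation, then freed.
 *
 *     pe__add_param_check(rsc_op, rsc, node, flag, data_set);
 *     ...
 *     pe__foreach_param_check(data_set, check_params_cb);
 *     pe__free_param_checks(data_set);
 *
 * where flag is an enum pe_check_parameters value and check_params_cb
 * matches the callback signature documented above.
 */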
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index d186be7877..ee0f9141a6 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -1,1476 +1,1477 @@
/*
* Copyright 2004-2019 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/rules_internal.h>
#include <crm/pengine/internal.h>
#include <sys/types.h>
#include <regex.h>
#include <ctype.h>
CRM_TRACE_INIT_DATA(pe_rules);
/*!
* \brief Evaluate any rules contained by given XML element
*
* \param[in] ruleset XML element possibly containing rule sub-elements
* \param[in] node_hash Node attributes to use when evaluating expressions
* \param[in] now Time to use when evaluating expressions
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if no rules are present, or if any rule present is in effect, else FALSE
*/
gboolean
pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_rules(ruleset, &rule_data, next_change);
}
gboolean
pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_expr(rule, &rule_data, next_change);
}
/*!
* \brief Evaluate one rule subelement (pass/fail)
*
* A rule element may contain another rule, a node attribute expression, or a
* date expression. Given any one of those, evaluate it and return whether it
* passed.
*
* \param[in] expr Rule subelement XML
* \param[in] node_hash Node attributes to use when evaluating expression
* \param[in] role Resource role to use when evaluating expression
* \param[in] now Time to use when evaluating expression
* \param[out] next_change If not NULL, set to when evaluation will change
* \param[in] match_data If not NULL, resource back-references and params
*
* \return TRUE if expression is in effect under given conditions, else FALSE
*/
gboolean
pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_subexpr(expr, &rule_data, next_change);
}
enum expression_type
find_expression_type(xmlNode * expr)
{
const char *tag = NULL;
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
tag = crm_element_name(expr);
if (pcmk__str_eq(tag, "date_expression", pcmk__str_casei)) {
return time_expr;
} else if (pcmk__str_eq(tag, "rsc_expression", pcmk__str_casei)) {
return rsc_expr;
} else if (pcmk__str_eq(tag, "op_expression", pcmk__str_casei)) {
return op_expr;
} else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_casei)) {
return nested_rule;
} else if (!pcmk__str_eq(tag, "expression", pcmk__str_casei)) {
return not_expr;
} else if (pcmk__strcase_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
return loc_expr;
} else if (pcmk__str_eq(attr, CRM_ATTR_ROLE, pcmk__str_casei)) {
return role_expr;
#if ENABLE_VERSIONED_ATTRS
} else if (pcmk__str_eq(attr, CRM_ATTR_RA_VERSION, pcmk__str_casei)) {
return version_expr;
#endif
}
return attr_expr;
}
gboolean
pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = role,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_role_expr(expr, &rule_data);
}
gboolean
pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_attr_expr(expr, &rule_data);
}
/* As per the nethack rules:
*
* moon period = 29.53058 days ~= 30, year = 365.2422 days
* days moon phase advances on first day of year compared to preceding year
* = 365.2422 - 12*29.53058 ~= 11
* years in Metonic cycle (time until same phases fall on the same days of
* the month) = 18.6 ~= 19
* moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30
* (29 as initial condition)
* current phase in days = first day phase + days elapsed in year
* 6 moons ~= 177 days
* 177 ~= 8 reported phases * 22
* + 11/22 for rounding
*
* 0-7, with 0: new, 4: full
*/
static int
phase_of_the_moon(crm_time_t * now)
{
uint32_t epact, diy, goldn;
uint32_t y;
crm_time_get_ordinal(now, &y, &diy);
goldn = (y % 19) + 1;
epact = (11 * goldn + 18) % 30;
if ((epact == 25 && goldn > 11) || epact == 24)
epact++;
return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7);
}
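/* Worked example of the approximation above (illustrative): for y = 2020,
 * goldn = (2020 % 19) + 1 = 7 and epact = (11 * 7 + 18) % 30 = 5 (neither
 * correction applies). On January 1 (diy = 1), the result is
 * ((((1 + 5) * 6) + 11) % 177) / 22 = 47 / 22 = 2 on the 0 (new) to
 * 4 (full) scale, i.e. a waxing moon approaching first quarter.
 */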
static int
check_one(xmlNode *cron_spec, const char *xml_field, uint32_t time_field) {
int rc = pcmk_rc_undetermined;
const char *value = crm_element_value(cron_spec, xml_field);
long long low, high;
if (value == NULL) {
/* Return pcmk_rc_undetermined if the field is missing. */
goto bail;
}
if (pcmk__parse_ll_range(value, &low, &high) == pcmk_rc_unknown_format) {
goto bail;
} else if (low == high) {
/* A single number was given, not a range. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low != -1 && high != -1) {
/* This is a range with both bounds. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low == -1) {
/* This is a range with no starting value. */
rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range;
} else if (high == -1) {
/* This is a range with no ending value. */
rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range;
}
bail:
if (rc == pcmk_rc_within_range) {
crm_debug("Condition '%s' in %s: passed", crm_str(value), xml_field);
} else {
crm_debug("Condition '%s' in %s: failed", crm_str(value), xml_field);
}
return rc;
}
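/* The range syntax accepted above comes from pcmk__parse_ll_range(), so a
 * date_spec field may contain, for example (illustrative values):
 *
 *     "5"      exactly 5
 *     "3-5"    3 through 5 inclusive
 *     "-5"     no lower bound (anything up to and including 5)
 *     "3-"     no upper bound (3 and anything above)
 */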
static gboolean
check_passes(int rc) {
/* _within_range is obvious. _undetermined is a pass because
* this is the return value if a field is not given. In this
* case, we just want to ignore it and check other fields to
* see if they place some restriction on what can pass.
*/
return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined;
}
#define CHECK_ONE(spec, name, var) do { \
int subpart_rc = check_one(spec, name, var); \
if (check_passes(subpart_rc) == FALSE) { \
return subpart_rc; \
} \
} while (0)
int
pe_cron_range_satisfied(crm_time_t * now, xmlNode * cron_spec)
{
uint32_t h, m, s, y, d, w;
CRM_CHECK(now != NULL, return pcmk_rc_op_unsatisfied);
crm_time_get_gregorian(now, &y, &m, &d);
CHECK_ONE(cron_spec, "years", y);
CHECK_ONE(cron_spec, "months", m);
CHECK_ONE(cron_spec, "monthdays", d);
crm_time_get_timeofday(now, &h, &m, &s);
CHECK_ONE(cron_spec, "hours", h);
CHECK_ONE(cron_spec, "minutes", m);
CHECK_ONE(cron_spec, "seconds", s);
crm_time_get_ordinal(now, &y, &d);
CHECK_ONE(cron_spec, "yeardays", d);
crm_time_get_isoweek(now, &y, &w, &d);
CHECK_ONE(cron_spec, "weekyears", y);
CHECK_ONE(cron_spec, "weeks", w);
CHECK_ONE(cron_spec, "weekdays", d);
CHECK_ONE(cron_spec, "moon", phase_of_the_moon(now));
/* If we get here, either no fields were specified (which is success), or all
* the fields that were specified had their conditions met (which is also a
* success). Thus, the result is success.
*/
return pcmk_rc_ok;
}
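/* For example (hypothetical rule), a date_spec element such as
 *
 *     <date_spec id="nine-to-five" hours="9-16" weekdays="1-5"/>
 *
 * is satisfied Monday through Friday from 09:00:00 through 16:59:59:
 * unspecified fields are ignored (undetermined counts as a pass), and
 * every specified field must pass.
 */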
#define update_field(xml_field, time_fn) \
value = crm_element_value(duration_spec, xml_field); \
if(value != NULL) { \
int value_i = crm_parse_int(value, "0"); \
time_fn(end, value_i); \
}
crm_time_t *
pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec)
{
crm_time_t *end = NULL;
const char *value = NULL;
end = crm_time_new(NULL);
crm_time_set(end, start);
update_field("years", crm_time_add_years);
update_field("months", crm_time_add_months);
update_field("weeks", crm_time_add_weeks);
update_field("days", crm_time_add_days);
update_field("hours", crm_time_add_hours);
update_field("minutes", crm_time_add_minutes);
update_field("seconds", crm_time_add_seconds);
return end;
}
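/* For example (hypothetical values), given start = 2020-01-01 00:00:00
 * and
 *
 *     <duration id="one-day-plus" days="1" hours="2"/>
 *
 * pe_parse_xml_duration() returns 2020-01-02 02:00:00, since each field
 * is simply added to a copy of the start time.
 */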
/*!
* \internal
* \brief Test a date expression (pass/fail) for a specific time
*
* \param[in] expr date_expression XML
* \param[in] now Time for which to evaluate expression
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if date expression is in effect at given time, FALSE otherwise
*/
gboolean
pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
switch (pe__eval_date_expr(expr, &rule_data, next_change)) {
case pcmk_rc_within_range:
case pcmk_rc_ok:
return TRUE;
default:
return FALSE;
}
}
// Set next_change to t if t is earlier
static void
crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t)
{
if ((next_change != NULL) && (t != NULL)) {
if (!crm_time_is_defined(next_change)
|| (crm_time_compare(t, next_change) < 0)) {
crm_time_set(next_change, t);
}
}
}
/*!
* \internal
* \brief Evaluate a date expression for a specific time
*
* \param[in] expr date_expression XML
* \param[in] now Time for which to evaluate expression
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return Standard Pacemaker return code
*/
int
pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe__eval_date_expr(expr, &rule_data, next_change);
}
// Information about a block of nvpair elements
typedef struct sorted_set_s {
int score; // This block's score for sorting
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
} sorted_set_t;
static gint
sort_pairs(gconstpointer a, gconstpointer b)
{
const sorted_set_t *pair_a = a;
const sorted_set_t *pair_b = b;
if (a == NULL && b == NULL) {
return 0;
} else if (a == NULL) {
return 1;
} else if (b == NULL) {
return -1;
}
if (pcmk__str_eq(pair_a->name, pair_a->special_name, pcmk__str_casei)) {
return -1;
} else if (pcmk__str_eq(pair_b->name, pair_a->special_name, pcmk__str_casei)) {
return 1;
}
if (pair_a->score < pair_b->score) {
return 1;
} else if (pair_a->score > pair_b->score) {
return -1;
}
return 0;
}
static void
populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlNode * top)
{
const char *name = NULL;
const char *value = NULL;
const char *old_value = NULL;
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
name = crm_element_name(list->children);
if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
list = list->children;
}
for (an_attr = __xml_first_child_element(list); an_attr != NULL;
an_attr = __xml_next_element(an_attr)) {
if (pcmk__str_eq((const char *)an_attr->name, XML_CIB_TAG_NVPAIR, pcmk__str_none)) {
xmlNode *ref_nvpair = expand_idref(an_attr, top);
name = crm_element_value(an_attr, XML_NVPAIR_ATTR_NAME);
if (name == NULL) {
name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME);
}
value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE);
if (value == NULL) {
value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE);
}
if (name == NULL || value == NULL) {
continue;
}
old_value = g_hash_table_lookup(hash, name);
if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
if (old_value) {
- crm_trace("Removing value for %s (%s)", name, value);
+ crm_trace("Letting %s default (removing explicit value \"%s\")",
+ name, value);
g_hash_table_remove(hash, name);
}
continue;
} else if (old_value == NULL) {
- crm_trace("Setting attribute: %s = %s", name, value);
+ crm_trace("Setting %s=\"%s\"", name, value);
g_hash_table_insert(hash, strdup(name), strdup(value));
} else if (overwrite) {
- crm_debug("Overwriting value of %s: %s -> %s", name, old_value, value);
+ crm_trace("Setting %s=\"%s\" (overwriting old value \"%s\")",
+ name, value, old_value);
g_hash_table_replace(hash, strdup(name), strdup(value));
}
}
}
}
#if ENABLE_VERSIONED_ATTRS
static xmlNode*
get_versioned_rule(xmlNode * attr_set)
{
xmlNode * rule = NULL;
xmlNode * expr = NULL;
for (rule = __xml_first_child_element(attr_set); rule != NULL;
rule = __xml_next_element(rule)) {
if (pcmk__str_eq((const char *)rule->name, XML_TAG_RULE, pcmk__str_none)) {
for (expr = __xml_first_child_element(rule); expr != NULL;
expr = __xml_next_element(expr)) {
if (find_expression_type(expr) == version_expr) {
return rule;
}
}
}
}
return NULL;
}
static void
add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs)
{
xmlNode *attr_set_copy = NULL;
xmlNode *rule = NULL;
xmlNode *expr = NULL;
if (!attr_set || !versioned_attrs) {
return;
}
attr_set_copy = copy_xml(attr_set);
rule = get_versioned_rule(attr_set_copy);
if (!rule) {
free_xml(attr_set_copy);
return;
}
expr = __xml_first_child_element(rule);
while (expr != NULL) {
if (find_expression_type(expr) != version_expr) {
xmlNode *node = expr;
expr = __xml_next_element(expr);
free_xml(node);
} else {
expr = __xml_next_element(expr);
}
}
add_node_nocopy(versioned_attrs, NULL, attr_set_copy);
}
#endif
typedef struct unpack_data_s {
gboolean overwrite;
void *hash;
crm_time_t *next_change;
pe_rule_eval_data_t *rule_data;
xmlNode *top;
} unpack_data_t;
static void
unpack_attr_set(gpointer data, gpointer user_data)
{
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data,
unpack_data->next_change)) {
return;
}
#if ENABLE_VERSIONED_ATTRS
if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash &&
g_hash_table_lookup_extended(unpack_data->rule_data->node_hash,
CRM_ATTR_RA_VERSION, NULL, NULL))) {
// we haven't actually tested versioned expressions yet
return;
}
#endif
- crm_trace("Adding attributes from %s", pair->name);
+ crm_trace("Adding attributes from %s (score %d) %s overwrite",
+ pair->name, pair->score,
+ (unpack_data->overwrite? "with" : "without"));
populate_hash(pair->attr_set, unpack_data->hash, unpack_data->overwrite, unpack_data->top);
}
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_attr_set(gpointer data, gpointer user_data)
{
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
if (pe_eval_rules(pair->attr_set, unpack_data->rule_data,
unpack_data->next_change)) {
add_versioned_attributes(pair->attr_set, unpack_data->hash);
}
}
#endif
/*!
* \internal
* \brief Create a sorted list of nvpair blocks
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only get blocks of this element type
* \param[in] always_first If not NULL, sort block with this ID as first
*
* \return List of sorted_set_t entries for nvpair blocks
*/
static GList *
make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
const char *always_first)
{
GListPtr unsorted = NULL;
const char *score = NULL;
sorted_set_t *pair = NULL;
xmlNode *attr_set = NULL;
if (xml_obj == NULL) {
- crm_trace("No instance attributes");
return NULL;
}
-
- crm_trace("Checking for attributes");
for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL;
attr_set = __xml_next_element(attr_set)) {
/* Uncertain if set_name == NULL check is strictly necessary here */
if (pcmk__str_eq(set_name, (const char *)attr_set->name, pcmk__str_null_matches)) {
pair = NULL;
attr_set = expand_idref(attr_set, top);
if (attr_set == NULL) {
continue;
}
pair = calloc(1, sizeof(sorted_set_t));
pair->name = ID(attr_set);
pair->special_name = always_first;
pair->attr_set = attr_set;
score = crm_element_value(attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
unsorted = g_list_prepend(unsorted, pair);
}
}
return g_list_sort(unsorted, sort_pairs);
}
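/* For example (hypothetical IDs), given three blocks with scores 10, 20,
 * and 5, where the score-5 block's ID matches always_first, make_pairs()
 * returns them ordered: the score-5 block (the special name sorts first),
 * then score 20, then score 10 (descending score).
 */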
/*!
* \internal
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only use blocks of this element type
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
* \param[in] rule_data Matching parameters to use when unpacking
* \param[out] next_change If not NULL, set to when rule evaluation will change
* \param[in] unpack_func Function to call to unpack each block
*/
static void
unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name,
void *hash, const char *always_first, gboolean overwrite,
pe_rule_eval_data_t *rule_data, crm_time_t *next_change,
GFunc unpack_func)
{
GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
if (pairs) {
unpack_data_t data = {
.hash = hash,
.overwrite = overwrite,
.next_change = next_change,
.top = top,
.rule_data = rule_data
};
g_list_foreach(pairs, unpack_func, &data);
g_list_free_full(pairs, free);
}
}
void
pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
overwrite, rule_data, next_change, unpack_attr_set);
}
/*!
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name Element name to identify nvpair blocks
* \param[in] node_hash Node attributes to use when evaluating rules
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
* \param[in] now Time to use when evaluating rules
* \param[out] next_change If not NULL, set to when rule evaluation will change
*/
void
pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash,
always_first, overwrite, next_change);
}
#if ENABLE_VERSIONED_ATTRS
void
pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, xmlNode *hash,
crm_time_t *next_change)
{
unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data,
next_change, unpack_versioned_attr_set);
}
void
pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
xmlNode *hash, crm_time_t *now,
crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE,
&rule_data, next_change, unpack_versioned_attr_set);
}
#endif
char *
pe_expand_re_matches(const char *string, pe_re_match_data_t *match_data)
{
size_t len = 0;
int i;
const char *p, *last_match_index;
char *p_dst, *result = NULL;
if (pcmk__str_empty(string) || !match_data) {
return NULL;
}
p = last_match_index = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
len += p - last_match_index + (match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so);
last_match_index = p + 2;
}
p++;
}
p++;
}
len += p - last_match_index + 1;
/* len includes the terminating NUL; if the expanded result would be
 * empty, return NULL rather than an empty string
 */
if (len <= 1) {
return NULL;
}
p_dst = result = calloc(1, len);
p = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
/* rm_eo can be equal to rm_so, but then there is nothing to do */
int match_len = match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so;
memcpy(p_dst, match_data->string + match_data->pmatch[i].rm_so, match_len);
p_dst += match_len;
}
p++;
} else {
*(p_dst) = *(p);
p_dst++;
}
p++;
}
return result;
}
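/* Illustrative example (hypothetical match data): if match_data came from
 * matching the pattern "^rsc-(.*)$" against "rsc-db", then
 *
 *     pe_expand_re_matches("%1-attr", match_data)
 *
 * returns a newly allocated string "db-attr", which the caller must free.
 * "%0" expands to the entire matched string.
 */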
#if ENABLE_VERSIONED_ATTRS
GHashTable*
pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version)
{
GHashTable *hash = crm_str_table_new();
if (versioned_params && ra_version) {
GHashTable *node_hash = crm_str_table_new();
xmlNode *attr_set = __xml_first_child_element(versioned_params);
if (attr_set) {
g_hash_table_insert(node_hash, strdup(CRM_ATTR_RA_VERSION),
strdup(ra_version));
pe_unpack_nvpairs(NULL, versioned_params,
crm_element_name(attr_set), node_hash, hash, NULL,
FALSE, NULL, NULL);
}
g_hash_table_destroy(node_hash);
}
return hash;
}
#endif
gboolean
pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
// If there are no rules, pass by default
gboolean ruleset_default = TRUE;
for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
rule != NULL; rule = crm_next_same_xml(rule)) {
ruleset_default = FALSE;
if (pe_eval_expr(rule, rule_data, next_change)) {
/* Only the deprecated "lifetime" element of location constraints
* may contain more than one rule at the top level -- the schema
* limits a block of nvpairs to a single top-level rule. So, this
* effectively means that a lifetime is active if any rule it
* contains is active.
*/
return TRUE;
}
}
return ruleset_default;
}
gboolean
pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
xmlNode *expr = NULL;
gboolean test = TRUE;
gboolean empty = TRUE;
gboolean passed = TRUE;
gboolean do_and = TRUE;
const char *value = NULL;
rule = expand_idref(rule, NULL);
value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
if (pcmk__str_eq(value, "or", pcmk__str_casei)) {
do_and = FALSE;
passed = FALSE;
}
crm_trace("Testing rule %s", ID(rule));
for (expr = __xml_first_child_element(rule); expr != NULL;
expr = __xml_next_element(expr)) {
test = pe_eval_subexpr(expr, rule_data, next_change);
empty = FALSE;
if (test && do_and == FALSE) {
crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
return TRUE;
} else if (test == FALSE && do_and) {
crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
return FALSE;
}
}
if (empty) {
crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
}
crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
return passed;
}
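/* For example (hypothetical IDs), this rule passes if either expression
 * passes, because boolean-op="or" overrides the default "and":
 *
 *     <rule id="r1" boolean-op="or">
 *       <expression id="e1" attribute="foo" operation="defined"/>
 *       <expression id="e2" attribute="bar" operation="defined"/>
 *     </rule>
 */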
gboolean
pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
gboolean accept = FALSE;
const char *uname = NULL;
switch (find_expression_type(expr)) {
case nested_rule:
accept = pe_eval_expr(expr, rule_data, next_change);
break;
case attr_expr:
case loc_expr:
/* these expressions can never succeed if there is
* no node to compare with
*/
if (rule_data->node_hash != NULL) {
accept = pe__eval_attr_expr(expr, rule_data);
}
break;
case time_expr:
accept = pe_test_date_expression(expr, rule_data->now, next_change);
break;
case role_expr:
accept = pe__eval_role_expr(expr, rule_data);
break;
case rsc_expr:
accept = pe__eval_rsc_expr(expr, rule_data);
break;
case op_expr:
accept = pe__eval_op_expr(expr, rule_data);
break;
#if ENABLE_VERSIONED_ATTRS
case version_expr:
if (rule_data->node_hash &&
g_hash_table_lookup_extended(rule_data->node_hash,
CRM_ATTR_RA_VERSION, NULL, NULL)) {
accept = pe__eval_attr_expr(expr, rule_data);
} else {
// we will test this expression later, once ra-version is available
accept = TRUE;
}
break;
#endif
default:
CRM_CHECK(FALSE /* bad type */ , return FALSE);
accept = FALSE;
}
if (rule_data->node_hash) {
uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME);
}
crm_trace("Expression %s %s on %s",
ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
return accept;
}
/*!
* \internal
* \brief Compare two values in a rule's node attribute expression
*
* \param[in] l_val Value on left-hand side of comparison
* \param[in] r_val Value on right-hand side of comparison
* \param[in] type How to interpret the values (allowed values:
* \c "string", \c "integer", \c "number",
* \c "version", \c NULL)
* \param[in] op Type of comparison
*
* \return -1 if <tt>(l_val < r_val)</tt>,
* 0 if <tt>(l_val == r_val)</tt>,
* 1 if <tt>(l_val > r_val)</tt>
*/
static int
compare_attr_expr_vals(const char *l_val, const char *r_val, const char *type,
const char *op)
{
int cmp = 0;
if (l_val != NULL && r_val != NULL) {
if (type == NULL) {
if (pcmk__strcase_any_of(op, "lt", "lte", "gt", "gte", NULL)) {
if (pcmk__char_in_any_str('.', l_val, r_val, NULL)) {
type = "number";
} else {
type = "integer";
}
} else {
type = "string";
}
crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
}
if (pcmk__str_eq(type, "string", pcmk__str_casei)) {
cmp = strcasecmp(l_val, r_val);
} else if (pcmk__str_eq(type, "integer", pcmk__str_casei)) {
long long l_val_num = crm_parse_ll(l_val, NULL);
int rc1 = errno;
long long r_val_num = crm_parse_ll(r_val, NULL);
int rc2 = errno;
if (rc1 == 0 && rc2 == 0) {
if (l_val_num < r_val_num) {
cmp = -1;
} else if (l_val_num > r_val_num) {
cmp = 1;
} else {
cmp = 0;
}
} else {
crm_debug("Integer parse error. Comparing %s and %s as strings",
l_val, r_val);
cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
}
} else if (pcmk__str_eq(type, "number", pcmk__str_casei)) {
double l_val_num;
double r_val_num;
int rc1 = pcmk__scan_double(l_val, &l_val_num, NULL, NULL);
int rc2 = pcmk__scan_double(r_val, &r_val_num, NULL, NULL);
if (rc1 == pcmk_rc_ok && rc2 == pcmk_rc_ok) {
if (l_val_num < r_val_num) {
cmp = -1;
} else if (l_val_num > r_val_num) {
cmp = 1;
} else {
cmp = 0;
}
} else {
crm_debug("Floating-point parse error. Comparing %s and %s as "
"strings", l_val, r_val);
cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
}
} else if (pcmk__str_eq(type, "version", pcmk__str_casei)) {
cmp = compare_version(l_val, r_val);
}
} else if (l_val == NULL && r_val == NULL) {
cmp = 0;
} else if (r_val == NULL) {
cmp = 1;
} else { // l_val == NULL && r_val != NULL
cmp = -1;
}
return cmp;
}
/*!
* \internal
* \brief Check whether an attribute expression evaluates to \c true
*
* \param[in] l_val Value on left-hand side of comparison
* \param[in] r_val Value on right-hand side of comparison
* \param[in] type How to interpret the values (allowed values:
* \c "string", \c "integer", \c "number",
* \c "version", \c NULL)
* \param[in] op Type of comparison.
*
* \return \c true if expression evaluates to \c true, \c false
* otherwise
*/
static bool
accept_attr_expr(const char *l_val, const char *r_val, const char *type,
const char *op)
{
int cmp;
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
return (l_val != NULL);
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
return (l_val == NULL);
}
cmp = compare_attr_expr_vals(l_val, r_val, type, op);
if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
return (cmp == 0);
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
return (cmp != 0);
} else if (l_val == NULL || r_val == NULL) {
// The comparison is meaningless from this point on
return false;
} else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
return (cmp < 0);
} else if (pcmk__str_eq(op, "lte", pcmk__str_casei)) {
return (cmp <= 0);
} else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
return (cmp > 0);
} else if (pcmk__str_eq(op, "gte", pcmk__str_casei)) {
return (cmp >= 0);
}
return false; // Should never reach this point
}
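/* Examples of the type defaulting above (values illustrative): with no
 * explicit type, "10" vs. "9" under op="gt" compares as integers (10 > 9,
 * so it passes), while the same values under op="eq" compare as strings
 * (textually unequal, so it fails). "1.5" vs. "1.10" under op="gt"
 * compares as floating-point numbers because a value contains '.', so
 * 1.5 > 1.10 passes.
 */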
gboolean
pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
gboolean attr_allocated = FALSE;
const char *h_val = NULL;
GHashTable *table = NULL;
const char *op = NULL;
const char *type = NULL;
const char *attr = NULL;
const char *value = NULL;
const char *value_source = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
if (attr == NULL || op == NULL) {
pe_err("Invalid attribute or operation in expression"
" (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value));
return FALSE;
}
if (rule_data->match_data) {
if (rule_data->match_data->re) {
char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re);
if (resolved_attr) {
attr = (const char *) resolved_attr;
attr_allocated = TRUE;
}
}
if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) {
table = rule_data->match_data->params;
} else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) {
table = rule_data->match_data->meta;
}
}
if (table) {
const char *param_name = value;
const char *param_value = NULL;
if (param_name && param_name[0]) {
if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) {
value = param_value;
}
}
}
if (rule_data->node_hash != NULL) {
h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr);
}
if (attr_allocated) {
free((char *)attr);
attr = NULL;
}
return accept_attr_expr(h_val, value, type, op);
}
int
pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change)
{
crm_time_t *start = NULL;
crm_time_t *end = NULL;
const char *value = NULL;
const char *op = crm_element_value(expr, "operation");
xmlNode *duration_spec = NULL;
xmlNode *date_spec = NULL;
// "undetermined" will also be returned for parsing errors
int rc = pcmk_rc_undetermined;
crm_trace("Testing expression: %s", ID(expr));
duration_spec = first_named_child(expr, "duration");
date_spec = first_named_child(expr, "date_spec");
value = crm_element_value(expr, "start");
if (value != NULL) {
start = crm_time_new(value);
}
value = crm_element_value(expr, "end");
if (value != NULL) {
end = crm_time_new(value);
}
if (start != NULL && end == NULL && duration_spec != NULL) {
end = pe_parse_xml_duration(start, duration_spec);
}
if (pcmk__str_eq(op, "in_range", pcmk__str_null_matches | pcmk__str_casei)) {
if ((start == NULL) && (end == NULL)) {
// in_range requires at least one of start or end
} else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) {
rc = pcmk_rc_before_range;
crm_time_set_if_earlier(next_change, start);
} else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
if (end && next_change) {
// Evaluation doesn't change until second after end
crm_time_add_seconds(end, 1);
crm_time_set_if_earlier(next_change, end);
}
}
} else if (pcmk__str_eq(op, "date_spec", pcmk__str_casei)) {
rc = pe_cron_range_satisfied(rule_data->now, date_spec);
// @TODO set next_change appropriately
} else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
if (start == NULL) {
// gt requires start
} else if (crm_time_compare(rule_data->now, start) > 0) {
rc = pcmk_rc_within_range;
} else {
rc = pcmk_rc_before_range;
// Evaluation doesn't change until second after start
crm_time_add_seconds(start, 1);
crm_time_set_if_earlier(next_change, start);
}
} else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
if (end == NULL) {
// lt requires end
} else if (crm_time_compare(rule_data->now, end) < 0) {
rc = pcmk_rc_within_range;
crm_time_set_if_earlier(next_change, end);
} else {
rc = pcmk_rc_after_range;
}
}
crm_time_free(start);
crm_time_free(end);
return rc;
}
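/* For example (hypothetical ID), the expression
 *
 *     <date_expression id="expr1" operation="in_range"
 *                      start="2020-01-01" end="2020-06-30"/>
 *
 * yields pcmk_rc_before_range before the start (moving next_change up to
 * the start), pcmk_rc_within_range between the two dates (moving
 * next_change up to one second past the end), and pcmk_rc_after_range
 * afterward.
 */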
gboolean
pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) {
const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME);
const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL);
guint interval;
crm_trace("Testing op_defaults expression: %s", ID(expr));
if (rule_data->op_data == NULL) {
crm_trace("No operations data provided");
return FALSE;
}
interval = crm_parse_interval_spec(interval_s);
if (interval == 0 && errno != 0) {
crm_trace("Could not parse interval: %s", interval_s);
return FALSE;
}
if (interval_s != NULL && interval != rule_data->op_data->interval) {
crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval);
return FALSE;
}
if (!pcmk__str_eq(name, rule_data->op_data->op_name, pcmk__str_none)) {
crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name);
return FALSE;
}
return TRUE;
}
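/* For example (hypothetical ID), this op_expression matches only monitor
 * operations with a 10-second interval; omitting the interval attribute
 * would match a monitor at any interval:
 *
 *     <op_expression id="op-mon" name="monitor" interval="10s"/>
 */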
gboolean
pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
gboolean accept = FALSE;
const char *op = NULL;
const char *value = NULL;
if (rule_data->role == RSC_ROLE_UNKNOWN) {
return accept;
}
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
if (rule_data->role > RSC_ROLE_STARTED) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
if (text2role(value) == rule_data->role) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
accept = TRUE;
}
}
return accept;
}
gboolean
pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data)
{
const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS);
const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER);
const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
crm_trace("Testing rsc_defaults expression: %s", ID(expr));
if (rule_data->rsc_data == NULL) {
crm_trace("No resource data provided");
return FALSE;
}
if (class != NULL &&
!pcmk__str_eq(class, rule_data->rsc_data->standard, pcmk__str_none)) {
crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard);
return FALSE;
}
if ((provider == NULL && rule_data->rsc_data->provider != NULL) ||
(provider != NULL && rule_data->rsc_data->provider == NULL) ||
!pcmk__str_eq(provider, rule_data->rsc_data->provider, pcmk__str_none)) {
crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider);
return FALSE;
}
if (type != NULL &&
!pcmk__str_eq(type, rule_data->rsc_data->agent, pcmk__str_none)) {
crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent);
return FALSE;
}
return TRUE;
}
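/* For example (hypothetical ID), this rsc_expression matches only
 * resources using the ocf:heartbeat:IPaddr2 agent:
 *
 *     <rsc_expression id="rsc-ip" class="ocf" provider="heartbeat"
 *                     type="IPaddr2"/>
 *
 * An omitted class or type matches any value, while provider must match
 * exactly (both unset, or both set and equal), per the checks above.
 */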
// Deprecated functions kept only for backward API compatibility
gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now);
gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now);
gboolean pe_test_rule_re(xmlNode *rule, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_re_match_data_t *re_match_data);
gboolean pe_test_rule_full(xmlNode *rule, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data);
gboolean test_expression(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now);
gboolean pe_test_expression_re(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_re_match_data_t *re_match_data);
gboolean pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data);
void unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj,
const char *set_name, GHashTable *node_hash,
GHashTable *hash, const char *always_first,
gboolean overwrite, crm_time_t *now);
gboolean
test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now)
{
return pe_evaluate_rules(ruleset, node_hash, now, NULL);
}
gboolean
test_rule(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_rule(rule, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_rule_re(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_rule(rule, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_rule_full(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, pe_match_data_t *match_data)
{
return pe_test_rule(rule, node_hash, role, now, NULL, match_data);
}
gboolean
test_expression(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_expression(expr, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_expression_re(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_expression(expr, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data)
{
return pe_test_expression(expr, node_hash, role, now, NULL, match_data);
}
void
unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = RSC_ROLE_UNKNOWN,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first,
overwrite, &rule_data, NULL, unpack_attr_set);
}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 8ed4eafa6f..5c33a0bf49 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2834 +1,2832 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
gboolean include_disabled);
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
CRM_CHECK(action != NULL, return NULL);
if (action->action_details == NULL) {
action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
CRM_CHECK(action->action_details != NULL, return NULL);
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters == NULL) {
details->versioned_parameters = create_xml_node(NULL,
XML_TAG_OP_VER_ATTRS);
}
if (details->versioned_meta == NULL) {
details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
}
return details;
}
static void
pe_free_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
if ((action == NULL) || (action->action_details == NULL)) {
return;
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters) {
free_xml(details->versioned_parameters);
}
if (details->versioned_meta) {
free_xml(details->versioned_meta);
}
action->action_details = NULL;
}
#endif
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] data_set Working set for cluster
* \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
pe_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
pe_node_t *container_node = n->data;
if (!container_node->details->online
&& !pe_can_fence(data_set, container_node)) {
return false;
}
}
return true;
} else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
return false; /* Turned off */
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
return false; /* No devices */
} else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
return true;
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
return true;
} else if(node == NULL) {
return false;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
return true;
}
crm_trace("Cannot fence %s", node->details->uname);
return false;
}
/*!
* \internal
* \brief Copy a node object
*
* \param[in] this_node Node object to copy
*
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
pe_node_t *
pe__copy_node(const pe_node_t *this_node)
{
pe_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
new_node = calloc(1, sizeof(pe_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed;
new_node->details = this_node->details;
return new_node;
}
/* Any node in the hash table or the list that is not also in the other gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
{
GHashTable *result = hash;
pe_node_t *other_node = NULL;
GListPtr gIter = list;
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
other_node = pe_find_node_id(list, node->details->id);
if (other_node == NULL) {
node->weight = -INFINITY;
} else if (merge_scores) {
node->weight = pe__add_scores(node->weight, other_node->weight);
}
}
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
other_node = pe_hash_table_lookup(result, node->details->id);
if (other_node == NULL) {
pe_node_t *new_node = pe__copy_node(node);
new_node->weight = -INFINITY;
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
}
}
/*!
* \internal
* \brief Create a node hash table from a node list
*
* \param[in] list Node list
*
* \return Hash table equivalent of node list
*/
GHashTable *
pe__node_list2table(GList *list)
{
GHashTable *result = NULL;
result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
}
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
return pcmk_numeric_strcasecmp(((const pe_node_t *) a)->details->uname,
((const pe_node_t *) b)->details->uname);
}
/*!
* \internal
* \brief Output node weights to stdout
*
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__output_node_weights(pe_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
char score[128]; // Stack-allocated since this is called frequently
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
score2char_stack(node->weight, score, sizeof(score));
if (rsc) {
printf("%s: %s allocation score on %s: %s\n",
comment, rsc->id, node->details->uname, score);
} else {
printf("%s: %s = %s\n", comment, node->details->uname, score);
}
}
g_list_free(list);
}
/*!
* \internal
* \brief Log node weights at trace level
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
pe_resource_t *rsc, const char *comment, GHashTable *nodes)
{
GHashTableIter iter;
pe_node_t *node = NULL;
char score[128]; // Stack-allocated since this is called frequently
// Don't waste time if we're not tracing at this point
pcmk__log_else(LOG_TRACE, return);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
score2char_stack(node->weight, score, sizeof(score));
if (rsc) {
qb_log_from_external_source(function, file,
"%s: %s allocation score on %s: %s",
LOG_TRACE, line, 0,
comment, rsc->id,
node->details->uname, score);
} else {
qb_log_from_external_source(function, file, "%s: %s = %s",
LOG_TRACE, line, 0,
comment, node->details->uname,
score);
}
}
}
/*!
* \internal
* \brief Log or output node weights
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] to_log Log if true, otherwise output
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
void
pe__show_node_weights_as(const char *file, const char *function, int line,
bool to_log, pe_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
if (rsc != NULL) {
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
// Don't show allocation scores for orphans
return;
}
nodes = rsc->allowed_nodes;
}
if (nodes == NULL) {
// Nothing to show
return;
}
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
pe__output_node_weights(rsc, comment, nodes);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe__show_node_weights_as(file, function, line, to_log, child,
comment, nodes);
}
}
}
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
char **dump_text = user_data;
char *new_text = crm_strdup_printf("%s %s=%s",
*dump_text, (char *)key, (char *)value);
free(*dump_text);
*dump_text = new_text;
}
void
dump_node_capacity(int level, const char *comment, pe_node_t * node)
{
char *dump_text = crm_strdup_printf("%s: %s capacity:",
comment, node->details->uname);
g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
if (level == LOG_STDOUT) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
void
dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node)
{
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
comment, rsc->id, node->details->uname);
g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
switch (level) {
case LOG_STDOUT:
fprintf(stdout, "%s\n", dump_text);
break;
case LOG_NEVER:
break;
default:
crm_trace("%s", dump_text);
}
free(dump_text);
}
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->sort_index > resource2->sort_index) {
return -1;
}
if (resource1->sort_index < resource2->sort_index) {
return 1;
}
return 0;
}
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
static enum pe_quorum_policy
effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
{
enum pe_quorum_policy policy = data_set->no_quorum_policy;
if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
policy = no_quorum_ignore;
} else if (data_set->no_quorum_policy == no_quorum_demote) {
switch (rsc->role) {
case RSC_ROLE_MASTER:
case RSC_ROLE_SLAVE:
if (rsc->next_role > RSC_ROLE_SLAVE) {
rsc->next_role = RSC_ROLE_SLAVE;
}
policy = no_quorum_ignore;
break;
default:
policy = no_quorum_stop;
break;
}
}
return policy;
}
pe_action_t *
custom_action(pe_resource_t * rsc, char *key, const char *task,
pe_node_t * on_node, gboolean optional, gboolean save_action,
pe_working_set_t * data_set)
{
pe_action_t *action = NULL;
GListPtr possible_matches = NULL;
CRM_CHECK(key != NULL, return NULL);
CRM_CHECK(task != NULL, free(key); return NULL);
if (save_action && rsc != NULL) {
possible_matches = find_actions(rsc->actions, key, on_node);
} else if(save_action) {
#if 0
action = g_hash_table_lookup(data_set->singletons, key);
#else
/* More expensive but takes 'node' into account */
possible_matches = find_actions(data_set->actions, key, on_node);
#endif
}
if(data_set->singletons == NULL) {
data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
}
if (possible_matches != NULL) {
if (pcmk__list_of_multiple(possible_matches)) {
pe_warn("Action %s for %s on %s exists %d times",
task, rsc ? rsc->id : "<NULL>",
on_node ? on_node->details->uname : "<NULL>", g_list_length(possible_matches));
}
action = g_list_nth_data(possible_matches, 0);
- pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s",
- action->id, action->uuid,
- (rsc? rsc->id : "no resource"), task,
+ pe_rsc_trace(rsc, "Found action %d: %s for %s (%s) on %s",
+ action->id, task, (rsc? rsc->id : "no resource"),
+ action->uuid,
(on_node? on_node->details->uname : "no node"));
g_list_free(possible_matches);
}
if (action == NULL) {
if (save_action) {
- pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s",
- (optional? "optional" : "mandatory"),
- data_set->action_id, key,
- (rsc? rsc->id : "no resource"), task,
+ pe_rsc_trace(rsc, "Creating action %d (%s): %s for %s (%s) on %s",
+ data_set->action_id,
+ (optional? "optional" : "required"),
+ task, (rsc? rsc->id : "no resource"), key,
(on_node? on_node->details->uname : "no node"));
}
action = calloc(1, sizeof(pe_action_t));
if (save_action) {
action->id = data_set->action_id++;
} else {
action->id = 0;
}
action->rsc = rsc;
- CRM_ASSERT(task != NULL);
action->task = strdup(task);
if (on_node) {
action->node = pe__copy_node(on_node);
}
action->uuid = strdup(key);
if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
pe__set_action_flags(action, pe_action_dc);
}
pe__set_action_flags(action, pe_action_runnable);
if (optional) {
pe__set_action_flags(action, pe_action_optional);
} else {
pe__clear_action_flags(action, pe_action_optional);
}
action->extra = crm_str_table_new();
action->meta = crm_str_table_new();
if (save_action) {
data_set->actions = g_list_prepend(data_set->actions, action);
if(rsc == NULL) {
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
}
if (rsc != NULL) {
guint interval_ms = 0;
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
unpack_operation(action, action->op_entry, rsc->container, data_set,
interval_ms);
if (save_action) {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
-
- if (save_action) {
- pe_rsc_trace(rsc, "Action %d created", action->id);
- }
}
if (!optional && pcmk_is_set(action->flags, pe_action_optional)) {
- pe_rsc_trace(rsc, "Unset optional on action %d", action->id);
pe__clear_action_flags(action, pe_action_optional);
}
if (rsc != NULL) {
enum action_tasks a_task = text2task(action->task);
enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set);
int warn_level = LOG_TRACE;
if (save_action) {
warn_level = LOG_WARNING;
}
if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
&& action->node != NULL && action->op_entry != NULL) {
pe_rule_eval_data_t rule_data = {
.node_hash = action->node->details->attrs,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe__set_action_flags(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
&rule_data, action->extra, NULL,
FALSE, data_set);
}
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
/* leave untouched */
} else if (action->node == NULL) {
- pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
+ pe_rsc_trace(rsc, "%s is unrunnable (unallocated)",
+ action->uuid);
pe__clear_action_flags(action, pe_action_runnable);
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
&& g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL) {
- crm_debug("Action %s (unmanaged)", action->uuid);
- pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
+ pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)",
+ action->uuid, action->node->details->uname, rsc->id);
pe__set_action_flags(action, pe_action_optional);
//pe__clear_action_flags(action, pe_action_runnable);
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
+ do_crm_log(warn_level,
+ "%s on %s is unrunnable (node is offline)",
action->uuid, action->node->details->uname);
if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& save_action && a_task == stop_rsc
&& action->node->details->unclean == FALSE) {
pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE);
}
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& action->node->details->pending) {
pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
+ do_crm_log(warn_level,
+ "Action %s on %s is unrunnable (node is pending)",
action->uuid, action->node->details->uname);
} else if (action->needs == rsc_req_nothing) {
- pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(data_set, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an
* exception: an action cannot be completed if it is on a guest
* node whose host is unclean and cannot be fenced.
*/
+ pe_rsc_debug(rsc, "%s on %s is unrunnable "
+ "(node's host cannot be fenced)",
+ action->uuid, action->node->details->uname);
pe__clear_action_flags(action, pe_action_runnable);
- crm_debug("%s\t%s (cancelled : host cannot be fenced)",
- action->node->details->uname, action->uuid);
} else {
+ pe_rsc_trace(rsc, "%s on %s does not require fencing or quorum",
+ action->uuid, action->node->details->uname);
pe__set_action_flags(action, pe_action_runnable);
}
#if 0
/*
* No point checking this
* - if we don't have quorum we can't stonith anyway
*/
} else if (action->needs == rsc_req_stonith) {
crm_trace("Action %s requires only stonith", action->uuid);
action->runnable = TRUE;
#endif
} else if (quorum_policy == no_quorum_stop) {
+ pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
+ action->uuid, action->node->details->uname);
pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
"no quorum", pe_action_runnable, TRUE);
- crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);
} else if (quorum_policy == no_quorum_freeze) {
- pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
+ pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
+ action->uuid, action->node->details->uname);
pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
"quorum freeze", pe_action_runnable,
TRUE);
- pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
- action->node->details->uname, action->uuid);
}
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
- pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
+ } else {
//pe_action_set_reason(action, NULL, TRUE);
pe__set_action_flags(action, pe_action_runnable);
}
if (save_action) {
switch (a_task) {
case stop_rsc:
pe__set_resource_flags(rsc, pe_rsc_stopping);
break;
case start_rsc:
pe__clear_resource_flags(rsc, pe_rsc_starting);
if (pcmk_is_set(action->flags, pe_action_runnable)) {
pe__set_resource_flags(rsc, pe_rsc_starting);
}
break;
default:
break;
}
}
}
free(key);
return action;
}
static bool
valid_stop_on_fail(const char *value)
{
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
static const char *
unpack_operation_on_fail(pe_action_t * action)
{
const char *name = NULL;
const char *role = NULL;
const char *on_fail = NULL;
const char *interval_spec = NULL;
const char *enabled = NULL;
const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
"allowed for stop", action->rsc->id, value);
return NULL;
} else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
/* demote's on-fail defaults to the on-fail of the Master-role monitor, if present */
xmlNode *operation = NULL;
CRM_CHECK(action->rsc != NULL, return NULL);
for (operation = __xml_first_child_element(action->rsc->ops_xml);
operation && !value; operation = __xml_next_element(operation)) {
if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
continue;
}
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
enabled = crm_element_value(operation, "enabled");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!on_fail) {
continue;
} else if (enabled && !crm_is_true(enabled)) {
continue;
} else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei) || !pcmk__str_eq(role, "Master", pcmk__str_casei)) {
continue;
} else if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
} else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
continue;
}
value = on_fail;
}
} else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
value = "ignore";
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
name = crm_element_value(action->op_entry, "name");
role = crm_element_value(action->op_entry, "role");
on_fail = crm_element_value(action->op_entry, XML_OP_ATTR_ON_FAIL);
interval_spec = crm_element_value(action->op_entry,
XML_LRM_ATTR_INTERVAL);
if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
&& (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
|| !pcmk__str_eq(role, "Master", pcmk__str_casei)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
"allowed for it", action->rsc->id, name);
return NULL;
}
}
return value;
}
static xmlNode *
find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
{
guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
const char *name = NULL;
const char *value = NULL;
const char *interval_spec = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms && (interval_ms < min_interval_ms)) {
min_interval_ms = interval_ms;
op = operation;
}
}
}
return op;
}
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay));
}
}
return start_delay;
}
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms,
crm_time_t *now, long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
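/* A worked example of the start-delay arithmetic above (hypothetical
 * numbers, not taken from any CIB): with interval_ms = 60000
 * (interval_sec = 60) and an origin 40s in the past, result = 40,
 * result % 60 = 40, and the remaining delay is 60 - 40 = 20 seconds,
 * so *start_delay is set to 20000 ms. If the origin lies in the
 * future, result and the modulo are <= 0, and the delay works out to
 * -result, i.e. the time remaining until the origin itself.
 */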
static int
unpack_timeout(const char *value)
{
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
return timeout_ms;
}
int
pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
const char *timeout_spec = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
pcmk__str_casei)) {
timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout_spec == NULL && data_set->op_defaults) {
action_meta = crm_str_table_new();
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE, data_set);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes (including versioned meta-attributes)
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
guint interval_ms, crm_time_t *now)
{
xmlNode *attrs = NULL;
xmlNode *attr = NULL;
for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL;
attrs = __xml_next_element(attrs)) {
for (attr = __xml_first_child_element(attrs); attr != NULL;
attr = __xml_next_element(attr)) {
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) {
int start_delay = unpack_start_delay(value, NULL);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
} else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) {
long long start_delay = 0;
if (unpack_interval_origin(value, xml_obj, interval_ms, now,
&start_delay)) {
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME,
XML_OP_ATTR_START_DELAY);
crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
}
} else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) {
int timeout_ms = unpack_timeout(value);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms);
}
}
}
}
#endif
/*!
* \brief Unpack operation XML into an action structure
*
* Unpack an operation's meta-attributes (normalizing the interval, timeout,
* and start delay values as integer milliseconds), requirements, and
* failure policy.
*
* \param[in,out] action Action to unpack into
* \param[in] xml_obj Operation XML (or NULL if all defaults)
* \param[in] container Resource that contains affected resource, if any
* \param[in] data_set Cluster state
* \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms)
{
int timeout_ms = 0;
const char *value = NULL;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
#endif
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_op_eval_data_t op_rule_data = {
.op_name = action->task,
.interval = interval_ms
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = &op_rule_data
};
CRM_CHECK(action && action->rsc, return);
// Cluster-wide <op_defaults> <meta_attributes>
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, FALSE, data_set);
// Determine probe default timeout differently
if (pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei)
&& (interval_ms == 0)) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
if (value) {
crm_trace("\t%s: Setting default timeout to minimum-interval "
"monitor's timeout '%s'", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
}
if (xml_obj) {
xmlAttrPtr xIter = NULL;
// <op> <meta_attributes> take precedence over defaults
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, TRUE, data_set);
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_ATTR_SETS, &rule_data,
rsc_details->versioned_parameters,
NULL);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_META_SETS, &rule_data,
rsc_details->versioned_meta,
NULL);
#endif
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
*/
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_obj, prop_name);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
g_hash_table_remove(action->meta, "id");
// Normalize interval to milliseconds
if (interval_ms > 0) {
g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
crm_strdup_printf("%u", interval_ms));
} else {
g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
}
/*
* Timeout order of precedence:
* 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
* and task is start or a probe; pcmk_monitor_timeout works
* by default for a recurring monitor)
* 2. explicit op timeout on the primitive
* 3. default op timeout
* a. if probe, then min-interval monitor's timeout
* b. else, in XML_CIB_TAG_OPCONFIG
* 4. CRM_DEFAULT_OP_TIMEOUT_S
*
* #1 overrides general rule of <op> XML property having highest
* precedence.
*/
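/* For example (hypothetical configuration): a fence device with
 * instance attribute pcmk_monitor_timeout="120s" and an
 * <op name="start" timeout="60s"/> entry still gets a 120s start
 * timeout here, because rule #1 above overrides the usual precedence
 * of <op> XML properties.
 */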
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
&& (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
|| (pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei)
&& (interval_ms == 0)))
&& action->rsc->parameters) {
value = g_hash_table_lookup(action->rsc->parameters,
"pcmk_monitor_timeout");
if (value) {
crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
"overriding default", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
// Normalize timeout to positive milliseconds
value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
timeout_ms = unpack_timeout(value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
crm_itoa(timeout_ms));
if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
action->needs = rsc_req_nothing;
- value = "nothing (not start/promote)";
+ value = "nothing (not start or promote)";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = rsc_req_stonith;
- value = "fencing (resource)";
+ value = "fencing";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = rsc_req_quorum;
- value = "quorum (resource)";
+ value = "quorum";
} else {
action->needs = rsc_req_nothing;
- value = "nothing (resource)";
+ value = "nothing";
}
-
- pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value);
+ pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
value = unpack_operation_on_fail(action);
if (value == NULL) {
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
action->on_fail = action_fail_block;
g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
value = "block"; // The above could destroy the original string
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
action->on_fail = action_fail_fence;
value = "node fencing";
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
action->on_fail = action_fail_standby;
value = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", "nothing", NULL)) {
action->on_fail = action_fail_ignore;
value = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
action->on_fail = action_fail_migrate;
value = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
if (container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
action->on_fail = action_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does
*    not already exist. The user can set op on-fail=fence if they
*    really want to fence start failures. */
} else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
&& pe__resource_is_remote_conn(action->rsc, data_set)
&& !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& (interval_ms == 0))
&& !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop unmanaged remote node (enforcing default)";
} else {
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = RSC_ROLE_STOPPED;
}
action->on_fail = action_fail_reset_remote;
}
} else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
action->on_fail = action_fail_fence;
value = "resource fence (default)";
} else {
action->on_fail = action_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate) (default)";
}
- pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);
+ pe_rsc_trace(action->rsc, "%s failure handling: %s",
+ action->uuid, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pe_wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == RSC_ROLE_UNKNOWN) {
if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
action->fail_role = RSC_ROLE_SLAVE;
} else {
action->fail_role = RSC_ROLE_STARTED;
}
}
- pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
- role2text(action->fail_role));
+ pe_rsc_trace(action->rsc, "%s failure results in: %s",
+ action->uuid, role2text(action->fail_role));
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
if (value) {
unpack_start_delay(value, action->meta);
} else {
long long start_delay = 0;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
&start_delay)) {
g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
crm_strdup_printf("%lld", start_delay));
}
}
#if ENABLE_VERSIONED_ATTRS
unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms,
data_set->now);
#endif
}
static xmlNode *
find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled)
{
guint interval_ms = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *value = NULL;
const char *interval_spec = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
local_key = pcmk__op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = pcmk__op_key(rsc->id, "notify", 0);
key = local_key;
goto retry;
}
return NULL;
}
xmlNode *
find_rsc_op_entry(pe_resource_t * rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
void
print_node(const char *pre_text, pe_node_t * node, gboolean details)
{
if (node == NULL) {
crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
CRM_ASSERT(node->details);
crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
pre_text == NULL ? "" : pre_text,
pre_text == NULL ? "" : ": ",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
int log_level = LOG_TRACE;
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending,
&log_level);
}
}
}
/*
* Used by the HashTable for-loop
*/
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
crm_trace("%s%s %s ==> %s",
user_data == NULL ? "" : (char *)user_data,
user_data == NULL ? "" : ": ", (char *)key, (char *)value);
}
void
pe_free_action(pe_action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
#if ENABLE_VERSIONED_ATTRS
if (action->rsc) {
pe_free_rsc_action_details(action);
}
#endif
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
GListPtr
find_recurring_actions(GListPtr input, pe_node_t * not_on_node)
{
const char *value = NULL;
GListPtr result = NULL;
GListPtr gIter = input;
CRM_CHECK(input != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS);
if (value == NULL) {
/* skip */
} else if (pcmk__str_eq(value, "0", pcmk__str_casei)) {
/* skip */
} else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) {
/* skip */
} else if (not_on_node == NULL) {
crm_trace("(null) Found: %s", action->uuid);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
/* skip */
} else if (action->node->details != not_on_node->details) {
crm_trace("Found: %s", action->uuid);
result = g_list_prepend(result, action);
}
}
return result;
}
enum action_tasks
get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
enum action_tasks task = text2task(name);
if (rsc == NULL) {
return task;
} else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
switch (task) {
case stopped_rsc:
case started_rsc:
case action_demoted:
case action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
return task - 1;
default:
break;
}
}
return task;
}
pe_action_t *
find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node)
{
GListPtr gIter = NULL;
CRM_CHECK(uuid || task, return NULL);
for (gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
} else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
GListPtr
find_actions(GListPtr input, const char *key, const pe_node_t *on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
crm_trace("%s does not match action %s", key, action->uuid);
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, on_node->details->uname);
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
} else {
crm_trace("Action %s on node %s does not match requested node %s",
key, action->node->details->uname,
on_node->details->uname);
}
}
return result;
}
GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
crm_trace("Not searching for action %s because node not specified",
key);
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->node == NULL) {
crm_trace("Skipping comparison of %s vs action %s without node",
key, action->uuid);
} else if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
crm_trace("Desired action %s doesn't match %s", key, action->uuid);
} else if (!pcmk__str_eq(on_node->details->id, action->node->details->id, pcmk__str_casei)) {
crm_trace("Action %s desired node ID %s doesn't match %s",
key, on_node->details->id, action->node->details->id);
} else {
crm_trace("Action %s matches", key);
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, actions without a node (or a NULL node argument) will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
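/* Example (hypothetical caller): pe__resource_actions(rsc, node,
 * RSC_STOP, TRUE) builds the key "<rsc-id>_stop_0" and returns only
 * stop actions already assigned to exactly that node, whereas passing
 * require_node=FALSE would also claim matching unassigned actions for
 * the node (see find_actions() above).
 */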
static void
resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
{
pe_node_t *match = NULL;
if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
/* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
* applied to them.
*/
return;
} else if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = pe__add_scores(match->weight, score);
}
void
resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
pe_working_set_t * data_set)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (data_set != NULL) {
GListPtr gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node_iter = (pe_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
pe_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if (node == NULL && score == -INFINITY) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
if (pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_casei)) {
/* We have duplicate lrm_rsc_op entries in the status
* section which is unlikely to be a good thing
* - we can handle it easily enough, but we need to get
* to the bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (b_call_id >= 0 && a_call_id == b_call_id) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
time_t
get_effective_time(pe_working_set_t * data_set)
{
if(data_set) {
if (data_set->now == NULL) {
crm_trace("Recording a new 'now'");
data_set->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(data_set->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
gboolean
get_target_role(pe_resource_t * rsc, enum rsc_role_e * role)
{
enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei)
|| pcmk__str_eq("default", value, pcmk__str_casei)) {
return FALSE;
}
local_role = text2role(value);
if (local_role == RSC_ROLE_UNKNOWN) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
} else if (local_role > RSC_ROLE_STARTED) {
if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
if (local_role > RSC_ROLE_SLAVE) {
/* This is what we'd do anyway; just leave the default to
 * avoid messing up the placement algorithm
 */
return FALSE;
}
} else {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' only makes sense for promotable "
"clones", rsc->id, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
gboolean
order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
{
GListPtr gIter = NULL;
pe_action_wrapper_t *wrapper = NULL;
GListPtr list = NULL;
if (order == pe_order_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
if (after->action == rh_action && (after->type & order)) {
return FALSE;
}
}
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = rh_action;
wrapper->type = order;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = lh_action;
wrapper->type = order;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
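/* Example (hypothetical actions): order_actions(stop_a, start_b,
 * pe_order_optional) appends start_b to stop_a->actions_after and
 * stop_a to start_b->actions_before, so the transition graph later
 * sequences the stop before the start; a duplicate ordering with the
 * same type bits is filtered out above and returns FALSE.
 */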
pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
pe_action_t *op = NULL;
if(data_set->singletons) {
op = g_hash_table_lookup(data_set->singletons, name);
}
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
}
return op;
}
void
destroy_ticket(gpointer data)
{
pe_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
pe_ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
pe_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
if (data_set->tickets == NULL) {
data_set->tickets =
g_hash_table_new_full(crm_str_hash, g_str_equal, free,
destroy_ticket);
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(pe_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = crm_str_table_new();
g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
if (param_set && param_string) {
xmlAttrPtr xIter = param_set->properties;
while (xIter) {
const char *prop_name = (const char *)xIter->name;
char *name = crm_strdup_printf(" %s ", prop_name);
char *match = strstr(param_string, name);
free(name);
// Advance now, because the current entry might get removed below
xIter = xIter->next;
if (need_present && match == NULL) {
crm_trace("%s not found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
} else if (need_present == FALSE && match) {
crm_trace("%s found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
}
}
}
}
#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
{
GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
crm_xml_add(params, key, value);
}
g_hash_table_destroy(hash);
}
#endif
/*!
* \internal
* \brief Calculate action digests and store in node's digest cache
*
* \param[in] rsc Resource that action was for
* \param[in] task Name of action performed
* \param[in] key Action's task key
* \param[in] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
* \param[in] data_set Cluster working set
*
* \return Pointer to node's digest cache entry
*/
static op_digest_cache_t *
rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key,
pe_node_t *node, xmlNode *xml_op, bool calc_secure,
pe_working_set_t *data_set)
{
op_digest_cache_t *data = NULL;
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
GHashTable *local_rsc_params = crm_str_table_new();
pe_action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set);
#if ENABLE_VERSIONED_ATTRS
xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
const char *ra_version = NULL;
#endif
const char *op_version;
const char *restart_list = NULL;
const char *secure_list = " passwd password ";
data = calloc(1, sizeof(op_digest_cache_t));
CRM_ASSERT(data != NULL);
get_rsc_attributes(local_rsc_params, rsc, node, data_set);
#if ENABLE_VERSIONED_ATTRS
pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
#endif
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
if (pe__add_bundle_remote_name(rsc, data->params_all,
XML_RSC_ATTR_REMOTE_RA_ADDR)) {
crm_trace("Set address for bundle connection %s (on %s)",
rsc->id, node->details->uname);
}
g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
g_hash_table_foreach(action->extra, hash2field, data->params_all);
g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
if(xml_op) {
secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
#if ENABLE_VERSIONED_ATTRS
ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
#endif
} else {
op_version = CRM_FEATURE_SET;
}
#if ENABLE_VERSIONED_ATTRS
append_versioned_params(local_versioned_params, ra_version, data->params_all);
append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all);
{
pe_rsc_action_details_t *details = pe_rsc_action_details(action);
append_versioned_params(details->versioned_parameters, ra_version, data->params_all);
}
#endif
pcmk__filter_op_for_digest(data->params_all);
g_hash_table_destroy(local_rsc_params);
pe_free_action(action);
data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
if (calc_secure) {
data->params_secure = copy_xml(data->params_all);
if(secure_list) {
filter_parameters(data->params_secure, secure_list, FALSE);
}
data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
}
if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
data->params_restart = copy_xml(data->params_all);
if (restart_list) {
filter_parameters(data->params_restart, restart_list, TRUE);
}
data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
}
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
return data;
}
op_digest_cache_t *
rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
pe_working_set_t * data_set)
{
op_digest_cache_t *data = NULL;
char *key = NULL;
guint interval_ms = 0;
const char *op_version;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_all;
const char *digest_restart;
CRM_ASSERT(node != NULL);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
key = pcmk__op_key(rsc->id, task, interval_ms);
data = rsc_action_digest(rsc, task, key, node, xml_op,
pcmk_is_set(data_set->flags, pe_flag_sanitized),
data_set);
data->rc = RSC_DIGEST_MATCH;
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
key, node->details->uname,
crm_str(digest_restart), data->digest_restart_calc,
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_RESTART;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = RSC_DIGEST_UNKNOWN;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s",
key, node->details->uname,
crm_str(digest_all), data->digest_all_calc,
(interval_ms > 0)? "reschedule" : "reload",
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_ALL;
}
free(key);
return data;
}
/*!
* \internal
* \brief Create an unfencing summary for use in special node attribute
*
* Create a string combining a fence device's resource ID, agent type, and
* parameter digest (whether for all parameters or just non-private parameters).
* This can be stored in a special node attribute, allowing us to detect changes
* in either the agent type or parameters, to know whether unfencing must be
* redone or can be safely skipped when the device's history is cleaned.
*
* \param[in] rsc_id Fence device resource ID
* \param[in] agent_type Fence device agent
* \param[in] param_digest Fence device parameter digest
*
* \return Newly allocated string with unfencing digest
* \note The caller is responsible for freeing the result.
*/
static inline char *
create_unfencing_summary(const char *rsc_id, const char *agent_type,
const char *param_digest)
{
return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
}
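/* Example (hypothetical values): create_unfencing_summary("fence1",
 * "fence_ipmilan", "0ab1...") yields "fence1:fence_ipmilan:0ab1...",
 * the form that unfencing_digest_matches() below searches for within
 * the node's comma-separated summary attribute.
 */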
/*!
* \internal
* \brief Check whether a node can skip unfencing
*
* Check whether a fence device's current definition matches a node's
* stored summary of when it was last unfenced by the device.
*
* \param[in] rsc_id Fence device's resource ID
* \param[in] agent Fence device's agent type
* \param[in] digest_calc Fence device's current parameter digest
* \param[in] node_summary Value of node's special unfencing node attribute
* (a comma-separated list of unfencing summaries for
* all devices that have unfenced this node)
*
* \return TRUE if digest matches, FALSE otherwise
*/
static bool
unfencing_digest_matches(const char *rsc_id, const char *agent,
const char *digest_calc, const char *node_summary)
{
bool matches = FALSE;
if (rsc_id && agent && digest_calc && node_summary) {
char *search_secure = create_unfencing_summary(rsc_id, agent,
digest_calc);
/* The digest was calculated including the device ID and agent,
* so there is no risk of collision using strstr().
*/
matches = (strstr(node_summary, search_secure) != NULL);
crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
search_secure, matches? "" : "not ", node_summary);
free(search_secure);
}
return matches;
}
/* Magic string to use as action name for digest cache entries used for
* unfencing checks. This is not the real action name ("on"), so
* check_action_definition() won't confuse these entries with real actions.
*/
#define STONITH_DIGEST_TASK "stonith-on"
/*!
* \internal
* \brief Calculate fence device digests and digest comparison result
*
* \param[in] rsc Fence device resource
* \param[in] agent Fence device's agent type
* \param[in] node Node with digest cache to use
* \param[in] data_set Cluster working set
*
* \return Node's digest cache entry
*/
static op_digest_cache_t *
fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent,
pe_node_t *node, pe_working_set_t *data_set)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0);
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key,
node, NULL, TRUE, data_set);
free(key);
// Check whether node has special unfencing summary node attribute
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
if (node_summary == NULL) {
data->rc = RSC_DIGEST_UNKNOWN;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
data->rc = RSC_DIGEST_MATCH;
return data;
}
// Check whether secure parameter digest matches
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
data->rc = RSC_DIGEST_MATCH;
if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
printf("Only 'private' parameters to %s for unfencing %s changed\n",
rsc->id, node->details->uname);
}
return data;
}
// Parameters don't match
data->rc = RSC_DIGEST_ALL;
if (pcmk_is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout))
&& data->digest_secure_calc) {
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
printf("Parameters to %s for unfencing %s changed, try '%s'\n",
rsc->id, node->details->uname, digest);
free(digest);
}
return data;
}
const char *rsc_printable_id(pe_resource_t *rsc)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
return ID(rsc->xml);
}
return rsc->id;
}
void
pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
pe__clear_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
}
}
void
pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
pe__set_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
}
}
static GListPtr
find_unfencing_devices(GListPtr candidates, GListPtr matches)
{
for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
pe_resource_t *candidate = gIter->data;
const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES);
const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
if(candidate->children) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
continue;
} else if (pcmk__str_eq(provides, "unfencing", pcmk__str_casei) || pcmk__str_eq(requires, "unfencing", pcmk__str_casei)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
static int
node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GListPtr gIter = NULL;
// `priority-fencing-delay` is disabled
if (data_set->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != node_member) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = gIter->data;
if (n->details->type != node_member) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if we have more than half of the cluster members
if (online_count > member_count / 2) {
return 0;
}
/* All the nodes have equal priority.
 * Any configured `pcmk_delay_base/max` will apply as usual. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return data_set->priority_fencing_delay;
}
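/* Example (hypothetical two-node split): node A has priority 2, node B
 * priority 1. In A's partition the target is B: online_count (1) is
 * not more than member_count / 2 (1), priorities differ, and B's
 * priority is below top_priority, so 0 is returned and A fences
 * immediately. In B's partition the target is A, whose priority equals
 * top_priority, so the configured delay is returned and B waits,
 * letting the higher-priority node win.
 */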
pe_action_t *
pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason,
bool priority_delay, pe_working_set_t * data_set)
{
char *op_key = NULL;
pe_action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
if(data_set->singletons) {
stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
}
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pe__is_guest_or_remote_node(node)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Extra work to detect device changes on remotes
*
* We may do this for all nodes in the future, but for now
* the check_action_definition()-based approach works fine.
*/
long max = 1024;
long digests_all_offset = 0;
long digests_secure_offset = 0;
char *digests_all = calloc(max, sizeof(char));
char *digests_secure = calloc(max, sizeof(char));
GListPtr matches = find_unfencing_devices(data_set->resources, NULL);
for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
pe_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = fencing_action_digest_cmp(match, agent, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
fprintf(stdout, "  notice: Unfencing %s (remote) because the definition of %s changed\n", node->details->uname, match->id);
}
}
digests_all_offset += snprintf(
digests_all+digests_all_offset, max-digests_all_offset,
"%s:%s:%s,", match->id, agent, data->digest_all_calc);
digests_secure_offset += snprintf(
digests_secure+digests_secure_offset, max-digests_secure_offset,
"%s:%s:%s,", match->id, agent, data->digest_secure_calc);
}
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_ALL),
digests_all);
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_SECURE),
digests_secure);
}
} else {
free(op_key);
}
if (data_set->priority_fencing_delay > 0
/* This is a case where `priority-fencing-delay` applies, so at
 * least add the `priority-fencing-delay` field as an indicator. */
&& (priority_delay
/* Re-calculate priority delay for the suitable case when
* pe_fence_op() is called again by stage6() after node priority has
* been actually calculated with native_add_running() */
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
/* Add `priority-fencing-delay` to the fencing op even if it's 0 for
 * the target node, so that it takes precedence over any configured
 * `pcmk_delay_base/max`.
 */
char *delay_s = crm_itoa(node_priority_fencing_delay(node, data_set));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe_action_required(stonith_op, NULL, reason);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
void
trigger_unfencing(
pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set)
{
if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
&& !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
if(dependency) {
order_actions(unfence, dependency, pe_order_optional);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, data_set);
}
}
}
}
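/* Hypothetical usage sketch (not in the original source): triggering
 * unfencing when a fencing device's definition changes, ordered before the
 * device starts. rsc, start_action, and data_set are assumed names.
 */
#if 0
/* With node == NULL and rsc set to a fencing device, the function falls
 * through to the final branch, recursing once per online, clean node in
 * rsc->allowed_nodes and ordering each "on" op before start_action. */
trigger_unfencing(rsc, NULL, "device definition changed", start_action,
                  data_set);
#endif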
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
pe_tag_t *tag = NULL;
GListPtr gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(pe_tag_t));
if (tag == NULL) {
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
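/* Hypothetical usage sketch (not in the original source): recording that a
 * configuration tag references two resources. GLib's g_str_hash and
 * g_str_equal are used for the table; the tag and resource names are made
 * up, and the value destructor is omitted for brevity.
 */
#if 0
GHashTable *tags = g_hash_table_new_full(g_str_hash, g_str_equal,
                                         free, NULL);

add_tag_ref(tags, "web-tier", "rsc-apache");
add_tag_ref(tags, "web-tier", "rsc-haproxy");
add_tag_ref(tags, "web-tier", "rsc-apache"); /* duplicate ref: ignored */
#endif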
void pe_action_set_flag_reason(const char *function, long line,
pe_action_t *action, pe_action_t *reason, const char *text,
enum pe_action_flags flags, bool overwrite)
{
bool unset = FALSE;
bool update = FALSE;
const char *change = NULL;
if (pcmk_is_set(flags, pe_action_runnable)) {
unset = TRUE;
change = "unrunnable";
} else if (pcmk_is_set(flags, pe_action_optional)) {
unset = TRUE;
change = "required";
} else if (pcmk_is_set(flags, pe_action_migrate_runnable)) {
unset = TRUE;
overwrite = TRUE;
change = "unrunnable";
} else if (pcmk_is_set(flags, pe_action_dangle)) {
change = "dangling";
} else if (pcmk_is_set(flags, pe_action_requires_any)) {
change = "required";
} else {
crm_err("Unknown flag change to 0x%x for %s (reason: %s)",
flags, action->uuid, (reason? reason->uuid : "none"));
}
if(unset) {
if (pcmk_is_set(action->flags, flags)) {
pe__clear_action_flags_as(function, line, action, flags);
update = TRUE;
}
} else {
if (!pcmk_is_set(action->flags, flags)) {
pe__set_action_flags_as(function, line, action, flags);
update = TRUE;
}
}
if((change && update) || text) {
char *reason_text = NULL;
if(reason == NULL) {
pe_action_set_reason(action, text, overwrite);
} else if(reason->rsc == NULL) {
reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:"");
} else {
reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA");
}
if(reason_text && action->rsc != reason->rsc) {
pe_action_set_reason(action, reason_text, overwrite);
}
free(reason_text);
}
}
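/* Hypothetical usage sketch (not in the original source): marking an
 * action unrunnable because another action cannot run. Passing
 * pe_action_runnable takes the "unset" path above, clearing the flag and
 * recording an "unrunnable" reason; __func__/__LINE__ identify the call
 * site for tracing. action and blocked_op are assumed names.
 */
#if 0
pe_action_set_flag_reason(__func__, __LINE__, action, blocked_op,
                          "peer is not startable", pe_action_runnable,
                          FALSE);
#endif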
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, crm_str(reason));
free(action->reason);
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, crm_str(reason));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
if (reason != NULL) {
action->reason = strdup(reason);
} else {
action->reason = NULL;
}
}
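/* Behavior sketch (not in the original source): a first call sets the
 * reason, a repeat call without overwrite is a no-op, and overwrite
 * replaces any existing reason. action is an assumed name.
 */
#if 0
pe_action_set_reason(action, "node is down", FALSE);   /* sets the reason */
pe_action_set_reason(action, "fencing failed", FALSE); /* no-op: already set */
pe_action_set_reason(action, "fencing failed", TRUE);  /* replaces it */
#endif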
/*!
* \internal
* \brief Check whether shutdown has been requested for a node
*
* \param[in] node Node to check
*
* \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
* \note This differs from simply using node->details->shutdown in that it can
* be used before that has been determined (and in fact to determine it),
* and it can also be used to distinguish requested shutdown from implicit
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
pe__shutdown_requested(pe_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
}
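/* Value examples (the attribute normally holds the epoch time of the
 * shutdown request, or is absent):
 *   attribute unset -> FALSE
 *   "0"             -> FALSE
 *   "1590000000"    -> TRUE
 */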
/*!
* \internal
* \brief Update a data set's "recheck by" time
*
* \param[in] recheck Epoch time when recheck should happen
* \param[in,out] data_set Current working set
*/
void
pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
{
if ((recheck > get_effective_time(data_set))
&& ((data_set->recheck_by == 0)
|| (data_set->recheck_by > recheck))) {
data_set->recheck_by = recheck;
}
}
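/* Worked example (assuming effective time T): recheck_by only moves
 * earlier, and never to a time that has already passed.
 *   recheck_by == 0,    recheck == T+60  -> recheck_by becomes T+60
 *   recheck_by == T+60, recheck == T+30  -> recheck_by becomes T+30
 *   recheck_by == T+30, recheck == T-10  -> unchanged (not in the future)
 */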
/*!
* \internal
* \brief Wrapper for pe_eval_nvpairs() using a cluster working set
*/
void
pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set)
{
crm_time_t *next_change = crm_time_new_undefined();
pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
pe__update_recheck_time(recheck, data_set);
}
crm_time_free(next_change);
}
bool
pe__resource_is_disabled(pe_resource_t *rsc)
{
const char *target_role = NULL;
CRM_CHECK(rsc != NULL, return false);
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
if ((target_role_e == RSC_ROLE_STOPPED)
|| ((target_role_e == RSC_ROLE_SLAVE)
&& pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
return true;
}
}
return false;
}
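/* Hypothetical usage sketch (not in the original source; rsc is an assumed
 * name): reacting to a resource disabled via target-role.
 */
#if 0
if (pe__resource_is_disabled(rsc)) {
    crm_info("%s has been administratively disabled via target-role",
             rsc->id);
}
#endif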
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in] rsc Resource to clear
* \param[in] node Node to clear history on
* \param[in] data_set Cluster working set
*
* \return New action to clear resource history
*/
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
char *key = NULL;
CRM_ASSERT(rsc && node);
key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
data_set);
}
bool
pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list)
{
for (GListPtr ele = rsc->running_on; ele; ele = ele->next) {
pe_node_t *node = (pe_node_t *) ele->data;
if (pcmk__str_in_list(node_list, node->details->uname)) {
return true;
}
}
return false;
}
bool
pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any_node_in_list(rsc, only_node));
}
GListPtr
pe__filter_rsc_list(GListPtr rscs, GListPtr filter)
{
GListPtr retval = NULL;
for (GListPtr gIter = rscs; gIter; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
*/
if (pcmk__str_in_list(filter, rsc_printable_id(rsc)) ||
(rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent)))) {
retval = g_list_prepend(retval, rsc);
}
}
return retval;
}
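/* Hypothetical usage sketch (not in the original source): keeping only the
 * resources, or children of resources, named in a filter list. The names
 * below are made up; note that g_list_prepend() means the result is in
 * reverse match order, and the resources themselves are not copied.
 */
#if 0
GListPtr filter = g_list_append(NULL, strdup("rsc-apache"));
GListPtr shown = pe__filter_rsc_list(data_set->resources, filter);

g_list_free(shown); /* frees only the list cells, not the resources */
g_list_free_full(filter, free);
#endif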
