diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index feb5dc852b..10f34b494b 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1602 +1,1603 @@ #!@PYTHON@ """ Regression tests for Pacemaker's scheduler """ __copyright__ = "Copyright 2004-2022 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import stat import shlex import shutil import argparse import subprocess import platform import tempfile DESC = """Regression tests for Pacemaker's scheduler""" # Each entry in TESTS is a group of tests, where each test consists of a # test base name, test description, and additional test arguments. # Test groups will be separated by newlines in output. TESTS = [ [ [ "simple1", "Offline" ], [ "simple2", "Start" ], [ "simple3", "Start 2" ], [ "simple4", "Start Failed" ], [ "simple6", "Stop Start" ], [ "simple7", "Shutdown" ], #[ "simple8", "Stonith" ], #[ "simple9", "Lower version" ], #[ "simple10", "Higher version" ], [ "simple11", "Priority (ne)" ], [ "simple12", "Priority (eq)" ], [ "simple8", "Stickiness" ], ], [ [ "group1", "Group" ], [ "group2", "Group + Native" ], [ "group3", "Group + Group" ], [ "group4", "Group + Native (nothing)" ], [ "group5", "Group + Native (move)" ], [ "group6", "Group + Group (move)" ], [ "group7", "Group colocation" ], [ "group13", "Group colocation (can't run)" ], [ "group8", "Group anti-colocation" ], [ "group9", "Group recovery" ], [ "group10", "Group partial recovery" ], [ "group11", "Group target_role" ], [ "group14", "Group stop (graph terminated)" ], [ "group15", "Negative group colocation" ], [ "bug-1573", "Partial stop of a group with two children" ], [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ], [ "bug-lf-2613", "Move group on failure" ], [ "bug-lf-2619", "Move group on clone failure" ], [ "group-fail", "Ensure stop order is preserved for partially active groups" ], [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ], [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ], [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], [ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ], ], [ [ "rsc_dep1", "Must not" ], [ "rsc_dep3", "Must" ], [ "rsc_dep5", "Must not 3" ], [ "rsc_dep7", "Must 3" ], [ "rsc_dep10", "Must (but can't)" ], [ "rsc_dep2", "Must (running)" ], [ "rsc_dep8", "Must (running : alt)" ], [ "rsc_dep4", "Must (running + move)" ], [ "asymmetric", "Asymmetric - require explicit location constraints" ], ], [ [ "orphan-0", "Orphan ignore" ], [ "orphan-1", "Orphan stop" ], [ "orphan-2", "Orphan stop, remove failcount" ], ], [ [ "params-0", "Params: No change" ], [ "params-1", "Params: Changed" ], [ "params-2", "Params: Resource definition" ], [ "params-3", "Params: Restart instead of reload if start pending" ], [ "params-4", "Params: Reload" ], [ "params-5", "Params: Restart based on probe digest" ], [ "novell-251689", "Resource definition change + target_role=stopped" ], [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ], [ "params-6", "Params: Detect reload in previously migrated resource" ], [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ], [ "not-reschedule-unneeded-monitor", "Do not reschedule unneeded monitors even though 
resource definitions have changed" ], [ "reload-becomes-restart", "Cancel reload if restart becomes required" ], [ "restart-with-extra-op-params", "Restart if any extra operation parameter changes" ], ], [ [ "target-0", "Target Role : baseline" ], [ "target-1", "Target Role : promoted" ], [ "target-2", "Target Role : invalid" ], ], [ [ "base-score", "Set a node's default score for all nodes" ], ], [ [ "date-1", "Dates", [ "-t", "2005-020" ] ], [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ], [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ], [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ], [ "probe-0", "Probe (anon clone)" ], [ "probe-1", "Pending Probe" ], [ "probe-2", "Correctly re-probe cloned groups" ], [ "probe-3", "Probe (pending node)" ], [ "probe-4", "Probe (pending node + stopped resource)" ], [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ], [ "failed-probe-primitive", "Maskable vs. unmaskable probe failures on primitive resources" ], [ "failed-probe-clone", "Maskable vs. unmaskable probe failures on cloned resources" ], [ "expired-failed-probe-primitive", "Maskable, expired probe failure on primitive resources" ], [ "standby", "Standby" ], [ "comments", "Comments" ], ], [ [ "one-or-more-0", "Everything starts" ], [ "one-or-more-1", "Nothing starts because of A" ], [ "one-or-more-2", "D can start because of C" ], [ "one-or-more-3", "D cannot start because of B and C" ], [ "one-or-more-4", "D cannot start because of target-role" ], [ "one-or-more-5", "Start A and F even though C and D are stopped" ], [ "one-or-more-6", "Leave A running even though B is stopped" ], [ "one-or-more-7", "Leave A running even though C is stopped" ], [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ], [ "clone-require-all-1", "clone B starts on nodes 3 and 4" ], [ "clone-require-all-2", "clone B remains stopped everywhere" ], [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ], [ "clone-require-all-4", "clone B remains on nodes 3 and 4 with only one instance of A remaining" ], [ "clone-require-all-5", "clone B starts on nodes 1, 3, and 4" ], [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ], [ "clone-require-all-7", "clone A and B both start at the same time; 
all instances of A start before B" ], [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ], [ "clone-require-all-no-interleave-2", "C starts on nodes 1, 2, and 4 with only one active instance of B" ], [ "clone-require-all-no-interleave-3", "C remains active when instance of B is stopped on one node and started on another" ], [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ], ], [ [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], [ "value-source", "Use location constraints with node attribute expressions using value-source" ], [ "rule-dbl-as-auto-number-match", "Floating-point rule values default to number comparison: match" ], [ "rule-dbl-as-auto-number-no-match", "Floating-point rule values default to number comparison: no " "match" ], [ "rule-dbl-as-integer-match", "Floating-point rule values set to integer comparison: match" ], [ "rule-dbl-as-integer-no-match", "Floating-point rule values set to integer comparison: no match" ], [ "rule-dbl-as-number-match", "Floating-point rule values set to number comparison: match" ], [ "rule-dbl-as-number-no-match", "Floating-point rule values set to number comparison: no match" ], [ "rule-dbl-parse-fail-default-str-match", "Floating-point rule values fail to parse, default to string " "comparison: match" ], [ "rule-dbl-parse-fail-default-str-no-match", "Floating-point rule values fail to parse, default to string " "comparison: no match" ], [ "rule-int-as-auto-integer-match", "Integer rule values default to integer comparison: match" ], [ "rule-int-as-auto-integer-no-match", "Integer rule values default to integer comparison: no match" ], [ "rule-int-as-integer-match", "Integer rule values set to integer comparison: match" ], [ "rule-int-as-integer-no-match", "Integer rule values set to integer comparison: no match" ], [ "rule-int-as-number-match", "Integer rule values set to number comparison: match" ], [ "rule-int-as-number-no-match", "Integer rule values set to number comparison: no match" ], [ "rule-int-parse-fail-default-str-match", "Integer rule values fail to parse, default to string " "comparison: match" ], [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], ], [ [ "order1", "Order start 1" ], [ "order2", "Order start 2" ], [ "order3", "Order stop" ], [ "order4", "Order (multiple)" ], [ "order5", "Order (move)" ], [ "order6", "Order (move w/ restart)" ], [ "order7", "Order (mandatory)" ], [ "order-optional", "Order (score=0)" ], [ "order-required", "Order (score=INFINITY)" ], [ "bug-lf-2171", "Prevent group start when clone is stopped" ], [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ], [ "order-sets", "Ordering for resource sets" ], [ "order-serialize", "Serialize resources without inhibiting migration" ], [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ], [ "clone-order-primitive", "Order clone start after a primitive" ], [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ], [ "order-optional-keyword", "Order (optional keyword)" ], [ "order-mandatory", "Order (mandatory keyword)" ], [ "bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones" ], [ 
"ordered-set-basic-startup", "Constraint set with default order settings" ], [ "ordered-set-natural", "Allow natural set ordering" ], [ "order-wrong-kind", "Order (error)" ], ], [ [ "coloc-loop", "Colocation - loop" ], [ "coloc-many-one", "Colocation - many-to-one" ], [ "coloc-list", "Colocation - many-to-one with list" ], [ "coloc-group", "Colocation - groups" ], [ "coloc-unpromoted-anti", "Anti-colocation with unpromoted shouldn't prevent promoted colocation" ], [ "coloc-attr", "Colocation based on node attributes" ], [ "coloc-negative-group", "Negative colocation with a group" ], [ "coloc-intra-set", "Intra-set colocation" ], [ "bug-lf-2435", "Colocation sets with a negative score" ], [ "coloc-clone-stays-active", "Ensure clones don't get stopped/demoted because a dependent must stop" ], [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ], [ "colo_promoted_w_native", "cl#5070 - Verify promotion order is affected when colocating promoted with primitive" ], [ "colo_unpromoted_w_native", "cl#5070 - Verify promotion order is affected when colocating unpromoted with primitive" ], [ "anti-colocation-order", "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ], [ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ], [ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ], [ "group-anticolocation", "Group with failed last member anti-colocated with another group" ], [ "group-colocation-failure", "Group with sole member failed, colocated with another group" ], [ "enforce-colo1", "Always enforce B with A INFINITY" ], [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ], [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], [ "colocation-influence", "Respect colocation influence" ], [ "colocation-priority-group", "Apply group colocations in order of primary priority" ], [ "colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score" ], [ "promoted-with-blocked", "Promoted role colocated with a resource with blocked start" ], ], [ [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ], [ "rsc-sets-clone", "Resource Sets - Clone" ], [ "rsc-sets-promoted", "Resource Sets - Promoted" ], [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ], ], [ [ "attrs1", "string: eq (and)" ], [ "attrs2", "string: lt / gt (and)" ], [ "attrs3", "string: ne (or)" ], [ "attrs4", "string: exists" ], [ "attrs5", "string: not_exists" ], [ "attrs6", "is_dc: true" ], [ "attrs7", "is_dc: false" ], [ "attrs8", "score_attribute" ], [ "per-node-attrs", "Per node resource parameters" ], ], [ [ "mon-rsc-1", "Schedule Monitor - start" ], [ "mon-rsc-2", "Schedule Monitor - move" ], [ "mon-rsc-3", "Schedule Monitor - pending start" ], [ "mon-rsc-4", "Schedule Monitor - move/pending start" ], ], [ [ "rec-rsc-0", "Resource Recover - no start" ], [ "rec-rsc-1", "Resource Recover - start" ], [ "rec-rsc-2", "Resource Recover - monitor" ], [ "rec-rsc-3", "Resource Recover - stop - ignore" ], [ "rec-rsc-4", "Resource Recover - stop - block" ], [ "rec-rsc-5", "Resource Recover - stop - fence" ], [ "rec-rsc-6", "Resource Recover - multiple - restart" ], [ "rec-rsc-7", "Resource Recover - multiple - stop" ], [ "rec-rsc-8", "Resource Recover - 
multiple - block" ], [ "rec-rsc-9", "Resource Recover - group/group" ], [ "stop-unexpected", "Recover multiply active group with stop_unexpected" ], [ "stop-unexpected-2", "Recover multiply active primitive with stop_unexpected" ], [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ], [ "stop-failure-no-quorum", "Stop failure without quorum" ], [ "stop-failure-no-fencing", "Stop failure without fencing available" ], [ "stop-failure-with-fencing", "Stop failure with fencing available" ], [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ], [ "multiple-monitor-one-failed", "Consider resource failed if any of the configured monitor operations failed" ], ], [ [ "quorum-1", "No quorum - ignore" ], [ "quorum-2", "No quorum - freeze" ], [ "quorum-3", "No quorum - stop" ], [ "quorum-4", "No quorum - start anyway" ], [ "quorum-5", "No quorum - start anyway (group)" ], [ "quorum-6", "No quorum - start anyway (clone)" ], [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ], [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ], [ "suicide-not-needed-initial-quorum", "no-quorum-policy=suicide: suicide not necessary at initial quorum" ], [ "suicide-not-needed-never-quorate", "no-quorum-policy=suicide: suicide not necessary if never quorate" ], [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide not necessary if quorate" ], ], [ [ "rec-node-1", "Node Recover - Startup - no fence" ], [ "rec-node-2", "Node Recover - Startup - fence" ], [ "rec-node-3", "Node Recover - HA down - no fence" ], [ "rec-node-4", "Node Recover - HA down - fence" ], [ "rec-node-5", "Node Recover - CRM down - no fence" ], [ "rec-node-6", "Node Recover - CRM down - fence" ], [ "rec-node-7", "Node Recover - no quorum - ignore" ], [ "rec-node-8", "Node Recover - no quorum - freeze" ], [ "rec-node-9", "Node Recover - no quorum - stop" ], [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ], [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ], [ "rec-node-12", "Node Recover - nothing active - fence" ], [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ], [ "rec-node-15", "Node Recover - unknown lrm section" ], [ "rec-node-14", "Serialize all stonith actions" ], ], [ [ "multi1", "Multiple Active (stop/start)" ], ], [ [ "migrate-begin", "Normal migration" ], [ "migrate-success", "Completed migration" ], [ "migrate-partial-1", "Completed migration, missing stop on source" ], [ "migrate-partial-2", "Successful migrate_to only" ], [ "migrate-partial-3", "Successful migrate_to only, target down" ], [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ], [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ], [ "migrate-fail-2", "Failed migrate_from" ], [ "migrate-fail-3", "Failed migrate_from + stop on source" ], [ "migrate-fail-4", "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ], [ "migrate-fail-6", "Failed migrate_to" ], [ "migrate-fail-7", "Failed migrate_to + stop on source" ], [ "migrate-fail-8", "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ], [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ], [ "migrate-stop", "Migration in a stopping stack" ], [ 
"migrate-start", "Migration in a starting stack" ], [ "migrate-stop_start", "Migration in a restarting stack" ], [ "migrate-stop-complex", "Migration in a complex stopping stack" ], [ "migrate-start-complex", "Migration in a complex starting stack" ], [ "migrate-stop-start-complex", "Migration in a complex moving stack" ], [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ], [ "migrate-1", "Migrate (migrate)" ], [ "migrate-2", "Migrate (stable)" ], [ "migrate-3", "Migrate (failed migrate_to)" ], [ "migrate-4", "Migrate (failed migrate_from)" ], [ "novell-252693", "Migration in a stopping stack" ], [ "novell-252693-2", "Migration in a starting stack" ], [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ], [ "bug-1820", "Migration in a group" ], [ "bug-1820-1", "Non-migration in a group" ], [ "migrate-5", "Primitive migration with a clone" ], [ "migrate-fencing", "Migration after Fencing" ], [ "migrate-both-vms", "Migrate two VMs that have no colocation" ], [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ], [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ], [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ], [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ], [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ], [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ], [ "6-migrate-group", "Advanced migrate logic, migrate a group" ], [ "7-migrate-group-one-unmigratable", "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ], [ "8-am-then-bm-a-migrating-b-stopping", "Advanced migrate logic, A then B, A migrating, B stopping" ], [ "9-am-then-bm-b-migrating-a-stopping", "Advanced migrate logic, A then B, B migrate, A stopping" ], [ "10-a-then-bm-b-move-a-clone", "Advanced migrate logic, A clone then B, migrate B while stopping A" ], [ "11-a-then-bm-b-move-a-clone-starting", "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ], [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ], [ "a-demote-then-b-migrate", "A demote then B stop. 
migrate B" ], [ "probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins" ], [ "probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed" ], [ "partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration" ], [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ], ], [ [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ], [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ], [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ], [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ], [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ], [ "inc0", "Incarnation start" ], [ "inc1", "Incarnation start order" ], [ "inc2", "Incarnation silent restart, stop, move" ], [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ], [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ], [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ], [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ], [ "inc7", "Clone colocation" ], [ "inc8", "Clone anti-colocation" ], [ "inc9", "Non-unique clone" ], [ "inc10", "Non-unique clone (stop)" ], [ "inc11", "Primitive colocation with clones" ], [ "inc12", "Clone shutdown" ], [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], [ "bug-lf-2160", "Don't shuffle clones due to colocation" ], [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ], [ "bug-lf-2153", "Clone ordering constraints" ], [ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ], [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ], [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ], [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ], [ "bug-lf-2544", "Balanced clone placement" ], [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ], [ "bug-lf-2574", "Avoid clone shuffle" ], [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ], [ "bug-cl-5168", "Don't shuffle clones" ], [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ], [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ], [ "clone-interleave-1", "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-3", "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ], [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ], [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ], [ "clone-requires-quorum", "Clone with requires=quorum with presumed-inactive instance on failed node" ], ], [ [ "cloned_start_one", "order first clone then clone... 
first clone_min=2" ], [ "cloned_start_two", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ], [ "clone_min_interleave_start_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_start_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ], ], [ [ "unfence-startup", "Clean unfencing" ], [ "unfence-definition", "Unfencing when the agent changes" ], [ "unfence-parameters", "Unfencing when the agent parameters change" ], [ "unfence-device", "Unfencing when a cluster has only fence devices" ], ], [ [ "promoted-0", "Stopped -> Unpromoted" ], [ "promoted-1", "Stopped -> Promote" ], [ "promoted-2", "Stopped -> Promote : notify" ], [ "promoted-3", "Stopped -> Promote : promoted location" ], [ "promoted-4", "Started -> Promote : promoted location" ], [ "promoted-5", "Promoted -> Promoted" ], [ "promoted-6", "Promoted -> Promoted (2)" ], [ "promoted-7", "Promoted -> Fenced" ], [ "promoted-8", "Promoted -> Fenced -> Moved" ], [ "promoted-9", "Stopped + Promotable + No quorum" ], [ "promoted-10", "Stopped -> Promotable : notify with monitor" ], [ "promoted-11", "Stopped -> Promote : colocation" ], [ "novell-239082", "Demote/Promote ordering" ], [ "novell-239087", "Stable promoted placement" ], [ "promoted-12", "Promotion based solely on rsc_location constraints" ], [ "promoted-13", "Include preferences of colocated resources when placing promoted" ], [ "promoted-demote", "Ordering when actions depend on demoting an unpromoted resource" ], [ "promoted-ordering", "Prevent resources from starting that need a promoted instance" ], [ "bug-1765", "Verify promoted-with-promoted colocation does not stop unpromoted instances" ], [ "promoted-group", "Promotion of cloned groups" ], [ "bug-lf-1852", "Don't shuffle promotable instances unnecessarily" ], [ "promoted-failed-demote", "Don't retry failed demote actions" ], [ "promoted-failed-demote-2", "Don't retry failed demote actions (notify=false)" ], [ "promoted-depend", "Ensure resources that depend on the promoted instance don't get allocated until it does" ], [ "promoted-reattach", "Re-attach to a running promoted" ], [ "promoted-allow-start", "Don't include promoted score if it would prevent allocation" ], [ "promoted-colocation", "Allow promoted instance placement to be influenced by colocation constraints" ], [ "promoted-pseudo", "Make sure promote/demote pseudo actions are created correctly" ], [ "promoted-role", "Prevent target-role from promoting more than promoted-max instances" ], [ "bug-lf-2358", "Anti-colocation of promoted instances" ], [ "promoted-promotion-constraint", "Mandatory promoted colocation constraints" ], [ "unmanaged-promoted", "Ensure role is preserved 
for unmanaged resources" ], [ "promoted-unmanaged-monitor", "Start correct monitor for unmanaged promoted instances" ], [ "promoted-demote-2", "Demote does not clear past failure" ], [ "promoted-move", "Move promoted based on failure of colocated group" ], [ "promoted-probed-score", "Observe the promotion score of probed resources" ], [ "colocation_constraint_stops_promoted", "cl#5054 - Ensure promoted is demoted when stopped by colocation constraint" ], [ "colocation_constraint_stops_unpromoted", "cl#5054 - Ensure unpromoted is not demoted when stopped by colocation constraint" ], [ "order_constraint_stops_promoted", "cl#5054 - Ensure promoted is demoted when stopped by order constraint" ], [ "order_constraint_stops_unpromoted", "cl#5054 - Ensure unpromoted is not demoted when stopped by order constraint" ], [ "promoted_monitor_restart", "cl#5072 - Ensure promoted monitor operation will start after promotion" ], [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ], [ "bug-5143-ms-shuffle", "Prevent promoted instance shuffling due to promotion score" ], [ "promoted-demote-block", "Block promotion if demote fails with on-fail=block" ], [ "promoted-dependent-ban", "Don't stop instances from being active because a dependent is banned from that host" ], [ "promoted-stop", "Stop instances due to location constraint with role=Started" ], [ "promoted-partially-demoted-group", "Allow partially demoted group to finish demoting" ], [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ], [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ], [ "promoted-asymmetrical-order", "Fix the behaviors of multi-state resources with asymmetrical ordering" ], [ "promoted-notify", "Promotion with notifications" ], [ "promoted-score-startup", "Use permanent promoted scores without LRM history" ], [ "failed-demote-recovery", "Recover resource in unpromoted role after demote fails" ], [ "failed-demote-recovery-promoted", "Recover resource in promoted role after demote fails" ], [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ], + [ "leftover-pending-monitor", "Prevent a leftover pending monitor from causing unexpected stop of other instances" ], ], [ [ "history-1", "Correctly parse stateful-1 resource state" ], ], [ [ "managed-0", "Managed (reference)" ], [ "managed-1", "Not managed - down" ], [ "managed-2", "Not managed - up" ], [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ], [ "bug-5028-detach", "Ensure detach still works" ], [ "bug-5028-bottom", "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ], [ "unmanaged-stop-1", "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ], [ "unmanaged-stop-2", "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ], [ "unmanaged-stop-3", "cl#5155 - Block the stop of resources if any depending resource in a group is 
unmanaged" ], [ "unmanaged-stop-4", "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ], [ "unmanaged-block-restart", "Block restart of resources if any dependent resource in a group is unmanaged" ], ], [ [ "interleave-0", "Interleave (reference)" ], [ "interleave-1", "coloc - not interleaved" ], [ "interleave-2", "coloc - interleaved" ], [ "interleave-3", "coloc - interleaved (2)" ], [ "interleave-pseudo-stop", "Interleaved clone during stonith" ], [ "interleave-stop", "Interleaved clone during stop" ], [ "interleave-restart", "Interleaved clone during dependency restart" ], ], [ [ "notify-0", "Notify reference" ], [ "notify-1", "Notify simple" ], [ "notify-2", "Notify simple, confirm" ], [ "notify-3", "Notify move, confirm" ], [ "novell-239079", "Notification priority" ], #[ "notify-2", "Notify - 764" ], [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ], [ "route-remote-notify", "Route remote notify actions through correct cluster node" ], [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ], ], [ [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ], [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ], [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ], [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ], [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ], [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ], [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ], [ "829", "OSDL #829" ], [ "994", "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ], [ "994-2", "OSDL #994 - with a dependent resource" ], [ "1360", "OSDL #1360 - Clone stickiness" ], [ "1484", "OSDL #1484 - on_fail=stop" ], [ "1494", "OSDL #1494 - Clone stability" ], [ "unrunnable-1", "Unrunnable" ], [ "unrunnable-2", "Unrunnable 2" ], [ "stonith-0", "Stonith loop - 1" ], [ "stonith-1", "Stonith loop - 2" ], [ "stonith-2", "Stonith loop - 3" ], [ "stonith-3", "Stonith startup" ], [ "stonith-4", "Stonith node state" ], [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ], [ "bug-1572-1", "Recovery of groups depending on promotable role" ], [ "bug-1572-2", "Recovery of groups depending on promotable role when promoted is not re-promoted" ], [ "bug-1685", "Depends-on-promoted ordering" ], [ "bug-1822", "Don't promote partially active groups" ], [ "bug-pm-11", "New resource added to a m/s group" ], [ "bug-pm-12", "Recover only the failed portion of a cloned group" ], [ "bug-n-387749", "Don't shuffle clone instances" ], [ "bug-n-385265", "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ], [ "bug-n-385265-2", "Ensure groups are migrated instead of remaining partially active on the current node" ], [ "bug-lf-1920", "Correctly handle probes that find active resources" ], [ "bnc-515172", "Location constraint with multiple expressions" ], [ "colocate-primitive-with-clone", "Optional colocation with a clone" ], [ "use-after-free-merge", "Use-after-free in native_merge_weights" ], [ "bug-lf-2551", "STONITH ordering for stop" ], [ "bug-lf-2606", "Stonith implies demote" ], [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ], [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" 
], [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ], [ "bug-5014-A-stop-B-started", "Verify when A stops B does not stop if it has already started using symmetrical=false" ], [ "bug-5014-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using symmetrical=false" ], [ "bug-5014-CthenAthenB-C-stopped", "Verify when C then A is symmetrical=true, A then B is symmetrical=false, and C is stopped that nothing starts" ], [ "bug-5014-CLONE-A-start-B-start", "Verify when A starts B starts using clone resources with symmetrical=false" ], [ "bug-5014-CLONE-A-stop-B-started", "Verify when A stops B does not stop if it has already started using clone resources with symmetrical=false" ], [ "bug-5014-GROUP-A-start-B-start", "Verify when A starts B starts when using group resources with symmetrical=false" ], [ "bug-5014-GROUP-A-stopped-B-started", "Verify when A stops B does not stop if it has already started using group resources with symmetrical=false" ], [ "bug-5014-GROUP-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-false", "Verify ordered sets work with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-true", "Verify ordered sets work with symmetrical=true" ], [ "clbz5007-promotable-colocation", "Verify use of colocation scores other than INFINITY and -INFINITY works on multi-state resources" ], [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ], [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ], [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ], [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ], [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ], [ "failcount", "Ensure failcounts are correctly expired" ], [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ], [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ], [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ], [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ], [ "monitor-onfail-stop", "bug-5058 - Monitor failure with on-fail set to stop" ], [ "bug-5059", "No need to restart p_stateful1:*" ], [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ], [ "bug-5069-op-disabled", "Test on-fail=ignore with failure when monitor is disabled" ], [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ], [ "expire-non-blocked-failure", "Ignore failure-timeout only if the failed operation has on-fail=block" ], [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ], [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ], [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ], [ "order-expired-failure", "Order failcount cleanup after remote fencing" ], [ "ignore_stonith_rsc_order1", "cl#5056 - Ignore order constraint between stonith and non-stonith rsc" ], [ "ignore_stonith_rsc_order2", "cl#5056 - Ignore order constraint with group rsc containing mixed stonith and non-stonith" ], [ "ignore_stonith_rsc_order3", "cl#5056 - Ignore 
order constraint, stonith clone and mixed group" ], [ "ignore_stonith_rsc_order4", "cl#5056 - Ignore order constraint, stonith clone and clone with nested mixed group" ], [ "honor_stonith_rsc_order1", "cl#5056 - Honor order constraint, stonith clone and pure stonith group (single rsc)" ], [ "honor_stonith_rsc_order2", "cl#5056 - Honor order constraint, stonith clone and pure stonith group (multiple rsc)" ], [ "honor_stonith_rsc_order3", "cl#5056 - Honor order constraint, stonith clones with nested pure stonith group" ], [ "honor_stonith_rsc_order4", "cl#5056 - Honor order constraint between two native stonith rscs" ], [ "multiply-active-stonith", "Multiply active stonith" ], [ "probe-timeout", "cl#5099 - Default probe timeout" ], [ "order-first-probes", "cl#5301 - Respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], ], [ [ "systemhealth1", "System Health () #1" ], [ "systemhealth2", "System Health () #2" ], [ "systemhealth3", "System Health () #3" ], [ "systemhealthn1", "System Health (None) #1" ], [ "systemhealthn2", "System Health (None) #2" ], [ "systemhealthn3", "System Health (None) #3" ], [ "systemhealthm1", "System Health (Migrate On Red) #1" ], [ "systemhealthm2", "System Health (Migrate On Red) #2" ], [ "systemhealthm3", "System Health (Migrate On Red) #3" ], [ "systemhealtho1", "System Health (Only Green) #1" ], [ "systemhealtho2", "System Health (Only Green) #2" ], [ "systemhealtho3", "System Health (Only Green) #3" ], [ "systemhealthp1", "System Health (Progressive) #1" ], [ "systemhealthp2", "System Health (Progressive) #2" ], [ "systemhealthp3", "System Health (Progressive) #3" ], [ "allow-unhealthy-nodes", "System Health (migrate-on-red + allow-unhealthy-nodes)" ], ], [ [ "utilization", "Placement Strategy - utilization" ], [ "minimal", "Placement Strategy - minimal" ], [ "balanced", "Placement Strategy - balanced" ], ], [ [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ], [ "placement-priority", "Optimized Placement Strategy - priority" ], [ "placement-location", "Optimized Placement Strategy - location" ], [ "placement-capacity", "Optimized Placement Strategy - capacity" ], ], [ [ "utilization-order1", "Utilization Order - Simple" ], [ "utilization-order2", "Utilization Order - Complex" ], [ "utilization-order3", "Utilization Order - Migrate" ], [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ], [ "utilization-complex", "Utilization with complex relationships" ], [ "utilization-shuffle", "Don't displace prmExPostgreSQLDB2 on act2; start prmExPostgreSQLDB1 on act3" ], [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ], [ "load-stopped-loop-2", "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ], ], [ [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ], [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ], [ "colocated-utilization-group", "Colocated Utilization - Group" ], [ "colocated-utilization-clone", "Colocated Utilization - Clone" ], [ "utilization-check-allowed-nodes", "Only check the capacities of the nodes that can run the resource" ], ], [ [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ], [ "node-maintenance-1", "cl#5128 - Node maintenance" ], [ "node-maintenance-2", "cl#5128 - 
Node maintenance (coming out of maintenance mode)" ], [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ], [ "rsc-maintenance", "Per-resource maintenance" ], ], [ [ "not-installed-agent", "The resource agent is missing" ], [ "not-installed-tools", "Something the resource agent needs is missing" ], ], [ [ "stopped-monitor-00", "Stopped Monitor - initial start" ], [ "stopped-monitor-01", "Stopped Monitor - failed started" ], [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ], [ "stopped-monitor-03", "Stopped Monitor - stop started" ], [ "stopped-monitor-04", "Stopped Monitor - failed stop" ], [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ], [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ], [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ], [ "stopped-monitor-08", "Stopped Monitor - migrate" ], [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ], [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ], [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ], [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ], [ "stopped-monitor-20", "Stopped Monitor - initial stop" ], [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ], [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ], [ "stopped-monitor-23", "Stopped Monitor - start stopped" ], [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ], [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ], [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ], [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ], [ "stopped-monitor-30", "Stopped Monitor - new node started" ], [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ], ], [ # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on the right nodes [ "intervals", "Recurring monitor interval handling" ], ], [ [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ], [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ], [ "ticket-primitive-3", "Ticket - Primitive (loss-policy=stop, revoked)" ], [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ], [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ], [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ], [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ], [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ], [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ], [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ], [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ], [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ], [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ], [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ], [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ], [ "ticket-primitive-16", "Ticket - Primitive 
(loss-policy=demote, standby, granted)" ], [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ], [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ], [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ], [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ], [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ], [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ], [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ], [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ], [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ], [ "ticket-group-3", "Ticket - Group (loss-policy=stop, revoked)" ], [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ], [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ], [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ], [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ], [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ], [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ], [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ], [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ], [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ], [ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ], [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ], [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ], [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ], [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ], [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ], [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ], [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ], [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ], [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ], [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ], [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ], [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ], [ "ticket-clone-3", "Ticket - Clone (loss-policy=stop, revoked)" ], [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ], [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ], [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ], [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ], [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ], [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ], [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ], [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ], [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ], [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ], [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ], [ "ticket-clone-15", 
"Ticket - Clone (loss-policy=stop, standby, revoked)" ], [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ], [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ], [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ], [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ], [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ], [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ], [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ], [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ], [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-promoted-1", "Ticket - Promoted (loss-policy=stop, initial)" ], [ "ticket-promoted-2", "Ticket - Promoted (loss-policy=stop, granted)" ], [ "ticket-promoted-3", "Ticket - Promoted (loss-policy=stop, revoked)" ], [ "ticket-promoted-4", "Ticket - Promoted (loss-policy=demote, initial)" ], [ "ticket-promoted-5", "Ticket - Promoted (loss-policy=demote, granted)" ], [ "ticket-promoted-6", "Ticket - Promoted (loss-policy=demote, revoked)" ], [ "ticket-promoted-7", "Ticket - Promoted (loss-policy=fence, initial)" ], [ "ticket-promoted-8", "Ticket - Promoted (loss-policy=fence, granted)" ], [ "ticket-promoted-9", "Ticket - Promoted (loss-policy=fence, revoked)" ], [ "ticket-promoted-10", "Ticket - Promoted (loss-policy=freeze, initial)" ], [ "ticket-promoted-11", "Ticket - Promoted (loss-policy=freeze, granted)" ], [ "ticket-promoted-12", "Ticket - Promoted (loss-policy=freeze, revoked)" ], [ "ticket-promoted-13", "Ticket - Promoted (loss-policy=stop, standby, granted)" ], [ "ticket-promoted-14", "Ticket - Promoted (loss-policy=stop, granted, standby)" ], [ "ticket-promoted-15", "Ticket - Promoted (loss-policy=stop, standby, revoked)" ], [ "ticket-promoted-16", "Ticket - Promoted (loss-policy=demote, standby, granted)" ], [ "ticket-promoted-17", "Ticket - Promoted (loss-policy=demote, granted, standby)" ], [ "ticket-promoted-18", "Ticket - Promoted (loss-policy=demote, standby, revoked)" ], [ "ticket-promoted-19", "Ticket - Promoted (loss-policy=fence, standby, granted)" ], [ "ticket-promoted-20", "Ticket - Promoted (loss-policy=fence, granted, standby)" ], [ "ticket-promoted-21", "Ticket - Promoted (loss-policy=fence, standby, revoked)" ], [ "ticket-promoted-22", "Ticket - Promoted (loss-policy=freeze, standby, granted)" ], [ "ticket-promoted-23", "Ticket - Promoted (loss-policy=freeze, granted, standby)" ], [ "ticket-promoted-24", "Ticket - Promoted (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ], [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ], [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ], [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ], [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ], [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ], [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ], [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ], [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ 
"ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ], [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ], [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ], [ "site-specific-params", "Site-specific instance attributes based on rules" ], ], [ [ "template-1", "Template - 1" ], [ "template-2", "Template - 2" ], [ "template-3", "Template - 3 (merge operations)" ], [ "template-coloc-1", "Template - Colocation 1" ], [ "template-coloc-2", "Template - Colocation 2" ], [ "template-coloc-3", "Template - Colocation 3" ], [ "template-order-1", "Template - Order 1" ], [ "template-order-2", "Template - Order 2" ], [ "template-order-3", "Template - Order 3" ], [ "template-ticket", "Template - Ticket" ], [ "template-rsc-sets-1", "Template - Resource Sets 1" ], [ "template-rsc-sets-2", "Template - Resource Sets 2" ], [ "template-rsc-sets-3", "Template - Resource Sets 3" ], [ "template-rsc-sets-4", "Template - Resource Sets 4" ], [ "template-clone-primitive", "Cloned primitive from template" ], [ "template-clone-group", "Cloned group from template" ], [ "location-sets-templates", "Resource sets and templates - Location" ], [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ], [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ], [ "tags-location", "Tags - Location" ], [ "tags-ticket", "Tags - Ticket" ], ], [ [ "container-1", "Container - initial" ], [ "container-2", "Container - monitor failed" ], [ "container-3", "Container - stop failed" ], [ "container-4", "Container - reached migration-threshold" ], [ "container-group-1", "Container in group - initial" ], [ "container-group-2", "Container in group - monitor failed" ], [ "container-group-3", "Container in group - stop failed" ], [ "container-group-4", "Container in group - reached migration-threshold" ], [ "container-is-remote-node", "Place resource within container when container is remote-node" ], [ "bug-rh-1097457", "Kill user defined container/contents ordering" ], [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ], [ "bundle-order-startup", "Bundle startup ordering" ], [ "bundle-order-partial-start", "Bundle startup ordering when some dependencies are already running" ], [ "bundle-order-partial-start-2", "Bundle startup ordering when some dependencies and the container are already running" ], [ "bundle-order-stop", "Bundle stop ordering" ], [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ], [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ], [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ], [ "bundle-order-startup-clone-2", "Bundle startup with clones" ], [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ], [ "bundle-nested-colocation", "Colocation of nested connection resources" ], [ "bundle-order-fencing", "Order pseudo bundle fencing after parent node fencing if both are happening" ], [ "bundle-probe-order-1", "order 1" ], [ "bundle-probe-order-2", "order 2" ], [ "bundle-probe-order-3", "order 3" ], [ "bundle-probe-remotes", "Ensure remotes get probed too" ], [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "bundle-connection-with-container", "Don't move a container due to connection preferences" ], [ "nested-remote-recovery", "Recover 
bundle's container hosted on remote node" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], [ "whitebox-fail2", "Fail cluster connection to guest node" ], [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ], [ "whitebox-start", "Start whitebox container with resources assigned to it" ], [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ], [ "whitebox-move", "Move whitebox container with resources assigned to it" ], [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ], [ "whitebox-ms-ordering", "Verify promote/demote cannot occur before connection is established" ], [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ], [ "whitebox-orphaned", "Properly shut down orphaned whitebox container" ], [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ], [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ], [ "whitebox-migrate1", "Migrate both container and connection resource" ], [ "whitebox-imply-stop-on-fence", "Imply stop action on container node rsc when host node is fenced" ], [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ], [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ], [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ], [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ], ], [ [ "remote-startup-probes", "Baremetal remote-node startup probes" ], [ "remote-startup", "Start up a newly discovered remote-node with no status" ], [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ], [ "remote-fence-unclean2", "Fence baremetal remote-node after cluster node fails and connection cannot be recovered" ], [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ], [ "remote-move", "Move remote-node connection resource" ], [ "remote-disable", "Disable a baremetal remote-node" ], [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ], [ "remote-orphaned", "Properly shut down orphaned connection resource" ], [ "remote-orphaned2", "Verify we can handle orphaned remote connections with active resources on the remote" ], [ "remote-recover", "Recover connection resource after cluster-node fails" ], [ "remote-stale-node-entry", "Make sure we properly handle leftover remote-node entries in the node section" ], [ "remote-partial-migrate", "Make sure partial migrations are handled before ops on the remote node" ], [ "remote-partial-migrate2", "Make sure partial migration target is preferred for remote connection" ], [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ], [ "remote-start-fail", "Make sure a start failure does not result in fencing if no active resources are on remote" ], [ "remote-unclean2", "Make sure monitor failure always results in fencing, even if no rsc are active on remote" ], [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ], [ "remote-recovery", "Recover remote connections before attempting demotion" ], [ "remote-recover-connection", "Optimistic recovery of only the connection" ], [ "remote-recover-all", "Fencing when the connection has no home" ], [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ], [ "remote-recover-unknown", "Fencing when the connection has no home and the 
remote has no operation history" ], [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], [ "remote-connection-unrecoverable", "Remote connection host must be fenced, with connection unrecoverable" ], [ "remote-connection-shutdown", "Remote connection shutdown" ], [ "cancel-behind-moving-remote", "Route recurring monitor cancellations through original node of a moving remote connection" ], ], [ [ "resource-discovery", "Exercises resource-discovery location constraint option" ], [ "rsc-discovery-per-node", "Disable resource discovery per node" ], [ "shutdown-lock", "Ensure shutdown lock works properly" ], [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], ], [ [ "op-defaults", "Test op_defaults conditional expressions" ], [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], [ "op-defaults-3", "Test op_defaults precedence" ], [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], ], [ [ "stop-all-resources", "Test stop-all-resources=true "], ], [ [ "ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK" ], [ "ocf_degraded_promoted-remap-ocf_ok", "Test degraded promoted remapped to OK"], ], ] TESTS_64BIT = [ [ [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ], ], ] # Constants substituted in the build process class BuildVars(object): SBINDIR = "@sbindir@" BUILDDIR = "@abs_top_builddir@" CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@" # These values must be kept in sync with crm_exit_t class CrmExit(object): OK = 0 ERROR = 1 NOT_INSTALLED = 5 NOINPUT = 66 CANTCREAT = 73 def is_executable(path): """ Check whether a file at a given path is executable. """ try: return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR except OSError: return False def diff(file1, file2, **kwargs): """ Call diff on two files """ return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines", file1, file2 ], **kwargs) def sort_file(filename): """ Sort a file alphabetically """ with io.open(filename, "rt") as f: lines = sorted(f) with io.open(filename, "wt") as f: f.writelines(lines) def remove_files(filenames): """ Remove a list of files """ for filename in filenames: try: os.remove(filename) except OSError: pass def normalize(filename): """ Remove text from a file that isn't important for comparison """ if not hasattr(normalize, "patterns"): normalize.patterns = [ re.compile(r'crm_feature_set="[^"]*"'), re.compile(r'batch-limit="[0-9]*"') ] if os.path.isfile(filename): with io.open(filename, "rt") as f: lines = f.readlines() with io.open(filename, "wt") as f: for line in lines: for pattern in normalize.patterns: line = pattern.sub("", line) f.write(line) def cat(filename, dest=sys.stdout): """ Copy a file to a destination file descriptor """ with io.open(filename, "rt") as f: shutil.copyfileobj(f, dest) class CtsScheduler(object): """ Regression tests for Pacemaker's scheduler """ def _parse_args(self, argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Display any differences from expected output') parser.add_argument('--run', metavar='TEST', help=('Run only single specified test (any further ' 'arguments will be passed to crm_simulate)')) parser.add_argument('--update', action='store_true', help='Update expected results with actual results') parser.add_argument('-b', '--binary', metavar='PATH', 
help='Specify path to crm_simulate') parser.add_argument('-i', '--io-dir', metavar='PATH', help='Specify path to regression test data directory') parser.add_argument('-o', '--out-dir', metavar='PATH', help='Specify where intermediate and output files should go') parser.add_argument('-v', '--valgrind', action='store_true', help='Run all commands under valgrind') parser.add_argument('--valgrind-dhat', action='store_true', help='Run all commands under valgrind with heap analyzer') parser.add_argument('--valgrind-skip-output', action='store_true', help='If running under valgrind, do not display output') parser.add_argument('--testcmd-options', metavar='OPTIONS', default='', help='Additional options for command under test') # argparse can't handle "everything after --run TEST", so grab that self.single_test_args = [] narg = 0 for arg in argv: narg = narg + 1 if arg == '--run': (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:]) break self.args = parser.parse_args(argv[1:]) def _error(self, s): print(" * ERROR: %s" % s) def _failed(self, s): print(" * FAILED: %s" % s) def _get_valgrind_cmd(self): """ Return command arguments needed (or not) to run valgrind """ if self.args.valgrind: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "-q", "--gen-suppressions=all", "--time-stamp=yes", "--trace-children=no", "--show-reachable=no", "--leak-check=full", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home) ] if self.args.valgrind_dhat: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "--tool=exp-dhat", "--time-stamp=yes", "--trace-children=no", "--show-top-n=100", "--num-callers=4" ] return [] def _get_simulator_cmd(self): """ Locate the simulation binary """ if self.args.binary is None: self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate" if not is_executable(self.args.binary): self.args.binary = BuildVars.SBINDIR + "/crm_simulate" if not is_executable(self.args.binary): # @TODO it would be more pythonic to raise an exception self._error("Test binary " + self.args.binary + " not found") sys.exit(CrmExit.NOT_INSTALLED) return [ self.args.binary ] + shlex.split(self.args.testcmd_options) def set_schema_env(self): """ Ensure schema directory environment variable is set, if possible """ try: return os.environ['PCMK_schema_directory'] except KeyError: for d in [ os.path.join(BuildVars.BUILDDIR, "xml"), BuildVars.CRM_SCHEMA_DIRECTORY ]: if os.path.isdir(d): os.environ['PCMK_schema_directory'] = d return d return None def __init__(self, argv=sys.argv): # Ensure all command output is in portable locale for comparison os.environ['LC_ALL'] = "C" self._parse_args(argv) # Where this executable lives self.test_home = os.path.dirname(os.path.realpath(argv[0])) # Where test data resides if self.args.io_dir is None: self.args.io_dir = os.path.join(self.test_home, "scheduler") self.xml_input_dir = os.path.join(self.args.io_dir, "xml") self.expected_dir = os.path.join(self.args.io_dir, "exp") self.dot_expected_dir = os.path.join(self.args.io_dir, "dot") self.scores_dir = os.path.join(self.args.io_dir, "scores") self.summary_dir = os.path.join(self.args.io_dir, "summary") self.stderr_expected_dir = os.path.join(self.args.io_dir, "stderr") # Create a temporary directory to store diff file self.failed_dir = tempfile.mkdtemp(prefix='cts-scheduler_') # Where to store generated files if self.args.out_dir is None: self.args.out_dir = self.args.io_dir self.failed_filename = os.path.join(self.failed_dir, "test-output.diff") else: self.failed_filename = 
os.path.join(self.args.out_dir, "test-output.diff") os.environ['CIB_shadow_dir'] = self.args.out_dir self.failed_file = None self.outfile_out_dir = os.path.join(self.args.out_dir, "out") self.dot_out_dir = os.path.join(self.args.out_dir, "dot") self.scores_out_dir = os.path.join(self.args.out_dir, "scores") self.summary_out_dir = os.path.join(self.args.out_dir, "summary") self.stderr_out_dir = os.path.join(self.args.out_dir, "stderr") self.valgrind_out_dir = os.path.join(self.args.out_dir, "valgrind") # Single test mode (if requested) try: # User can give test base name or file name of a test input self.args.run = os.path.splitext(os.path.basename(self.args.run))[0] except (AttributeError, TypeError): pass # --run was not specified self.set_schema_env() # Arguments needed (or not) to run commands self.valgrind_args = self._get_valgrind_cmd() self.simulate_args = self._get_simulator_cmd() # Test counters self.num_failed = 0 self.num_tests = 0 # Ensure that the main output directory exists # We don't want to create it with os.makedirs below if not os.path.isdir(self.args.out_dir): self._error("Output directory missing; can't create output files") sys.exit(CrmExit.CANTCREAT) # Create output subdirectories if they don't exist try: os.makedirs(self.outfile_out_dir, 0o755, True) os.makedirs(self.dot_out_dir, 0o755, True) os.makedirs(self.scores_out_dir, 0o755, True) os.makedirs(self.summary_out_dir, 0o755, True) os.makedirs(self.stderr_out_dir, 0o755, True) if self.valgrind_args: os.makedirs(self.valgrind_out_dir, 0o755, True) except OSError as ex: self._error("Unable to create output subdirectory: %s" % ex) remove_files([ self.outfile_out_dir, self.dot_out_dir, self.scores_out_dir, self.summary_out_dir, self.stderr_out_dir, ]) sys.exit(CrmExit.CANTCREAT) def _compare_files(self, filename1, filename2): """ Add any file differences to failed results """ if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0: diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL) self.failed_file.write("\n") return True return False def run_one(self, test_name, test_desc, test_args=[]): """ Run one scheduler test """ print(" Test %-25s %s" % ((test_name + ":"), test_desc)) did_fail = False self.num_tests = self.num_tests + 1 # Test inputs input_filename = os.path.join( self.xml_input_dir, "%s.xml" % test_name) expected_filename = os.path.join( self.expected_dir, "%s.exp" % test_name) dot_expected_filename = os.path.join( self.dot_expected_dir, "%s.dot" % test_name) scores_filename = os.path.join( self.scores_dir, "%s.scores" % test_name) summary_filename = os.path.join( self.summary_dir, "%s.summary" % test_name) stderr_expected_filename = os.path.join( self.stderr_expected_dir, "%s.stderr" % test_name) # (Intermediate) test outputs output_filename = os.path.join( self.outfile_out_dir, "%s.out" % test_name) dot_output_filename = os.path.join( self.dot_out_dir, "%s.dot.pe" % test_name) score_output_filename = os.path.join( self.scores_out_dir, "%s.scores.pe" % test_name) summary_output_filename = os.path.join( self.summary_out_dir, "%s.summary.pe" % test_name) stderr_output_filename = os.path.join( self.stderr_out_dir, "%s.stderr.pe" % test_name) valgrind_output_filename = os.path.join( self.valgrind_out_dir, "%s.valgrind" % test_name) # Common arguments for running test test_cmd = [] if self.valgrind_args: test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ] test_cmd = test_cmd + self.simulate_args # @TODO It would be more pythonic to raise exceptions for 
errors, # then perhaps it would be nice to make a single-test class # Ensure necessary test inputs exist if not os.path.isfile(input_filename): self._error("No input") self.num_failed = self.num_failed + 1 return CrmExit.NOINPUT if not self.args.update and not os.path.isfile(expected_filename): self._error("No stored output") return CrmExit.NOINPUT # Run simulation to generate summary output if self.args.run: # Single test mode test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args print(" ".join(test_cmd_full)) else: # @TODO Why isn't test_args added here? test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] with io.open(summary_output_filename, "wt") as f: simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) # This makes diff happy regardless of --enable-compat-2.0. # Use sed -E so the expressions work with both Linux and BSD sed. sed = subprocess.Popen(["sed", "-E", "-e", "s/ocf::/ocf:/g", "-e", r"s/Masters:/Promoted:/", "-e", r"s/Slaves:/Unpromoted:/", "-e", r"s/ Master( |\[|$)/ Promoted\1/", "-e", r"s/ Slave / Unpromoted /", ], stdin=simulation.stdout, stdout=f, stderr=subprocess.STDOUT) simulation.stdout.close() sed.communicate() if self.args.run: cat(summary_output_filename)
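For illustration only, the same role-name normalization could be done in pure Python rather than piping through sed; the sketch below is a hypothetical helper (the script deliberately uses sed so the child processes write the output file directly), and note re.sub() replaces every match per line where the unanchored sed substitutions above replace only the first:

import re

# Remap pre-2.1 role vocabulary to current terms (illustrative only)
_COMPAT_SUBS = [
    (re.compile(r"ocf::"), "ocf:"),
    (re.compile(r"Masters:"), "Promoted:"),
    (re.compile(r"Slaves:"), "Unpromoted:"),
    (re.compile(r" Master( |\[|$)"), r" Promoted\1"),
    (re.compile(r" Slave "), " Unpromoted "),
]

def normalize_compat(line):
    """ Return line with deprecated role names mapped to current ones """
    for pattern, replacement in _COMPAT_SUBS:
        line = pattern.sub(replacement, line)
    return line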
# Re-run simulation to generate dot, graph, and scores test_cmd_full = test_cmd + [ '-x', input_filename, '-D', dot_output_filename, '-G', output_filename, '-sSQ' ] + test_args with io.open(stderr_output_filename, "wt") as f_stderr, \ io.open(score_output_filename, "wt") as f_score: rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ) # Check for test command failure if rc != CrmExit.OK: self._failed("Test returned: %d" % rc) did_fail = True print(" ".join(test_cmd_full)) # Check for valgrind errors if self.valgrind_args and not self.args.valgrind_skip_output: if os.stat(valgrind_output_filename).st_size > 0: self._failed("Valgrind reported errors") did_fail = True cat(valgrind_output_filename) remove_files([ valgrind_output_filename ]) # Check for core dump if os.path.isfile("core"): self._failed("Core-file detected: core." + test_name) did_fail = True os.rename("core", "%s/core.%s" % (self.test_home, test_name)) # Check any stderr output if os.path.isfile(stderr_expected_filename): if self._compare_files(stderr_expected_filename, stderr_output_filename): self._failed("stderr changed") did_fail = True elif os.stat(stderr_output_filename).st_size > 0: self._failed("Output was written to stderr") did_fail = True cat(stderr_output_filename) remove_files([ stderr_output_filename ]) # Check whether output graph exists, and normalize it if (not os.path.isfile(output_filename) or os.stat(output_filename).st_size == 0): self._error("No graph produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ output_filename ]) return CrmExit.ERROR normalize(output_filename) # Check whether dot output exists, and sort it if (not os.path.isfile(dot_output_filename) or os.stat(dot_output_filename).st_size == 0): self._error("No dot-file summary produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ dot_output_filename, output_filename ]) return CrmExit.ERROR with io.open(dot_output_filename, "rt") as f: first_line = f.readline() # "digraph" line with opening brace lines = f.readlines() last_line = lines[-1] # closing brace del lines[-1] lines = sorted(set(lines)) # unique sort with io.open(dot_output_filename, "wt") as f: f.write(first_line) f.writelines(lines) f.write(last_line) # Check whether score output exists, and sort it if (not os.path.isfile(score_output_filename) or os.stat(score_output_filename).st_size == 0): self._error("No allocation scores produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ score_output_filename, output_filename ]) return CrmExit.ERROR else: sort_file(score_output_filename) if self.args.update: shutil.copyfile(output_filename, expected_filename) shutil.copyfile(dot_output_filename, dot_expected_filename) shutil.copyfile(score_output_filename, scores_filename) shutil.copyfile(summary_output_filename, summary_filename) print(" Updated expected outputs") if self._compare_files(summary_filename, summary_output_filename): self._failed("summary changed") did_fail = True if self._compare_files(dot_expected_filename, dot_output_filename): self._failed("dot-file summary changed") did_fail = True else: remove_files([ dot_output_filename ]) if self._compare_files(expected_filename, output_filename): self._failed("xml-file changed") did_fail = True if self._compare_files(scores_filename, score_output_filename): self._failed("scores-file changed") did_fail = True remove_files([ output_filename, dot_output_filename, score_output_filename, summary_output_filename ]) if did_fail: self.num_failed = self.num_failed + 1 return CrmExit.ERROR return CrmExit.OK def run_all(self): """ Run all defined tests """ if platform.architecture()[0] == "64bit": TESTS.extend(TESTS_64BIT) for group in TESTS: for test in group: try: args = test[2] except IndexError: args = [] self.run_one(test[0], test[1], args) print() def _print_summary(self): """ Print a summary of parameters for this test run """ print("Test home is:\t" + self.test_home) print("Test binary is:\t" + self.args.binary) if 'PCMK_schema_directory' in os.environ: print("Schema home is:\t" + os.environ['PCMK_schema_directory']) if self.valgrind_args != []: print("Activating memory testing with valgrind") print() def _test_results(self): if self.num_failed == 0: shutil.rmtree(self.failed_dir) return CrmExit.OK if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0: 
if self.args.verbose: self._error("Results of %d failed tests (out of %d):" % (self.num_failed, self.num_tests)) cat(self.failed_filename) else: self._error("Results of %d failed tests (out of %d) are in %s" % (self.num_failed, self.num_tests, self.failed_filename)) self._error("Use -V to display them after running the tests") else: self._error("%d (of %d) tests failed (no diff results)" % (self.num_failed, self.num_tests)) if os.path.isfile(self.failed_filename): shutil.rmtree(self.failed_dir) return CrmExit.ERROR def run(self): """ Run test(s) as specified """ self._print_summary() # Zero out the error log self.failed_file = io.open(self.failed_filename, "wt") if self.args.run is None: print("Performing the following tests from " + self.args.io_dir) print() self.run_all() print() self.failed_file.close() rc = self._test_results() else: rc = self.run_one(self.args.run, "Single shot", self.single_test_args) self.failed_file.close() if self.num_failed > 0: print("\nFailures:\nThese have also been written to: " + self.failed_filename + "\n") cat(self.failed_filename) shutil.rmtree(self.failed_dir) return rc if __name__ == "__main__": sys.exit(CtsScheduler().run()) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/cts/scheduler/dot/leftover-pending-monitor.dot b/cts/scheduler/dot/leftover-pending-monitor.dot new file mode 100644 index 0000000000..601ef31579 --- /dev/null +++ b/cts/scheduler/dot/leftover-pending-monitor.dot @@ -0,0 +1,14 @@ + digraph "g" { +"promotable-1_running_0" [ style=bold color="green" fontcolor="orange"] +"promotable-1_start_0" -> "promotable-1_running_0" [ style = bold] +"promotable-1_start_0" -> "stateful-1:2_start_0 node-1" [ style = dashed] +"promotable-1_start_0" [ style=bold color="green" fontcolor="orange"] +"st-sbd_monitor_0 node-2" [ style=dashed color="red" fontcolor="black"] +"stateful-1:2_monitor_5000 node-1" [ style=dashed color="red" fontcolor="black"] +"stateful-1:2_start_0 node-1" -> "promotable-1_running_0" [ style = dashed] +"stateful-1:2_start_0 node-1" -> "stateful-1:2_monitor_5000 node-1" [ style = dashed] +"stateful-1:2_start_0 node-1" [ style=dashed color="red" fontcolor="black"] +"stateful-1_monitor_0 node-2" -> "promotable-1_start_0" [ style = dashed] +"stateful-1_monitor_0 node-2" -> "stateful-1:2_start_0 node-1" [ style = dashed] +"stateful-1_monitor_0 node-2" [ style=dashed color="red" fontcolor="black"] +} diff --git a/cts/scheduler/exp/leftover-pending-monitor.exp b/cts/scheduler/exp/leftover-pending-monitor.exp new file mode 100644 index 0000000000..52619adeaf --- /dev/null +++ b/cts/scheduler/exp/leftover-pending-monitor.exp @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/scores/leftover-pending-monitor.scores b/cts/scheduler/scores/leftover-pending-monitor.scores new file mode 100644 index 0000000000..8ae30ed91c --- /dev/null +++ b/cts/scheduler/scores/leftover-pending-monitor.scores @@ -0,0 +1,28 @@ + +pcmk__clone_allocate: promotable-1 allocation score on node-1: 0 +pcmk__clone_allocate: promotable-1 allocation score on node-2: 0 +pcmk__clone_allocate: promotable-1 allocation score on node-3: 0 +pcmk__clone_allocate: stateful-1:0 allocation score on node-1: 0 +pcmk__clone_allocate: stateful-1:0 allocation score on node-2: 0 +pcmk__clone_allocate: stateful-1:0 allocation score on node-3: 0 +pcmk__clone_allocate: stateful-1:1 allocation score on node-1: 0 +pcmk__clone_allocate: stateful-1:1 allocation score on node-2: 0 +pcmk__clone_allocate: 
stateful-1:1 allocation score on node-3: 11 +pcmk__clone_allocate: stateful-1:2 allocation score on node-1: 0 +pcmk__clone_allocate: stateful-1:2 allocation score on node-2: 0 +pcmk__clone_allocate: stateful-1:2 allocation score on node-3: 10 +pcmk__primitive_assign: st-sbd allocation score on node-1: 1 +pcmk__primitive_assign: st-sbd allocation score on node-2: 0 +pcmk__primitive_assign: st-sbd allocation score on node-3: 0 +pcmk__primitive_assign: stateful-1:0 allocation score on node-1: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on node-2: -INFINITY +pcmk__primitive_assign: stateful-1:0 allocation score on node-3: -INFINITY +pcmk__primitive_assign: stateful-1:1 allocation score on node-1: 0 +pcmk__primitive_assign: stateful-1:1 allocation score on node-2: -INFINITY +pcmk__primitive_assign: stateful-1:1 allocation score on node-3: 11 +pcmk__primitive_assign: stateful-1:2 allocation score on node-1: 0 +pcmk__primitive_assign: stateful-1:2 allocation score on node-2: -INFINITY +pcmk__primitive_assign: stateful-1:2 allocation score on node-3: -INFINITY +stateful-1:0 promotion score on none: 0 +stateful-1:1 promotion score on node-3: INFINITY +stateful-1:2 promotion score on node-1: -1 diff --git a/cts/scheduler/summary/leftover-pending-monitor.summary b/cts/scheduler/summary/leftover-pending-monitor.summary new file mode 100644 index 0000000000..ee79b5baad --- /dev/null +++ b/cts/scheduler/summary/leftover-pending-monitor.summary @@ -0,0 +1,31 @@ +Using the original execution date of: 2022-12-02 17:04:52Z +Current cluster status: + * Node List: + * Node node-2: pending + * Online: [ node-1 node-3 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Started node-1 + * Clone Set: promotable-1 [stateful-1] (promotable): + * stateful-1 (ocf:pacemaker:Stateful): Stopped node-1 (Monitoring) + * Promoted: [ node-3 ] + * Stopped: [ node-1 node-2 ] + +Transition Summary: + * Start stateful-1:2 ( node-1 ) due to unrunnable stateful-1:0 monitor (blocked) + +Executing Cluster Transition: + * Pseudo action: promotable-1_start_0 + * Pseudo action: promotable-1_running_0 +Using the original execution date of: 2022-12-02 17:04:52Z + +Revised Cluster Status: + * Node List: + * Node node-2: pending + * Online: [ node-1 node-3 ] + + * Full List of Resources: + * st-sbd (stonith:external/sbd): Started node-1 + * Clone Set: promotable-1 [stateful-1] (promotable): + * Promoted: [ node-3 ] + * Stopped: [ node-1 node-2 ] diff --git a/cts/scheduler/xml/leftover-pending-monitor.xml b/cts/scheduler/xml/leftover-pending-monitor.xml new file mode 100644 index 0000000000..06003c4b15 --- /dev/null +++ b/cts/scheduler/xml/leftover-pending-monitor.xml @@ -0,0 +1,82 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c index 8c61264c28..acb920f592 100644 --- a/lib/pacemaker/pcmk_sched_actions.c +++ b/lib/pacemaker/pcmk_sched_actions.c @@ -1,1924 +1,1928 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include "libpacemaker_private.h" /*! 
* \internal * \brief Get the action flags relevant to ordering constraints * * \param[in] action Action to check * \param[in] node Node that *other* action in the ordering is on * (used only for clone resource actions) * * \return Action flags that should be used for orderings */ static enum pe_action_flags action_flags_for_ordering(pe_action_t *action, pe_node_t *node) { bool runnable = false; enum pe_action_flags flags; // For non-resource actions, return the action flags if (action->rsc == NULL) { return action->flags; } /* For non-clone resources, or a clone action not assigned to a node, * return the flags as determined by the resource method without a node * specified. */ flags = action->rsc->cmds->action_flags(action, NULL); if ((node == NULL) || !pe_rsc_is_clone(action->rsc)) { return flags; } /* Otherwise (i.e., for clone resource actions on a specific node), first * remember whether the non-node-specific action is runnable. */ runnable = pcmk_is_set(flags, pe_action_runnable); // Then recheck the resource method with the node flags = action->rsc->cmds->action_flags(action, node); /* For clones in ordering constraints, the node-specific "runnable" doesn't * matter, just the non-node-specific setting (i.e., is the action runnable * anywhere). * * This applies only to runnable, and only for ordering constraints. This * function shouldn't be used for other types of constraints without * changes. Not very satisfying, but it's logical and appears to work well. */ if (runnable && !pcmk_is_set(flags, pe_action_runnable)) { pe__set_raw_action_flags(flags, action->rsc->id, pe_action_runnable); } return flags; } /*! * \internal * \brief Get action UUID that should be used with a resource ordering * * When an action is ordered relative to an action for a collective resource * (clone, group, or bundle), it actually needs to be ordered after all * instances of the collective have completed the relevant action (for example, * given "start CLONE then start RSC", RSC must wait until all instances of * CLONE have started). Given the UUID and resource of the first action in an * ordering, this returns the UUID of the action that should actually be used * for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0"). * * \param[in] first_uuid UUID of first action in ordering * \param[in] first_rsc Resource of first action in ordering * * \return Newly allocated copy of UUID to use with ordering * \note It is the caller's responsibility to free the return value. 
*/ static char * action_uuid_for_ordering(const char *first_uuid, pe_resource_t *first_rsc) { guint interval_ms = 0; char *uuid = NULL; char *rid = NULL; char *first_task_str = NULL; enum action_tasks first_task = no_action; enum action_tasks remapped_task = no_action; // Only non-notify actions for collective resources need remapping if ((strstr(first_uuid, "notify") != NULL) || (first_rsc->variant < pe_group)) { goto done; } // Only non-recurring actions need remapping CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms)); if (interval_ms > 0) { goto done; } first_task = text2task(first_task_str); switch (first_task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: remapped_task = first_task + 1; break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: remapped_task = first_task; break; case monitor_rsc: case shutdown_crm: case stonith_node: break; default: crm_err("Unknown action '%s' in ordering", first_task_str); break; } if (remapped_task != no_action) { /* If a (clone) resource has notifications enabled, we want to order * relative to when all notifications have been sent for the remapped * task. Only outermost resources or those in bundles have * notifications. */ if (pcmk_is_set(first_rsc->flags, pe_rsc_notify) && ((first_rsc->parent == NULL) || (pe_rsc_is_clone(first_rsc) && (first_rsc->parent->variant == pe_container)))) { uuid = pcmk__notify_key(rid, "confirmed-post", task2text(remapped_task)); } else { uuid = pcmk__op_key(rid, task2text(remapped_task), 0); } pe_rsc_trace(first_rsc, "Remapped action UUID %s to %s for ordering purposes", first_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(first_uuid); CRM_ASSERT(uuid != NULL); } free(first_task_str); free(rid); return uuid; } /*! * \internal * \brief Get actual action that should be used with an ordering * * When an action is ordered relative to an action for a collective resource * (clone, group, or bundle), it actually needs to be ordered after all * instances of the collective have completed the relevant action (for example, * given "start CLONE then start RSC", RSC must wait until all instances of * CLONE have started). Given the first action in an ordering, this returns the * action that should actually be used for ordering (for example, the * started action instead of the start action). * * \param[in] action First action in an ordering * * \return Actual action that should be used for the ordering */ static pe_action_t * action_for_ordering(pe_action_t *action) { pe_action_t *result = action; pe_resource_t *rsc = action->rsc; if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) { char *uuid = action_uuid_for_ordering(action->uuid, rsc); result = find_first_action(rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_warn("Not remapping %s to %s because %s does not have " "remapped action", action->uuid, uuid, rsc->id); result = action; } free(uuid); } return result; }
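As a minimal sketch of the remapping just described, assuming only the simple rsc_task_interval key layout shown in the doc comment above (this helper is hypothetical; the real parse_op_key() and notification handling are more involved):

# Map a collective resource's "do" key to its completion key, e.g.
# "CLONE_start_0" -> "CLONE_started_0" (illustrative only)
_DONE_TASKS = {
    "start": "started",
    "stop": "stopped",
    "promote": "promoted",
    "demote": "demoted",
}

def remap_ordering_key(first_uuid):
    """ Return the completion-key equivalent of a collective action key """
    if first_uuid.count("_") < 2:
        return first_uuid  # not an rsc_task_interval key
    rid, task, interval = first_uuid.rsplit("_", 2)
    if interval != "0" or task not in _DONE_TASKS:
        return first_uuid  # recurring or non-remappable actions keep their key
    return "%s_%s_0" % (rid, _DONE_TASKS[task])

# remap_ordering_key("CLONE_start_0") == "CLONE_started_0"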
/*! * \internal * \brief Update the actions in an ordering as appropriate for the ordering's flags * * \param[in] first First action in an ordering * \param[in] then Then action in an ordering * \param[in] first_flags Action flags for \p first for ordering purposes * \param[in] then_flags Action flags for \p then for ordering purposes * \param[in] order Action wrapper for \p first in ordering * \param[in] data_set Cluster working set * * \return Group of enum pcmk__updated flags */ static uint32_t update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then, enum pe_action_flags first_flags, enum pe_action_flags then_flags, pe_action_wrapper_t *order, pe_working_set_t *data_set) { uint32_t changed = pcmk__updated_none; /* The node will only be used for clones. If interleaved, node will be NULL, * otherwise the ordering scope will be limited to the node. Normally, the * whole 'then' clone should restart if 'first' is restarted, so then->node * is needed. */ pe_node_t *node = then->node; if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) { /* For unfencing, only instances of 'then' on the same node as 'first' * (the unfencing operation) should restart, so reset node to * first->node, at which point this case is handled like a normal * pe_order_implies_then. */ pe__clear_order_flags(order->type, pe_order_implies_then_on_node); pe__set_order_flags(order->type, pe_order_implies_then); node = first->node; pe_rsc_trace(then->rsc, "%s then %s: mapped pe_order_implies_then_on_node to " "pe_order_implies_then on %s", first->uuid, then->uuid, pe__node_name(node)); } if (pcmk_is_set(order->type, pe_order_implies_then)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_implies_then, data_set); } else if (!pcmk_is_set(first_flags, pe_action_optional) && pcmk_is_set(then->flags, pe_action_optional)) { pe__clear_action_flags(then, pe_action_optional); pcmk__set_updated_flags(changed, first, pcmk__updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) { enum pe_action_flags restart = pe_action_optional|pe_action_runnable; changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, restart, pe_order_restart, data_set); pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_implies_first)) { if (first->rsc != NULL) { changed |= first->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first, data_set); } else if (!pcmk_is_set(first_flags, pe_action_optional) && pcmk_is_set(first->flags, pe_action_runnable)) { pe__clear_action_flags(first, pe_action_runnable); pcmk__set_updated_flags(changed, first, pcmk__updated_first); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_promoted_implies_first, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_promoted_implies_first", first->uuid, then->uuid, (changed? 
"changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_one_or_more)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_runnable, pe_order_one_or_more, data_set); } else if (pcmk_is_set(first_flags, pe_action_runnable)) { // We have another runnable instance of "first" then->runnable_before++; /* Mark "then" as runnable if it requires a certain number of * "before" instances to be runnable, and they now are. */ if ((then->runnable_before >= then->required_runnable_before) && !pcmk_is_set(then->flags, pe_action_runnable)) { pe__set_action_flags(then, pe_action_runnable); pcmk__set_updated_flags(changed, first, pcmk__updated_then); } } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) { if (!pcmk_is_set(first_flags, pe_action_runnable) && (first->rsc->running_on != NULL)) { pe_rsc_trace(then->rsc, "%s then %s: ignoring because first is stopping", first->uuid, then->uuid); order->type = pe_order_none; } else { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_runnable, pe_order_runnable_left, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_runnable_left)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_runnable, pe_order_runnable_left, data_set); } else if (!pcmk_is_set(first_flags, pe_action_runnable) && pcmk_is_set(then->flags, pe_action_runnable)) { pe__clear_action_flags(then, pe_action_runnable); pcmk__set_updated_flags(changed, first, pcmk__updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first_migratable, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after " "pe_order_implies_first_migratable", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_pseudo_left)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_optional, pe_order_pseudo_left, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_optional)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_runnable, pe_order_optional, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_asymmetrical)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_ordered_actions(first, then, node, first_flags, pe_action_runnable, pe_order_asymmetrical, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical", first->uuid, then->uuid, (changed? 
"changed" : "unchanged")); } if (pcmk_is_set(first->flags, pe_action_runnable) && pcmk_is_set(order->type, pe_order_implies_then_printed) && !pcmk_is_set(first_flags, pe_action_optional)) { pe_rsc_trace(then->rsc, "%s will be in graph because %s is required", then->uuid, first->uuid); pe__set_action_flags(then, pe_action_print_always); // Don't bother marking 'then' as changed just for this } if (pcmk_is_set(order->type, pe_order_implies_first_printed) && !pcmk_is_set(then_flags, pe_action_optional)) { pe_rsc_trace(then->rsc, "%s will be in graph because %s is required", first->uuid, then->uuid); pe__set_action_flags(first, pe_action_print_always); // Don't bother marking 'first' as changed just for this } if (pcmk_any_flags_set(order->type, pe_order_implies_then |pe_order_implies_first |pe_order_restart) && (first->rsc != NULL) && !pcmk_is_set(first->rsc->flags, pe_rsc_managed) && pcmk_is_set(first->rsc->flags, pe_rsc_block) && !pcmk_is_set(first->flags, pe_action_runnable) && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)) { if (pcmk_is_set(then->flags, pe_action_runnable)) { pe__clear_action_flags(then, pe_action_runnable); pcmk__set_updated_flags(changed, first, pcmk__updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first " "is blocked, unmanaged, unrunnable stop", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } return changed; } // Convenience macros for logging action properties #define action_type_str(flags) \ (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action") #define action_optional_str(flags) \ (pcmk_is_set((flags), pe_action_optional)? "optional" : "required") #define action_runnable_str(flags) \ (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable") #define action_node_str(a) \ (((a)->node == NULL)? "no node" : (a)->node->details->uname) /*! * \internal * \brief Update an action's flags for all orderings where it is "then" * * \param[in] then Action to update * \param[in] data_set Cluster working set */ void pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set) { GList *lpc = NULL; uint32_t changed = pcmk__updated_none; int last_flags = then->flags; pe_rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s", action_type_str(then->flags), then->uuid, action_optional_str(then->flags), action_runnable_str(then->flags), action_node_str(then)); if (pcmk_is_set(then->flags, pe_action_requires_any)) { /* Initialize current known "runnable before" actions. As * update_action_for_ordering_flags() is called for each of then's * before actions, this number will increment as runnable 'first' * actions are encountered. */ then->runnable_before = 0; if (then->required_runnable_before == 0) { /* @COMPAT This ordering constraint uses the deprecated * "require-all=false" attribute. Treat it like "clone-min=1". */ then->required_runnable_before = 1; } /* The pe_order_one_or_more clause of update_action_for_ordering_flags() * (called below) will reset runnable if appropriate. 
*/ pe__clear_action_flags(then, pe_action_runnable); } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data; pe_action_t *first = other->action; pe_node_t *then_node = then->node; pe_node_t *first_node = first->node; if ((first->rsc != NULL) && (first->rsc->variant == pe_group) && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node != NULL) { pe_rsc_trace(first->rsc, "Found %s for 'first' %s", pe__node_name(first_node), first->uuid); } } if ((then->rsc != NULL) && (then->rsc->variant == pe_group) && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node != NULL) { pe_rsc_trace(then->rsc, "Found %s for 'then' %s", pe__node_name(then_node), then->uuid); } } // Disable constraint if it only applies when on same node, but isn't if (pcmk_is_set(other->type, pe_order_same_node) && (first_node != NULL) && (then_node != NULL) && (first_node->details != then_node->details)) { pe_rsc_trace(then->rsc, "Disabled ordering %s on %s then %s on %s: not same node", other->action->uuid, pe__node_name(first_node), then->uuid, pe__node_name(then_node)); other->type = pe_order_none; continue; } pcmk__clear_updated_flags(changed, then, pcmk__updated_first); if ((first->rsc != NULL) && pcmk_is_set(other->type, pe_order_then_cancels_first) && !pcmk_is_set(then->flags, pe_action_optional)) { /* 'then' is required, so we must abandon 'first' * (e.g. a required stop cancels any agent reload). */ pe__set_action_flags(other->action, pe_action_optional); if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) { pe__clear_resource_flags(first->rsc, pe_rsc_reload); } } if ((first->rsc != NULL) && (then->rsc != NULL) && (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) { first = action_for_ordering(first); } if (first != other->action) { pe_rsc_trace(then->rsc, "Ordering %s after %s instead of %s", then->uuid, first->uuid, other->action->uuid); } pe_rsc_trace(then->rsc, "%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s", first->uuid, first->flags, then->uuid, then->flags, other->type, action_node_str(first)); if (first == other->action) { /* 'first' was not remapped (e.g. from 'start' to 'running'), which * could mean it is a non-resource action, a primitive resource * action, or already expanded. 
*/ enum pe_action_flags first_flags, then_flags; first_flags = action_flags_for_ordering(first, then_node); then_flags = action_flags_for_ordering(then, first_node); changed |= update_action_for_ordering_flags(first, then, first_flags, then_flags, other, data_set); /* 'first' was for a complex resource (clone, group, etc), * create a new dependency if necessary */ } else if (order_actions(first, then, other->type)) { /* This was the first time 'first' and 'then' were associated, * start again to get the new actions_before list */ pcmk__set_updated_flags(changed, then, pcmk__updated_then); pe_rsc_trace(then->rsc, "Disabled ordering %s then %s in favor of %s then %s", other->action->uuid, then->uuid, first->uuid, then->uuid); other->type = pe_order_none; } if (pcmk_is_set(changed, pcmk__updated_first)) { crm_trace("Re-processing %s and its 'after' actions " "because it changed", first->uuid); for (GList *lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data; pcmk__update_action_for_orderings(other->action, data_set); } pcmk__update_action_for_orderings(first, data_set); } } if (pcmk_is_set(then->flags, pe_action_requires_any)) { if (last_flags == then->flags) { pcmk__clear_updated_flags(changed, then, pcmk__updated_then); } else { pcmk__set_updated_flags(changed, then, pcmk__updated_then); } } if (pcmk_is_set(changed, pcmk__updated_then)) { crm_trace("Re-processing %s and its 'after' actions because it changed", then->uuid); if (pcmk_is_set(last_flags, pe_action_runnable) && !pcmk_is_set(then->flags, pe_action_runnable)) { pcmk__block_colocation_dependents(then, data_set); } pcmk__update_action_for_orderings(then, data_set); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data; pcmk__update_action_for_orderings(other->action, data_set); } } }
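The requires-any bookkeeping above can be summarized schematically: 'then' starts out unrunnable, every runnable 'first' increments runnable_before, and 'then' becomes runnable again once the threshold is met. A toy model of that counting (not Pacemaker code; the names are invented for illustration):

from dataclasses import dataclass

@dataclass
class ToyAction:
    runnable: bool = False
    runnable_before: int = 0
    required_runnable_before: int = 0

def update_requires_any(then, firsts):
    """ Make 'then' runnable once enough 'first' actions are runnable """
    then.runnable = False
    then.runnable_before = 0
    # Deprecated require-all=false is treated like clone-min=1
    threshold = then.required_runnable_before or 1
    for first in firsts:
        if first.runnable:
            then.runnable_before += 1
    then.runnable = then.runnable_before >= threshold
    return then.runnable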
static inline bool is_primitive_action(pe_action_t *action) { return action && action->rsc && (action->rsc->variant == pe_native); } /*! * \internal * \brief Clear a single action flag and set reason text * * \param[in] action Action whose flag should be cleared * \param[in] flag Action flag that should be cleared * \param[in] reason Action that is the reason why flag is being cleared */ #define clear_action_flag_because(action, flag, reason) do { \ if (pcmk_is_set((action)->flags, (flag))) { \ pe__clear_action_flags(action, flag); \ if ((action)->rsc != (reason)->rsc) { \ char *reason_text = pe__action2reason((reason), (flag)); \ pe_action_set_reason((action), reason_text, \ ((flag) == pe_action_migrate_runnable)); \ free(reason_text); \ } \ } \ } while (0) /*! * \internal * \brief Update actions in an asymmetric ordering * * \param[in] first 'First' action in an asymmetric ordering * \param[in] then 'Then' action in an asymmetric ordering */ static void handle_asymmetric_ordering(pe_action_t *first, pe_action_t *then) { enum rsc_role_e then_rsc_role = RSC_ROLE_UNKNOWN; GList *then_on = NULL; if (then->rsc == NULL) { // Asymmetric orderings only matter if there's a resource involved return; } then_rsc_role = then->rsc->fns->state(then->rsc, TRUE); then_on = then->rsc->running_on; if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_none)) { /* Nothing needs to be done for asymmetric ordering if 'then' is * supposed to be stopped after 'first' but is already stopped. */ return; } if ((then_rsc_role >= RSC_ROLE_STARTED) && pcmk_is_set(then->flags, pe_action_optional) && (then->node != NULL) && pcmk__list_of_1(then_on) && (then->node->details == ((pe_node_t *) then_on->data)->details) && pcmk__str_eq(then->task, RSC_START, pcmk__str_none)) { /* Nothing needs to be done for asymmetric ordering if 'then' is * supposed to be started after 'first' but is already started -- * unless the start is mandatory, which indicates the resource is * restarting and the ordering is still needed. */ return; } if (!pcmk_is_set(first->flags, pe_action_runnable)) { // 'First' can't run, so 'then' can't either clear_action_flag_because(then, pe_action_optional, first); clear_action_flag_because(then, pe_action_runnable, first); } } /*! * \internal * \brief Set action bits appropriately when pe_restart_order is used * * \param[in] first 'First' action in an ordering with pe_restart_order * \param[in] then 'Then' action in an ordering with pe_restart_order * \param[in] filter What action flags to care about * * \note pe_restart_order is set for "stop resource before starting it" and * "stop later group member before stopping earlier group member" */ static void handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter) { const char *reason = NULL; CRM_ASSERT(is_primitive_action(first)); CRM_ASSERT(is_primitive_action(then)); // We need to update the action in two cases: // ... if 'then' is required if (pcmk_is_set(filter, pe_action_optional) && !pcmk_is_set(then->flags, pe_action_optional)) { reason = "restart"; } /* ... if 'then' is an unrunnable action on the same resource (if a resource * should restart but can't start, we still want to stop) */ if (pcmk_is_set(filter, pe_action_runnable) && !pcmk_is_set(then->flags, pe_action_runnable) && pcmk_is_set(then->rsc->flags, pe_rsc_managed) && (first->rsc == then->rsc)) { reason = "stop"; } if (reason == NULL) { return; } pe_rsc_trace(first->rsc, "Handling %s -> %s for %s", first->uuid, then->uuid, reason); // Make 'first' required if it is runnable if (pcmk_is_set(first->flags, pe_action_runnable)) { clear_action_flag_because(first, pe_action_optional, then); } // Make 'first' required if 'then' is required if (!pcmk_is_set(then->flags, pe_action_optional)) { clear_action_flag_because(first, pe_action_optional, then); } // Make 'first' unmigratable if 'then' is unmigratable if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) { clear_action_flag_because(first, pe_action_migrate_runnable, then); } // Make 'then' unrunnable if 'first' is required but unrunnable if (!pcmk_is_set(first->flags, pe_action_optional) && !pcmk_is_set(first->flags, pe_action_runnable)) { clear_action_flag_because(then, pe_action_runnable, first); } } /*! * \internal * \brief Update two actions according to an ordering between them * * Given information about an ordering of two actions, update the actions' * flags (and runnable_before members where relevant) as appropriate for the * ordering. In some cases, the ordering could be disabled as well. 
* * \param[in,out] first 'First' action in an ordering * \param[in,out] then 'Then' action in an ordering * \param[in] node If not NULL, limit scope of ordering to this node * (ignored) * \param[in] flags Action flags for \p first for ordering purposes * \param[in] filter Action flags to limit scope of certain updates (may * include pe_action_optional to affect only mandatory * actions, and pe_action_runnable to affect only * runnable actions) * \param[in] type Group of enum pe_ordering flags to apply * \param[in,out] data_set Cluster working set * * \return Group of enum pcmk__updated flags indicating what was updated */ uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set) { uint32_t changed = pcmk__updated_none; uint32_t then_flags = then->flags; uint32_t first_flags = first->flags; if (pcmk_is_set(type, pe_order_asymmetrical)) { handle_asymmetric_ordering(first, then); } if (pcmk_is_set(type, pe_order_implies_first) && !pcmk_is_set(then_flags, pe_action_optional)) { // Then is required, and implies first should be, too if (pcmk_is_set(filter, pe_action_optional) && !pcmk_is_set(flags, pe_action_optional) && pcmk_is_set(first_flags, pe_action_optional)) { clear_action_flag_because(first, pe_action_optional, then); } if (pcmk_is_set(flags, pe_action_migrate_runnable) && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) { clear_action_flag_because(first, pe_action_migrate_runnable, then); } } if (pcmk_is_set(type, pe_order_promoted_implies_first) && (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED) && pcmk_is_set(filter, pe_action_optional) && !pcmk_is_set(then->flags, pe_action_optional)) { clear_action_flag_because(first, pe_action_optional, then); if (pcmk_is_set(first->flags, pe_action_migrate_runnable) && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) { clear_action_flag_because(first, pe_action_migrate_runnable, then); } } if (pcmk_is_set(type, pe_order_implies_first_migratable) && pcmk_is_set(filter, pe_action_optional)) { if (!pcmk_all_flags_set(then->flags, pe_action_migrate_runnable|pe_action_runnable)) { clear_action_flag_because(first, pe_action_runnable, then); } if (!pcmk_is_set(then->flags, pe_action_optional)) { clear_action_flag_because(first, pe_action_optional, then); } } if (pcmk_is_set(type, pe_order_pseudo_left) && pcmk_is_set(filter, pe_action_optional) && !pcmk_is_set(first->flags, pe_action_runnable)) { clear_action_flag_because(then, pe_action_migrate_runnable, first); pe__clear_action_flags(then, pe_action_pseudo); } if (pcmk_is_set(type, pe_order_runnable_left) && pcmk_is_set(filter, pe_action_runnable) && pcmk_is_set(then->flags, pe_action_runnable) && !pcmk_is_set(flags, pe_action_runnable)) { clear_action_flag_because(then, pe_action_runnable, first); clear_action_flag_because(then, pe_action_migrate_runnable, first); } if (pcmk_is_set(type, pe_order_implies_then) && pcmk_is_set(filter, pe_action_optional) && pcmk_is_set(then->flags, pe_action_optional) && !pcmk_is_set(flags, pe_action_optional) && !pcmk_is_set(first->flags, pe_action_migrate_runnable)) { clear_action_flag_because(then, pe_action_optional, first); } if (pcmk_is_set(type, pe_order_restart)) { handle_restart_ordering(first, then, filter); } if (then_flags != then->flags) { pcmk__set_updated_flags(changed, first, pcmk__updated_then); pe_rsc_trace(then->rsc, "%s on %s: flags are now %#.6x (was %#.6x) " "because of 'first' %s (%#.6x)", then->uuid, 
pe__node_name(then->node), then->flags, then_flags, first->uuid, first->flags); if ((then->rsc != NULL) && (then->rsc->parent != NULL)) { // Required to handle "X_stop then X_start" for cloned groups pcmk__update_action_for_orderings(then, data_set); } } if (first_flags != first->flags) { pcmk__set_updated_flags(changed, first, pcmk__updated_first); pe_rsc_trace(first->rsc, "%s on %s: flags are now %#.6x (was %#.6x) " "because of 'then' %s (%#.6x)", first->uuid, pe__node_name(first->node), first->flags, first_flags, then->uuid, then->flags); } return changed; } /*! * \internal * \brief Trace-log an action (optionally with its dependent actions) * * \param[in] pre_text If not NULL, prefix the log with this plus ": " * \param[in] action Action to log * \param[in] details If true, recursively log dependent actions */ void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details) { const char *node_uname = NULL; const char *node_uuid = NULL; const char *desc = NULL; CRM_CHECK(action != NULL, return); if (!pcmk_is_set(action->flags, pe_action_pseudo)) { if (action->node != NULL) { node_uname = action->node->details->uname; node_uuid = action->node->details->id; } else { node_uname = ""; } } switch (text2task(action->task)) { case stonith_node: case shutdown_crm: if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid? ")" : "")); break; default: if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (action->rsc? action->rsc->id : ""), (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid? ")" : "")); break; } if (details) { GList *iter = NULL; crm_trace("\t\t====== Preceding Actions"); for (iter = action->actions_before; iter != NULL; iter = iter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data; pcmk__log_action("\t\t", other->action, false); } crm_trace("\t\t====== Subsequent Actions"); for (iter = action->actions_after; iter != NULL; iter = iter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data; pcmk__log_action("\t\t", other->action, false); } crm_trace("\t\t====== End"); } else { crm_trace("\t\t(before=%d, after=%d)", g_list_length(action->actions_before), g_list_length(action->actions_after)); } } /*! 
* \internal * \brief Create a new shutdown action for a node * * \param[in] node Node being shut down * * \return Newly created shutdown action for \p node */ pe_action_t * pcmk__new_shutdown_action(pe_node_t *node) { char *shutdown_id = NULL; pe_action_t *shutdown_op = NULL; CRM_ASSERT(node != NULL); shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname); shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE, TRUE, node->details->data_set); pcmk__order_stops_before_shutdown(node, shutdown_op); add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); return shutdown_op; } /*! * \internal * \brief Calculate and add an operation digest to XML * * Calculate an operation digest, which enables us to later determine when a * restart is needed due to the resource's parameters being changed, and add it * to given XML. * * \param[in] op Operation result from executor * \param[in] update XML to add digest to */ static void add_op_digest_to_xml(lrmd_event_data_t *op, xmlNode *update) { char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); pcmk__filter_op_for_digest(args_xml); digest = calculate_operation_digest(args_xml, NULL); crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" /*! * \internal * \brief Create XML for resource operation history update * * \param[in,out] parent Parent XML node to add to * \param[in,out] op Operation event data * \param[in] caller_version DC feature set * \param[in] target_rc Expected result of operation * \param[in] node Name of node on which operation was performed * \param[in] origin Arbitrary description of update source * * \return Newly created XML node for history update */ xmlNode * pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op, const char *caller_version, int target_rc, const char *node, const char *origin) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *op_id_additional = NULL; char *local_user_data = NULL; const char *exit_reason = NULL; xmlNode *xml_op = NULL; const char *task = NULL; CRM_CHECK(op != NULL, return NULL); crm_trace("Creating history XML for %s-interval %s action for %s on %s " "(DC version: %s, origin: %s)", pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id, ((node == NULL)? "no node" : node), caller_version, origin); task = op->op_type; /* Record a successful agent reload as a start, and a failed one as a * monitor, to make life easier for the scheduler when determining the * current state. * * @COMPAT We should check "reload" here only if the operation was for a * pre-OCF-1.1 resource agent, but we don't know that here, and we should * only ever get results for actions scheduled by us, so we can reasonably * assume any "reload" is actually a pre-1.1 agent reload. 
*/ if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT, NULL)) { if (op->op_status == PCMK_EXEC_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } key = pcmk__op_key(op->rsc_id, task, op->interval_ms); if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = pcmk__notify_key(op->rsc_id, n_type, n_task); if (op->op_status != PCMK_EXEC_PENDING) { /* Ignore notify errors. * * @TODO It might be better to keep the correct result here, and * ignore it in process_graph_event(). */ lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); } /* Migration history is preserved separately, which usually matters for * multiple nodes and is important for future cluster transitions. */ } else if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { op_id = strdup(key); } else if (did_rsc_op_fail(op, target_rc)) { op_id = pcmk__op_key(op->rsc_id, "last_failure", 0); if (op->interval_ms == 0) { // Ensure 'last' gets updated, in case record-pending is true op_id_additional = pcmk__op_key(op->rsc_id, "last", 0); + + } else { + // Ensure any pending recurring monitor gets updated if it fails + op_id_additional = strdup(key); } exit_reason = op->exit_reason; } else if (op->interval_ms > 0) { op_id = strdup(key); } else { op_id = pcmk__op_key(op->rsc_id, "last", 0); } again: xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for: " PCMK__OP_FMT " %d from %s", op->rsc_id, op->op_type, op->interval_ms, op->call_id, origin); local_user_data = pcmk__transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } if (magic == NULL) { magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc, (const char *) op->user_data); } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? 
"" : exit_reason); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */ crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (" PCMK__OP_FMT "): last=%u change=%u exec=%u queue=%u", op->rsc_id, op->op_type, op->interval_ms, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if ((op->interval_ms != 0) && (op->t_rcchange != 0)) { // Recurring ops may have changed rc after initial run crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_rcchange); } else { crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { /* * Record migrate_source and migrate_target always for migrate ops. */ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } add_op_digest_to_xml(op, xml_op); if (op_id_additional) { free(op_id); op_id = op_id_additional; op_id_additional = NULL; goto again; } if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } /*! * \internal * \brief Check whether an action shutdown-locks a resource to a node * * If the shutdown-lock cluster property is set, resources will not be recovered * on a different node if cleanly stopped, and may start only on that same node. * This function checks whether that applies to a given action, so that the * transition graph can be marked appropriately. * * \param[in] action Action to check * * \return true if \p action locks its resource to the action's node, * otherwise false */ bool pcmk__action_locks_rsc_to_node(const pe_action_t *action) { // Only resource actions taking place on resource's lock node are locked if ((action == NULL) || (action->rsc == NULL) || (action->rsc->lock_node == NULL) || (action->node == NULL) || (action->node->details != action->rsc->lock_node->details)) { return false; } /* During shutdown, only stops are locked (otherwise, another action such as * a demote would cause the controller to clear the lock) */ if (action->node->details->shutdown && (action->task != NULL) && (strcmp(action->task, RSC_STOP) != 0)) { return false; } return true; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a; const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } return 0; } /*! 
* \internal * \brief Remove any duplicate action inputs, merging action flags * * \param[in] action Action whose inputs should be checked */ void pcmk__deduplicate_action_inputs(pe_action_t *action) { GList *item = NULL; GList *next = NULL; pe_action_wrapper_t *last_input = NULL; action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (item = action->actions_before; item != NULL; item = next) { pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data; next = item->next; if ((last_input != NULL) && (input->action->id == last_input->action->id)) { crm_trace("Input %s (%d) duplicate skipped for action %s (%d)", input->action->uuid, input->action->id, action->uuid, action->id); /* For the purposes of scheduling, the ordering flags no longer * matter, but crm_simulate looks at certain ones when creating a * dot graph. Combining the flags is sufficient for that purpose. */ last_input->type |= input->type; if (input->state == pe_link_dumped) { last_input->state = pe_link_dumped; } free(item->data); action->actions_before = g_list_delete_link(action->actions_before, item); } else { last_input = input; input->state = pe_link_not_dumped; } } } /*! * \internal * \brief Output all scheduled actions * * \param[in] data_set Cluster working set */ void pcmk__output_actions(pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; // Output node (non-resource) actions for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) { char *node_name = NULL; char *task = NULL; pe_action_t *action = (pe_action_t *) iter->data; if (action->rsc != NULL) { continue; // Resource actions will be output later } else if (pcmk_is_set(action->flags, pe_action_optional)) { continue; // This action was not scheduled } if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) { task = strdup("Shutdown"); } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); task = crm_strdup_printf("Fence (%s)", op); } else { continue; // Don't display other node action types } if (pe__is_guest_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", pe__node_name(action->node), action->node->details->remote_rsc->container->id); } else if (action->node != NULL) { node_name = crm_strdup_printf("%s", pe__node_name(action->node)); } out->message(out, "node-action", task, node_name, action->reason); free(node_name); free(task); } // Output resource actions for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; rsc->cmds->output_actions(rsc); } } /*! * \internal * \brief Check whether action from resource history is still in configuration * * \param[in] rsc Resource that action is for * \param[in] task Action's name * \param[in] interval_ms Action's interval (in milliseconds) * * \return true if action is still in resource configuration, otherwise false */ static bool action_in_config(pe_resource_t *rsc, const char *task, guint interval_ms) { char *key = pcmk__op_key(rsc->id, task, interval_ms); bool config = (find_rsc_op_entry(rsc, key) != NULL); free(key); return config; } /*! 
* \internal * \brief Get action name needed to compare digest for configuration changes * * \param[in] task Action name from history * \param[in] interval_ms Action interval (in milliseconds) * * \return Action name whose digest should be compared */ static const char * task_for_digest(const char *task, guint interval_ms) { /* Certain actions need to be compared against the parameters used to start * the resource. */ if ((interval_ms == 0) && pcmk__str_any_of(task, RSC_STATUS, RSC_MIGRATED, RSC_PROMOTE, NULL)) { task = RSC_START; } return task; } /*! * \internal * \brief Check whether only sanitized parameters to an action changed * * When collecting CIB files for troubleshooting, crm_report will mask * sensitive resource parameters. If simulations were run using such a masked * CIB, affected resources would appear to need a restart, which would * complicate troubleshooting. To avoid that, we save a "secure digest" of * non-sensitive parameters. This function uses that digest to check whether * only masked parameters are different. * * \param[in] xml_op Resource history entry with secure digest * \param[in] digest_data Operation digest information being compared * \param[in] data_set Cluster working set * * \return true if only sanitized parameters changed, otherwise false */ static bool only_sanitized_changed(xmlNode *xml_op, const op_digest_cache_t *digest_data, pe_working_set_t *data_set) { const char *digest_secure = NULL; if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) { // The scheduler is not being run as a simulation return false; } digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL) && (digest_data->digest_secure_calc != NULL) && (strcmp(digest_data->digest_secure_calc, digest_secure) == 0); } /*! * \internal * \brief Force a restart due to a configuration change * * \param[in] rsc Resource that action is for * \param[in] task Name of action whose configuration changed * \param[in] interval_ms Action interval (in milliseconds) * \param[in] node Node where resource should be restarted */ static void force_restart(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node) { char *key = pcmk__op_key(rsc->id, task, interval_ms); pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE, rsc->cluster); pe_action_set_reason(required, "resource definition change", true); trigger_unfencing(rsc, node, "Device parameters changed", NULL, rsc->cluster); } /*! * \internal * \brief Schedule a reload of a resource on a node * * \param[in] rsc Resource to reload * \param[in] node Where resource should be reloaded */ static void schedule_reload(pe_resource_t *rsc, pe_node_t *node) { pe_action_t *reload = NULL; // For collective resources, just call recursively for children if (rsc->variant > pe_native) { g_list_foreach(rsc->children, (GFunc) schedule_reload, node); return; } // Skip the reload in certain situations if ((node == NULL) || !pcmk_is_set(rsc->flags, pe_rsc_managed) || pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s", rsc->id, pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged", pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "", (node == NULL)? "inactive" : node->details->uname); return; } /* If a resource's configuration changed while a start was pending, * force a full restart instead of a reload.
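* * As a sketch (with a hypothetical parameter name): if a start is still pending when a resource's "config_file" parameter changes, the agent may never have read the old value, so a reload could apply a mix of old and new settings; a full stop/start is the safe recovery.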
*/ if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "%s: preventing agent reload because start pending", rsc->id); custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE, rsc->cluster); return; } // Schedule the reload pe__set_resource_flags(rsc, pe_rsc_reload); reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node, FALSE, TRUE, rsc->cluster); pe_action_set_reason(reload, "resource definition change", FALSE); // Set orderings so that a required stop or demote cancels the reload pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, rsc->cluster); pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, rsc->cluster); } /*! * \internal * \brief Handle any configuration change for an action * * Given an action from resource history, if the resource's configuration * changed since the action was done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, etc.). * * \param[in] rsc Resource that action is for * \param[in] node Node that action was on * \param[in] xml_op Action XML from resource history * * \return true if action configuration changed, otherwise false */ bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op) { guint interval_ms = 0; const char *task = NULL; const op_digest_cache_t *digest_data = NULL; CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL), return false); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); CRM_CHECK(task != NULL, return false); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); // If this is a recurring action, check whether it has been orphaned if (interval_ms > 0) { if (action_in_config(rsc, task, interval_ms)) { pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration", pcmk__readable_interval(interval_ms), task, rsc->id, pe__node_name(node)); } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_action_orphans)) { pcmk__schedule_cancel(rsc, crm_element_value(xml_op, XML_LRM_ATTR_CALLID), task, interval_ms, node, "orphan"); return true; } else { pe_rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned", pcmk__readable_interval(interval_ms), task, rsc->id, pe__node_name(node)); return true; } } crm_trace("Checking %s-interval %s for %s on %s for configuration changes", pcmk__readable_interval(interval_ms), task, rsc->id, pe__node_name(node)); task = task_for_digest(task, interval_ms); digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster); if (only_sanitized_changed(xml_op, digest_data, rsc->cluster)) { if (!pcmk__is_daemon && (rsc->cluster->priv != NULL)) { pcmk__output_t *out = rsc->cluster->priv; out->info(out, "Only 'private' parameters to %s-interval %s for %s " "on %s changed: %s", pcmk__readable_interval(interval_ms), task, rsc->id, pe__node_name(node), crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } return false; } switch (digest_data->rc) { case RSC_DIGEST_RESTART: crm_log_xml_debug(digest_data->params_restart, "params:restart"); force_restart(rsc, task, interval_ms, node); return true; case RSC_DIGEST_ALL: case RSC_DIGEST_UNKNOWN: // Changes that can potentially be handled by an agent reload if (interval_ms > 0) { /* Recurring actions aren't reloaded per se, they are just * re-scheduled so the next run uses the new parameters. * The old instance will be cancelled automatically. 
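* * For example (hypothetical values): if the definition of a 10-second-interval monitor changes, the recurring monitor recorded in the history is cancelled and a new one is scheduled with the updated parameters.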
*/ crm_log_xml_debug(digest_data->params_all, "params:reschedule"); pcmk__reschedule_recurring(rsc, task, interval_ms, node); } else if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { // Agent supports reload, so use it trigger_unfencing(rsc, node, "Device parameters changed (reload)", NULL, rsc->cluster); crm_log_xml_debug(digest_data->params_all, "params:reload"); schedule_reload(rsc, node); } else { pe_rsc_trace(rsc, "Restarting %s because agent doesn't support reload", rsc->id); crm_log_xml_debug(digest_data->params_restart, "params:restart"); force_restart(rsc, task, interval_ms, node); } return true; default: break; } return false; } /*! * \internal * \brief Create a list of a resource's action history entries, sorted by call ID * * \param[in] rsc Resource whose history should be checked * \param[in] rsc_entry Resource's status XML * \param[out] start_index Where to store index of start-like action, if any * \param[out] stop_index Where to store index of stop action, if any */ static GList * rsc_history_as_list(pe_resource_t *rsc, xmlNode *rsc_entry, int *start_index, int *stop_index) { GList *ops = NULL; for (xmlNode *rsc_op = first_named_child(rsc_entry, XML_LRM_TAG_RSC_OP); rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) { ops = g_list_prepend(ops, rsc_op); } ops = g_list_sort(ops, sort_op_by_callid); calculate_active_ops(ops, start_index, stop_index); return ops; } /*! * \internal * \brief Process a resource's action history from the CIB status * * Given a resource's action history, if the resource's configuration * changed since the actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.)
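* * As a sketch (hypothetical history): if a resource's sorted history on a node is a start (call 5) followed by a recurring monitor (call 8), only actions at or after the start index are checked; in maintenance mode, the recurring monitor would instead be cancelled.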
* * \param[in] rsc_entry Resource's status XML * \param[in] rsc Resource whose history is being processed * \param[in] node Node whose history is being processed */ static void process_rsc_history(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node) { int offset = -1; int stop_index = 0; int start_index = 0; GList *sorted_op_list = NULL; if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { if (pe_rsc_is_anon_clone(uber_parent(rsc))) { pe_rsc_trace(rsc, "Skipping configuration check " "for orphaned clone instance %s", rsc->id); } else { pe_rsc_trace(rsc, "Skipping configuration check and scheduling clean-up " "for orphaned resource %s", rsc->id); pcmk__schedule_cleanup(rsc, node, false); } return; } if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) { pcmk__schedule_cleanup(rsc, node, false); } pe_rsc_trace(rsc, "Skipping configuration check for %s " "because no longer active on %s", rsc->id, pe__node_name(node)); return; } pe_rsc_trace(rsc, "Checking for configuration changes for %s on %s", rsc->id, pe__node_name(node)); if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) { pcmk__schedule_cleanup(rsc, node, false); } sorted_op_list = rsc_history_as_list(rsc, rsc_entry, &start_index, &stop_index); if (start_index < stop_index) { return; // Resource is stopped } for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { xmlNode *rsc_op = (xmlNode *) iter->data; const char *task = NULL; guint interval_ms = 0; if (++offset < start_index) { // Skip actions that happened before a start continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms > 0) && (pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { // Maintenance mode cancels recurring operations pcmk__schedule_cancel(rsc, crm_element_value(rsc_op, XML_LRM_ATTR_CALLID), task, interval_ms, node, "maintenance mode"); } else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START, RSC_PROMOTE, RSC_MIGRATED, NULL)) { /* If a resource operation failed, and the operation's definition * has changed, clear any fail count so the operation can be * retried fresh. */ if (pe__bundle_needs_remote_name(rsc, rsc->cluster)) { /* We haven't allocated resources to nodes yet, so if the * REMOTE_CONTAINER_HACK is used, we may calculate the digest * based on the literal "#uname" value rather than the properly * substituted value. That would mistakenly make the action * definition appear to have been changed. Defer the check until * later in this case. */ pe__add_param_check(rsc_op, rsc, node, pe_check_active, rsc->cluster); } else if (pcmk__check_action_config(rsc, node, rsc_op) && (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, rsc->cluster) != 0)) { pe__clear_failcount(rsc, node, "action definition changed", rsc->cluster); } } } g_list_free(sorted_op_list); } /*! * \internal * \brief Process a node's action history from the CIB status * * Given a node's resource history, if the resource's configuration changed * since the actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.)
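* * The history examined here is the node's <lrm_resources> XML, along these lines (a sketch with hypothetical IDs and most attributes omitted): * * <lrm_resources> * <lrm_resource id="rsc1"> * <lrm_rsc_op id="rsc1_last_0" operation="start" call-id="5" .../> * </lrm_resource> * </lrm_resources>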
* * \param[in] node Node whose history is being processed * \param[in] lrm_rscs Node's <lrm_resources> from CIB status XML * \param[in] data_set Cluster working set */ static void process_node_history(pe_node_t *node, xmlNode *lrm_rscs, pe_working_set_t *data_set) { crm_trace("Processing node history for %s", pe__node_name(node)); for (xmlNode *rsc_entry = first_named_child(lrm_rscs, XML_LRM_TAG_RESOURCE); rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { if (xml_has_children(rsc_entry)) { GList *result = pcmk__rscs_matching_id(ID(rsc_entry), data_set); for (GList *iter = result; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (rsc->variant == pe_native) { process_rsc_history(rsc_entry, rsc, node); } } g_list_free(result); } } } // XPath to find a node's resource history #define XPATH_NODE_HISTORY "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES /*! * \internal * \brief Process any resource configuration changes in the CIB status * * Go through all nodes' resource history, and if a resource's configuration * changed since its actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.) * * \param[in] data_set Cluster working set */ void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set) { crm_trace("Check resource and action configuration for changes"); /* Rather than iterate through the status section, iterate through the nodes * and search for the appropriate status subsection for each. This skips * orphaned nodes and lets us eliminate some cases before searching the XML. */ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; /* Don't bother checking actions for a node that can't run actions ... * unless it's in maintenance mode, in which case we still need to * cancel any existing recurring monitors. */ if (node->details->maintenance || pcmk__node_available(node, false, false)) { char *xpath = NULL; xmlNode *history = NULL; xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname); history = get_xpath_object(xpath, data_set->input, LOG_NEVER); free(xpath); process_node_history(node, history, data_set); } } } diff --git a/lib/pacemaker/pcmk_sched_probes.c b/lib/pacemaker/pcmk_sched_probes.c index 197c7ff3cc..d60bcecd58 100644 --- a/lib/pacemaker/pcmk_sched_probes.c +++ b/lib/pacemaker/pcmk_sched_probes.c @@ -1,823 +1,892 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <glib.h> #include <crm/crm.h> #include <crm/pengine/status.h> #include <pacemaker-internal.h> #include "libpacemaker_private.h" /*!
* \internal * \brief Add the expected result to a newly created probe * * \param[in] probe Probe action to add expected result to * \param[in] rsc Resource that probe is for * \param[in] node Node that probe will run on */ static void add_expected_result(pe_action_t *probe, pe_resource_t *rsc, pe_node_t *node) { // Check whether resource is currently active on node pe_node_t *running = pe_find_node_id(rsc->running_on, node->details->id); // The expected result is what we think the resource's current state is if (running == NULL) { pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING); } else if (rsc->role == RSC_ROLE_PROMOTED) { pe__add_action_expected_result(probe, CRM_EX_PROMOTED); } } /*! * \internal * \brief Create any needed probes on a node for a list of resources * * \param[in] rscs List of resources to create probes for * \param[in] node Node to create probes on * * \return true if any probe was created, otherwise false */ bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node) { bool any_created = false; for (GList *iter = rscs; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (rsc->cmds->create_probe(rsc, node)) { any_created = true; } } return any_created; } /*! * \internal * \brief Order one resource's start after another's start-up probe * * \param[in] rsc1 Resource that might get start-up probe * \param[in] rsc2 Resource that might be started */ static void probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2) { if ((rsc1->allocated_to != NULL) && (g_hash_table_lookup(rsc1->known_on, rsc1->allocated_to->details->id) == NULL)) { pcmk__new_ordering(rsc1, pcmk__op_key(rsc1->id, RSC_STATUS, 0), NULL, rsc2, pcmk__op_key(rsc2->id, RSC_START, 0), NULL, pe_order_optional, rsc1->cluster); } } /*! * \internal * \brief Check whether a guest resource will stop * * \param[in] node Guest node to check * * \return true if guest resource will likely stop, otherwise false */ static bool guest_resource_will_stop(pe_node_t *node) { pe_resource_t *guest_rsc = node->details->remote_rsc->container; /* Ideally, we'd check whether the guest has a required stop, but that * information doesn't exist yet, so approximate it ... */ return node->details->remote_requires_reset || node->details->unclean || pcmk_is_set(guest_rsc->flags, pe_rsc_failed) || (guest_rsc->next_role == RSC_ROLE_STOPPED) // Guest is moving || ((guest_rsc->role > RSC_ROLE_STOPPED) && (guest_rsc->allocated_to != NULL) && (pe_find_node(guest_rsc->running_on, guest_rsc->allocated_to->details->uname) == NULL)); } /*! * \internal * \brief Create a probe action for a resource on a node * * \param[in] rsc Resource to create probe for * \param[in] node Node to create probe on * * \return Newly created probe action */ static pe_action_t * probe_action(pe_resource_t *rsc, pe_node_t *node) { pe_action_t *probe = NULL; char *key = pcmk__op_key(rsc->id, RSC_STATUS, 0); crm_debug("Scheduling probe of %s %s on %s", role2text(rsc->role), rsc->id, pe__node_name(node)); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, rsc->cluster); pe__clear_action_flags(probe, pe_action_optional); pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional); add_expected_result(probe, rsc, node); return probe; } /*!
* \internal * \brief Schedule any probes needed for a resource on a node * * \param[in] rsc Resource to create probe for * \param[in] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node) { uint32_t flags = pe_order_optional; pe_action_t *probe = NULL; pe_node_t *allowed = NULL; pe_resource_t *top = uber_parent(rsc); const char *reason = NULL; CRM_CHECK((rsc != NULL) && (node != NULL), return false); if (!pcmk_is_set(rsc->cluster->flags, pe_flag_startup_probes)) { reason = "start-up probes are disabled"; goto no_probe; } if (pe__is_guest_or_remote_node(node)) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) { reason = "Pacemaker Remote nodes cannot run stonith agents"; goto no_probe; } else if (pe__is_guest_node(node) && pe__resource_contains_guest_node(rsc->cluster, rsc)) { reason = "guest nodes cannot run resources containing guest nodes"; goto no_probe; } else if (rsc->is_remote_node) { reason = "Pacemaker Remote nodes cannot host remote connections"; goto no_probe; } } // If this is a collective resource, probes are created for its children if (rsc->children != NULL) { return pcmk__probe_resource_list(rsc->children, node); } if ((rsc->container != NULL) && !rsc->is_remote_node) { reason = "resource is inside a container"; goto no_probe; } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { reason = "resource is orphaned"; goto no_probe; } else if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) { reason = "resource state is already known"; goto no_probe; } allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (rsc->exclusive_discover || top->exclusive_discover) { // Exclusive discovery is enabled ... if (allowed == NULL) { // ... but this node is not allowed to run the resource reason = "resource has exclusive discovery but is not allowed " "on node"; goto no_probe; } else if (allowed->rsc_discover_mode != pe_discover_exclusive) { // ... but no constraint marks this node for discovery of resource reason = "resource has exclusive discovery but is not enabled " "on node"; goto no_probe; } } if (allowed == NULL) { allowed = node; } if (allowed->rsc_discover_mode == pe_discover_never) { reason = "node has discovery disabled"; goto no_probe; } if (pe__is_guest_node(node)) { pe_resource_t *guest = node->details->remote_rsc->container; if (guest->role == RSC_ROLE_STOPPED) { // The guest is stopped, so we know no resource is active there reason = "node's guest is stopped"; probe_then_start(guest, top); goto no_probe; } else if (guest_resource_will_stop(node)) { reason = "node's guest will stop"; // Order resource start after guest stop (in case it's restarting) pcmk__new_ordering(guest, pcmk__op_key(guest->id, RSC_STOP, 0), NULL, top, pcmk__op_key(top->id, RSC_START, 0), NULL, pe_order_optional, rsc->cluster); goto no_probe; } } // We've eliminated all cases where a probe is not needed, so now it is probe = probe_action(rsc, node); /* Order the probe relative to the parent if the resource is cloned -- or * relative to the resource itself if it is not cloned, or if it is a fence * device and unfencing is used.
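* * When nothing is active (checked below), pe_order_runnable_left is also set, so an unrunnable probe makes the subsequent start unrunnable too; an instance whose state cannot be verified is never started blindly.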
*/ if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) || !pe_rsc_is_clone(top)) { top = rsc; } if (!pcmk_is_set(probe->flags, pe_action_runnable) - && (rsc->running_on == NULL)) { + && (top->running_on == NULL)) { /* Prevent the parent from starting if the resource can't, but don't * cause the parent to stop if already active. */ pe__set_order_flags(flags, pe_order_runnable_left); } // Start or reload the parent after probing the resource pcmk__new_ordering(rsc, NULL, probe, top, pcmk__op_key(top->id, RSC_START, 0), NULL, flags, rsc->cluster); pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL, pe_order_optional, rsc->cluster); return true; no_probe: pe_rsc_trace(rsc, "Skipping probe for %s on %s because %s", rsc->id, node->details->id, reason); return false; } /*! * \internal * \brief Check whether a probe should be ordered before another action * * \param[in] probe Probe action to check * \param[in] then Other action to check * * \return true if \p probe should be ordered before \p then, otherwise false */ static bool probe_needed_before_action(pe_action_t *probe, pe_action_t *then) { // Probes on a node are performed after unfencing it, not before if (pcmk__str_eq(then->task, CRM_OP_FENCE, pcmk__str_casei) && (probe->node != NULL) && (then->node != NULL) && (probe->node->details == then->node->details)) { const char *op = g_hash_table_lookup(then->meta, "stonith_action"); if (pcmk__str_eq(op, "on", pcmk__str_casei)) { return false; } } // Probes should be done on a node before shutting it down if (pcmk__str_eq(then->task, CRM_OP_SHUTDOWN, pcmk__str_none) && (probe->node != NULL) && (then->node != NULL) && (probe->node->details != then->node->details)) { return false; } // Otherwise probes should always be done before any other action return true; } /*! * \internal * \brief Add implicit "probe then X" orderings for "stop then X" orderings * * If the state of a resource is not known yet, a probe will be scheduled, * expecting a "not running" result. If the probe fails, a stop will not be * scheduled until the next transition. Thus, if there are ordering constraints * like "stop this resource then do something else that's not for the same * resource", add implicit "probe this resource then do something" equivalents * so the relation is upheld until we know whether a stop is needed. * * \param[in] data_set Cluster working set */ static void add_probe_orderings_for_stops(pe_working_set_t *data_set) { for (GList *iter = data_set->ordering_constraints; iter != NULL; iter = iter->next) { pe__ordering_t *order = iter->data; uint32_t order_flags = pe_order_optional; GList *probes = NULL; GList *then_actions = NULL; // Skip disabled orderings if (order->flags == pe_order_none) { continue; } // Skip non-resource orderings, and orderings for the same resource if ((order->lh_rsc == NULL) || (order->lh_rsc == order->rh_rsc)) { continue; } // Skip invalid orderings (shouldn't be possible) if (((order->lh_action == NULL) && (order->lh_action_task == NULL)) || ((order->rh_action == NULL) && (order->rh_action_task == NULL))) { continue; } // Skip orderings for first actions other than stop if ((order->lh_action != NULL) && !pcmk__str_eq(order->lh_action->task, RSC_STOP, pcmk__str_none)) { continue; } else if ((order->lh_action == NULL) && !pcmk__ends_with(order->lh_action_task, "_" RSC_STOP "_0")) { continue; } /* Do not imply a probe ordering for a resource inside of a stopping * container. 
Otherwise, it might introduce a transition loop, since a * probe could be scheduled after the container starts again. */ if ((order->rh_rsc != NULL) && (order->lh_rsc->container == order->rh_rsc)) { if ((order->rh_action != NULL) && pcmk__str_eq(order->rh_action->task, RSC_STOP, pcmk__str_none)) { continue; } else if ((order->rh_action == NULL) && pcmk__ends_with(order->rh_action_task, "_" RSC_STOP "_0")) { continue; } } // Preserve certain order options for future filtering if (pcmk_is_set(order->flags, pe_order_apply_first_non_migratable)) { pe__set_order_flags(order_flags, pe_order_apply_first_non_migratable); } if (pcmk_is_set(order->flags, pe_order_same_node)) { pe__set_order_flags(order_flags, pe_order_same_node); } // Preserve certain order types for future filtering if ((order->flags == pe_order_anti_colocation) || (order->flags == pe_order_load)) { order_flags = order->flags; } // List all scheduled probes for the first resource probes = pe__resource_actions(order->lh_rsc, NULL, RSC_STATUS, FALSE); if (probes == NULL) { // There aren't any continue; } // List all relevant "then" actions if (order->rh_action != NULL) { then_actions = g_list_prepend(NULL, order->rh_action); } else if (order->rh_rsc != NULL) { then_actions = find_actions(order->rh_rsc->actions, order->rh_action_task, NULL); if (then_actions == NULL) { // There aren't any g_list_free(probes); continue; } } crm_trace("Implying 'probe then' orderings for '%s then %s' " "(id=%d, type=%.6x)", order->lh_action? order->lh_action->uuid : order->lh_action_task, order->rh_action? order->rh_action->uuid : order->rh_action_task, order->id, order->flags); for (GList *probe_iter = probes; probe_iter != NULL; probe_iter = probe_iter->next) { pe_action_t *probe = (pe_action_t *) probe_iter->data; for (GList *then_iter = then_actions; then_iter != NULL; then_iter = then_iter->next) { pe_action_t *then = (pe_action_t *) then_iter->data; if (probe_needed_before_action(probe, then)) { order_actions(probe, then, order_flags); } } } g_list_free(then_actions); g_list_free(probes); } } +/*! + * \internal + * \brief Add necessary orderings between a probe and starts of clone instances + * + * These are in addition to the ordering with the parent resource that was + * added when the probe was created. + * + * \param[in,out] probe Probe as 'first' action in an ordering + * \param[in,out] after 'then' action wrapper in the ordering + */ +static void +add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after) +{ + uint32_t flags = pe_order_optional|pe_order_runnable_left; + + /* Although the ordering between the probe of the clone instance and the + * start of its parent has been added in pcmk__probe_rsc_on_node(), we + * avoided enforcing `pe_order_runnable_left` order type for that as long as + * any of the clone instances are running to prevent them from being + * unexpectedly stopped. + * + * On the other hand, we still need to prevent any inactive instances from + * starting unless the probe is runnable so that we don't risk starting too + * many instances before we know the state on all nodes. + */ + if (after->action->rsc->variant <= pe_group + || pcmk_is_set(probe->flags, pe_action_runnable) + // The order type is already enforced for its parent.
+ || pcmk_is_set(after->type, pe_order_runnable_left) + || uber_parent(probe->rsc) != after->action->rsc + || !pcmk__str_eq(after->action->task, RSC_START, pcmk__str_none)) { + return; + } + + crm_trace("Adding probe start orderings for '%s@%s (%s) " + "then instances of %s@%s'", + probe->uuid, pe__node_name(probe->node), + pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable", + after->action->uuid, pe__node_name(after->action->node)); + + for (GList *then_iter = after->action->actions_after; then_iter != NULL; + then_iter = then_iter->next) { + + pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data; + + if (then->action->rsc->running_on + || uber_parent(then->action->rsc) != after->action->rsc + || !pcmk__str_eq(then->action->task, RSC_START, pcmk__str_none)) { + continue; + } + + crm_trace("Adding probe start ordering for '%s@%s (%s) " + "then %s@%s' (type=%#.6x)", + probe->uuid, pe__node_name(probe->node), + pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable", + then->action->uuid, pe__node_name(then->action->node), + flags); + + /* Prevent the instance from starting if the instance can't, but don't + * cause any other instances to stop if already active. + */ + order_actions(probe, then->action, flags); + } + + return; +} + /*! * \internal * \brief Order probes before restarts and re-promotes * * If a given ordering is a "probe then start" or "probe then promote" ordering, * add an implicit "probe then stop/demote" ordering in case the action is part * of a restart/re-promote, and do the same recursively for all actions ordered * after the "then" action. * * \param[in] probe Probe as 'first' action in an ordering * \param[in] after 'then' action in the ordering * \param[in] data_set Cluster working set */ static void add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after, pe_working_set_t *data_set) { GList *iter = NULL; bool interleave = false; pe_resource_t *compatible_rsc = NULL; // Validate that this is a resource probe followed by some action if ((after == NULL) || (probe == NULL) || (probe->rsc == NULL) || (probe->rsc->variant != pe_native) || !pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) { return; } // Avoid running into any possible loop if (pcmk_is_set(after->flags, pe_action_tracking)) { return; } pe__set_action_flags(after, pe_action_tracking); crm_trace("Adding probe restart orderings for '%s@%s then %s@%s'", probe->uuid, pe__node_name(probe->node), after->uuid, pe__node_name(after->node)); /* Add restart orderings if "then" is for a different primitive. * Orderings for collective resources will be added later. */ if ((after->rsc != NULL) && (after->rsc->variant == pe_native) && (probe->rsc != after->rsc)) { GList *then_actions = NULL; if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE); } else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE); } for (iter = then_actions; iter != NULL; iter = iter->next) { pe_action_t *then = (pe_action_t *) iter->data; // Skip pseudo-actions (for example, those implied by fencing) if (!pcmk_is_set(then->flags, pe_action_pseudo)) { order_actions(probe, then, pe_order_optional); } } g_list_free(then_actions); } /* Detect whether "then" is an interleaved clone action. For these, we want * to add orderings only for the relevant instance.
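* * Interleaving is requested via the clone's "interleave" meta-attribute (XML_RSC_ATTR_INTERLEAVE, read below), e.g. in a hypothetical configuration: * * <clone id="B-clone"> * <meta_attributes id="B-clone-meta"> * <nvpair id="B-clone-interleave" name="interleave" value="true"/> * </meta_attributes> * </clone>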
*/ if ((after->rsc != NULL) && (after->rsc->variant > pe_group)) { const char *interleave_s = g_hash_table_lookup(after->rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); if (interleave) { compatible_rsc = find_compatible_child(probe->rsc, after->rsc, RSC_ROLE_UNKNOWN, FALSE); } } /* Now recursively do the same for all actions ordered after "then". This * also handles collective resources since the collective action will be * ordered before its individual instances' actions. */ for (iter = after->actions_after; iter != NULL; iter = iter->next) { pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) iter->data; /* pe_order_implies_then is the reason why a required A.start * implies/enforces B.start to be required too, which is the cause of * B.restart/re-promote. * * Not sure about pe_order_implies_then_on_node though. It's now only * used for unfencing case, which tends to introduce transition * loops... */ if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) { /* The order type between a group/clone and its child such as * B.start-> B_child.start is: * pe_order_implies_first_printed | pe_order_runnable_left * * Proceed through the ordering chain and build dependencies with * its children. */ if ((after->rsc == NULL) || (after->rsc->variant < pe_group) || (probe->rsc->parent == after->rsc) || (after_wrapper->action->rsc == NULL) || (after_wrapper->action->rsc->variant > pe_group) || (after->rsc != after_wrapper->action->rsc->parent)) { continue; } /* Proceed to the children of a group or a non-interleaved clone. * For an interleaved clone, proceed only to the relevant child. */ if ((after->rsc->variant > pe_group) && interleave && ((compatible_rsc == NULL) || (compatible_rsc != after_wrapper->action->rsc))) { continue; } } crm_trace("Recursively adding probe restart orderings for " "'%s@%s then %s@%s' (type=%#.6x)", after->uuid, pe__node_name(after->node), after_wrapper->action->uuid, pe__node_name(after_wrapper->action->node), after_wrapper->type); add_restart_orderings_for_probe(probe, after_wrapper->action, data_set); } } /*! * \internal * \brief Clear the tracking flag on all scheduled actions * * \param[in] data_set Cluster working set */ static void clear_actions_tracking_flag(pe_working_set_t *data_set) { GList *gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; pe__clear_action_flags(action, pe_action_tracking); } } /*! 
* \internal - * \brief Add restart orderings for any scheduled probes for a given resource + * \brief Add start and restart orderings for any scheduled probes for a given resource * * \param[in] rsc Resource whose probes should be ordered * \param[in] data_set Cluster working set */ static void -add_restart_orderings_for_rsc(pe_resource_t *rsc, pe_working_set_t *data_set) +add_start_restart_orderings_for_rsc(pe_resource_t *rsc, + pe_working_set_t *data_set) { GList *probes = NULL; // For collective resources, order each instance recursively if (rsc->variant != pe_native) { - g_list_foreach(rsc->children, (GFunc) add_restart_orderings_for_rsc, - data_set); + g_list_foreach(rsc->children, + (GFunc) add_start_restart_orderings_for_rsc, data_set); return; } // Find all probes for given resource probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); - // Add probe restart orderings for each probe found + // Add start and restart orderings for each probe found for (GList *iter = probes; iter != NULL; iter = iter->next) { pe_action_t *probe = (pe_action_t *) iter->data; for (GList *then_iter = probe->actions_after; then_iter != NULL; then_iter = then_iter->next) { pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data; + add_start_orderings_for_probe(probe, then); add_restart_orderings_for_probe(probe, then->action, data_set); clear_actions_tracking_flag(data_set); } } g_list_free(probes); } /*! * \internal * \brief Add "A then probe B" orderings for "A then B" orderings * * \param[in] data_set Cluster working set * * \note This function is currently disabled (see next comment). */ static void order_then_probes(pe_working_set_t *data_set) { #if 0 /* Given an ordering "A then B", we would prefer to wait for A to be started * before probing B. * * For example, if A is a filesystem which B can't even run without, it * would be helpful if the author of B's agent could assume that A is * running before B.monitor will be called. * * However, we can't _only_ probe after A is running, otherwise we wouldn't * detect the state of B if A could not be started. We can't even do an * opportunistic version of this, because B may be moving: * * A.stop -> A.start -> B.probe -> B.stop -> B.start * * and if we add B.stop -> A.stop here, we get a loop: * * A.stop -> A.start -> B.probe -> B.stop -> A.stop * * We could kill the "B.probe -> B.stop" dependency, but that could mean * stopping B "too" soon, because B.start must wait for the probe, and * we don't want to stop B if we can't start it. * * We could add the ordering only if A is an anonymous clone with * clone-max == node-max (since we'll never be moving it). However, we could * still be stopping one instance at the same time as starting another. * * The complexity of checking for allowed conditions combined with the ever * narrowing use case suggests that this code should remain disabled until * someone gets smarter.
*/ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; pe_action_t *start = NULL; GList *actions = NULL; GList *probes = NULL; actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE); if (actions) { start = actions->data; g_list_free(actions); } if (start == NULL) { crm_err("No start action for %s", rsc->id); continue; } probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); for (actions = start->actions_before; actions != NULL; actions = actions->next) { pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data; pe_action_t *first = before->action; pe_resource_t *first_rsc = first->rsc; if (first->required_runnable_before) { for (GList *clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) { before = (pe_action_wrapper_t *) clone_actions->data; crm_trace("Testing '%s then %s' for %s", first->uuid, before->action->uuid, start->uuid); CRM_ASSERT(before->action->rsc != NULL); first_rsc = before->action->rsc; break; } } else if (!pcmk__str_eq(first->task, RSC_START, pcmk__str_none)) { crm_trace("Not a start op %s for %s", first->uuid, start->uuid); } if (first_rsc == NULL) { continue; } else if (uber_parent(first_rsc) == uber_parent(start->rsc)) { crm_trace("Same parent %s for %s", first_rsc->id, start->uuid); continue; } else if (!pe_rsc_is_clone(uber_parent(first_rsc))) { crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid); continue; } crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant); for (GList *probe_iter = probes; probe_iter != NULL; probe_iter = probe_iter->next) { pe_action_t *probe = (pe_action_t *) probe_iter->data; crm_err("Ordering %s before %s", first->uuid, probe->uuid); order_actions(first, probe, pe_order_optional); } } } #endif } void pcmk__order_probes(pe_working_set_t *data_set) { // Add orderings for "probe then X" - g_list_foreach(data_set->resources, (GFunc) add_restart_orderings_for_rsc, - data_set); + g_list_foreach(data_set->resources, + (GFunc) add_start_restart_orderings_for_rsc, data_set); add_probe_orderings_for_stops(data_set); order_then_probes(data_set); } /*! * \internal * \brief Schedule any probes needed * * \param[in] data_set Cluster working set * * \note This may also schedule fencing of failed remote nodes. */ void pcmk__schedule_probes(pe_working_set_t *data_set) { // Schedule probes on each node in the cluster as needed for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; const char *probed = NULL; if (!node->details->online) { // Don't probe offline nodes if (pcmk__is_failed_remote_node(node)) { pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE); } continue; } else if (node->details->unclean) { // ... or nodes that need fencing continue; } else if (!node->details->rsc_discovery_enabled) { // The user requested that probes not be done on this node continue; } /* This is no longer needed for live clusters, since the probe_complete * node attribute will never be in the CIB. However this is still useful * for processing old saved CIBs (< 1.1.14), including the * reprobe-target_rc regression test. 
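* (CRM_OP_PROBED, checked just below, names that legacy "probe_complete" * node attribute.)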
*/ probed = pe_node_attribute_raw(node, CRM_OP_PROBED); if (probed != NULL && crm_is_true(probed) == FALSE) { pe_action_t *probe_op = NULL; probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname), CRM_OP_REPROBE, node, FALSE, TRUE, data_set); add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); continue; } // Probe each resource in the cluster on this node, as needed pcmk__probe_resource_list(data_set->resources, node); } }