diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 2179b27ced..50c32f6c7c 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1713 +1,1714 @@
#!@PYTHON@
""" Regression tests for Pacemaker's scheduler
"""
__copyright__ = "Copyright 2004-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import io
import os
import re
import sys
import stat
import shlex
import shutil
import argparse
import subprocess
import platform
import tempfile
# These imports allow running from a source checkout after running `make`.
# Note that this doesn't necessarily mean the tests will run successfully,
# but being able to see --help output can be useful.
if os.path.exists("@abs_top_srcdir@/python"):
sys.path.insert(0, "@abs_top_srcdir@/python")
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
sys.path.insert(0, "@abs_top_builddir@/python")
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
DESC = """Regression tests for Pacemaker's scheduler"""
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
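# For example (entries taken from the groups below; the optional third
# element holds the additional test arguments):
#   [ "simple1", "Offline" ]
#   [ "date-1", "Dates", [ "-t", "2005-020" ] ]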
TESTS = [
[
[ "simple1", "Offline" ],
[ "simple2", "Start" ],
[ "simple3", "Start 2" ],
[ "simple4", "Start Failed" ],
[ "simple6", "Stop Start" ],
[ "simple7", "Shutdown" ],
#[ "simple8", "Stonith" ],
#[ "simple9", "Lower version" ],
#[ "simple10", "Higher version" ],
[ "simple11", "Priority (ne)" ],
[ "simple12", "Priority (eq)" ],
[ "simple8", "Stickiness" ],
],
[
[ "group1", "Group" ],
[ "group2", "Group + Native" ],
[ "group3", "Group + Group" ],
[ "group4", "Group + Native (nothing)" ],
[ "group5", "Group + Native (move)" ],
[ "group6", "Group + Group (move)" ],
[ "group7", "Group colocation" ],
[ "group13", "Group colocation (cant run)" ],
[ "group8", "Group anti-colocation" ],
[ "group9", "Group recovery" ],
[ "group10", "Group partial recovery" ],
[ "group11", "Group target_role" ],
[ "group14", "Group stop (graph terminated)" ],
[ "group15", "Negative group colocation" ],
[ "bug-1573", "Partial stop of a group with two children" ],
[ "bug-1718", "Mandatory group ordering - Stop group_FUN" ],
[ "failed-sticky-group", "Move group on last member failure despite infinite stickiness" ],
[ "failed-sticky-anticolocated-group",
"Move group on last member failure despite infinite stickiness and optional anti-colocation"
],
[ "bug-lf-2619", "Move group on clone failure" ],
[ "group-fail", "Ensure stop order is preserved for partially active groups" ],
[ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ],
[ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ],
[ "partial-unmanaged-group",
"New member in partially unmanaged group"
],
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
[ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ],
[
"coloc-with-inner-group-member",
"Consider explicit colocations with inner group members"
],
],
[
[ "rsc_dep1", "Must not" ],
[ "rsc_dep3", "Must" ],
[ "rsc_dep5", "Must not 3" ],
[ "rsc_dep7", "Must 3" ],
[ "rsc_dep10", "Must (but cant)" ],
[ "rsc_dep2", "Must (running)" ],
[ "rsc_dep8", "Must (running : alt)" ],
[ "rsc_dep4", "Must (running + move)" ],
[ "asymmetric", "Asymmetric - require explicit location constraints" ],
],
[
[ "orphan-0", "Orphan ignore" ],
[ "orphan-1", "Orphan stop" ],
[ "orphan-2", "Orphan stop, remove failcount" ],
],
[
[ "params-0", "Params: No change" ],
[ "params-1", "Params: Changed" ],
[ "params-2", "Params: Resource definition" ],
[ "params-3", "Params: Restart instead of reload if start pending" ],
[ "params-4", "Params: Reload" ],
[ "params-5", "Params: Restart based on probe digest" ],
[ "novell-251689", "Resource definition change + target_role=stopped" ],
[ "bug-lf-2106", "Restart all anonymous clone instances after config change" ],
[ "params-6", "Params: Detect reload in previously migrated resource" ],
[ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ],
[ "not-reschedule-unneeded-monitor",
"Do not reschedule unneeded monitors while resource definitions have changed" ],
[ "reload-becomes-restart", "Cancel reload if restart becomes required" ],
[ "restart-with-extra-op-params", "Restart if with extra operation parameters upon changes of any" ],
],
[
[ "target-0", "Target Role : baseline" ],
[ "target-1", "Target Role : promoted" ],
[ "target-2", "Target Role : invalid" ],
],
[
[ "base-score", "Set a node's default score for all nodes" ],
],
[
[ "date-1", "Dates", [ "-t", "2005-020" ] ],
[ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ],
[ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ],
[ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ],
[ "probe-0", "Probe (anon clone)" ],
[ "probe-1", "Pending Probe" ],
[ "probe-2", "Correctly re-probe cloned groups" ],
[ "probe-3", "Probe (pending node)" ],
[ "probe-4", "Probe (pending node + stopped resource)" ],
[ "probe-pending-node", "Probe (pending node + unmanaged resource)" ],
[ "failed-probe-primitive", "Maskable vs. unmaskable probe failures on primitive resources" ],
[ "failed-probe-clone", "Maskable vs. unmaskable probe failures on cloned resources" ],
[ "expired-failed-probe-primitive", "Maskable, expired probe failure on primitive resources" ],
[ "standby", "Standby" ],
[ "comments", "Comments" ],
],
[
[ "one-or-more-0", "Everything starts" ],
[ "one-or-more-1", "Nothing starts because of A" ],
[ "one-or-more-2", "D can start because of C" ],
[ "one-or-more-3", "D cannot start because of B and C" ],
[ "one-or-more-4", "D cannot start because of target-role" ],
[ "one-or-more-5", "Start A and F even though C and D are stopped" ],
[ "one-or-more-6", "Leave A running even though B is stopped" ],
[ "one-or-more-7", "Leave A running even though C is stopped" ],
[ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ],
[ "clone-require-all-1", "clone B starts node 3 and 4" ],
[ "clone-require-all-2", "clone B remains stopped everywhere" ],
[ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ],
[ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ],
[ "clone-require-all-5", "clone B starts on node 1 3 and 4" ],
[ "clone-require-all-6", "clone B remains active after shutting down instances of A" ],
[ "clone-require-all-7",
"clone A and B both start at the same time. all instances of A start before B" ],
[ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ],
[ "clone-require-all-no-interleave-2",
"C starts on nodes 1, 2, and 4 with only one active instance of B" ],
[ "clone-require-all-no-interleave-3",
"C remains active when instance of B is stopped on one node and started on another" ],
[ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ],
],
[
[ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ],
[ "location-date-rules-2", "Use location constraints with effective date-based rules" ],
[ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ],
[ "value-source", "Use location constraints with node attribute expressions using value-source" ],
[ "rule-dbl-as-auto-number-match",
"Floating-point rule values default to number comparison: match" ],
[ "rule-dbl-as-auto-number-no-match",
"Floating-point rule values default to number comparison: no "
"match" ],
[ "rule-dbl-as-integer-match",
"Floating-point rule values set to integer comparison: match" ],
[ "rule-dbl-as-integer-no-match",
"Floating-point rule values set to integer comparison: no match" ],
[ "rule-dbl-as-number-match",
"Floating-point rule values set to number comparison: match" ],
[ "rule-dbl-as-number-no-match",
"Floating-point rule values set to number comparison: no match" ],
[ "rule-dbl-parse-fail-default-str-match",
"Floating-point rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-dbl-parse-fail-default-str-no-match",
"Floating-point rule values fail to parse, default to string "
"comparison: no match" ],
[ "rule-int-as-auto-integer-match",
"Integer rule values default to integer comparison: match" ],
[ "rule-int-as-auto-integer-no-match",
"Integer rule values default to integer comparison: no match" ],
[ "rule-int-as-integer-match",
"Integer rule values set to integer comparison: match" ],
[ "rule-int-as-integer-no-match",
"Integer rule values set to integer comparison: no match" ],
[ "rule-int-as-number-match",
"Integer rule values set to number comparison: match" ],
[ "rule-int-as-number-no-match",
"Integer rule values set to number comparison: no match" ],
[ "rule-int-parse-fail-default-str-match",
"Integer rule values fail to parse, default to string "
"comparison: match" ],
[ "rule-int-parse-fail-default-str-no-match",
"Integer rule values fail to parse, default to string "
"comparison: no match" ],
+ [ "timeout-by-node", "Start timeout varies by node" ],
],
[
[ "order1", "Order start 1" ],
[ "order2", "Order start 2" ],
[ "order3", "Order stop" ],
[ "order4", "Order (multiple)" ],
[ "order5", "Order (move)" ],
[ "order6", "Order (move w/ restart)" ],
[ "order7", "Order (mandatory)" ],
[ "order-optional", "Order (score=0)" ],
[ "order-required", "Order (score=INFINITY)" ],
[ "bug-lf-2171", "Prevent group start when clone is stopped" ],
[ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ],
[ "order-sets", "Ordering for resource sets" ],
[ "order-serialize", "Serialize resources without inhibiting migration" ],
[ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ],
[ "clone-order-primitive", "Order clone start after a primitive" ],
[ "clone-order-16instances", "Verify ordering of 16 cloned resources" ],
[ "order-optional-keyword", "Order (optional keyword)" ],
[ "order-mandatory", "Order (mandatory keyword)" ],
[ "bug-lf-2493",
"Don't imply colocation requirements when applying ordering constraints with clones" ],
[ "ordered-set-basic-startup", "Constraint set with default order settings" ],
[ "ordered-set-natural", "Allow natural set ordering" ],
[ "order-wrong-kind", "Order (error)" ],
],
[
[ "coloc-loop", "Colocation - loop" ],
[ "coloc-many-one", "Colocation - many-to-one" ],
[ "coloc-list", "Colocation - many-to-one with list" ],
[ "coloc-group", "Colocation - groups" ],
[ "coloc-unpromoted-anti", "Anti-colocation with unpromoted shouldn't prevent promoted colocation" ],
[ "coloc-attr", "Colocation based on node attributes" ],
[ "coloc-negative-group", "Negative colocation with a group" ],
[ "coloc-intra-set", "Intra-set colocation" ],
[ "bug-lf-2435", "Colocation sets with a negative score" ],
[ "coloc-clone-stays-active",
"Ensure clones don't get stopped/demoted because a dependent must stop" ],
[ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ],
[ "colo_promoted_w_native",
"cl#5070 - Verify promotion order is affected when colocating promoted with primitive" ],
[ "colo_unpromoted_w_native",
"cl#5070 - Verify promotion order is affected when colocating unpromoted with primitive" ],
[ "anti-colocation-order",
"cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ],
[ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ],
[ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ],
[ "group-anticolocation", "Group with failed last member anti-colocated with another group" ],
[ "group-anticolocation-2",
"Group with failed last member anti-colocated with another sticky group"
],
[ "group-anticolocation-3",
"Group with failed last member mandatorily anti-colocated with another group"
],
[ "group-anticolocation-4",
"Group with failed last member anti-colocated without influence with another group"
],
[ "group-anticolocation-5",
"Group with failed last member anti-colocated with another group (third node allowed)"
],
[ "group-colocation-failure",
"Group with sole member failed, colocated with another group"
],
[ "enforce-colo1", "Always enforce B with A INFINITY" ],
[ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ],
[ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ],
[ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ],
[ "colocation-influence", "Respect colocation influence" ],
[ "colocation-priority-group", "Apply group colocations in order of primary priority" ],
[ "colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score" ],
[ "promoted-with-blocked", "Promoted role colocated with a resource with blocked start" ],
[ "primitive-with-group-with-clone",
"Consider group dependent when colocating with clone"
],
[ "primitive-with-group-with-promoted",
"Consider group dependent when colocating with promoted role"
],
[ "primitive-with-unrunnable-group",
"Block primitive colocated with group that can't start",
],
],
[
[ "rsc-sets-seq-true", "Resource Sets - sequential=false" ],
[ "rsc-sets-seq-false", "Resource Sets - sequential=true" ],
[ "rsc-sets-clone", "Resource Sets - Clone" ],
[ "rsc-sets-promoted", "Resource Sets - Promoted" ],
[ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ],
],
[
[ "attrs1", "string: eq (and)" ],
[ "attrs2", "string: lt / gt (and)" ],
[ "attrs3", "string: ne (or)" ],
[ "attrs4", "string: exists" ],
[ "attrs5", "string: not_exists" ],
[ "attrs6", "is_dc: true" ],
[ "attrs7", "is_dc: false" ],
[ "attrs8", "score_attribute" ],
[ "per-node-attrs", "Per node resource parameters" ],
],
[
[ "mon-rsc-1", "Schedule Monitor - start" ],
[ "mon-rsc-2", "Schedule Monitor - move" ],
[ "mon-rsc-3", "Schedule Monitor - pending start" ],
[ "mon-rsc-4", "Schedule Monitor - move/pending start" ],
],
[
[ "rec-rsc-0", "Resource Recover - no start" ],
[ "rec-rsc-1", "Resource Recover - start" ],
[ "rec-rsc-2", "Resource Recover - monitor" ],
[ "rec-rsc-3", "Resource Recover - stop - ignore" ],
[ "rec-rsc-4", "Resource Recover - stop - block" ],
[ "rec-rsc-5", "Resource Recover - stop - fence" ],
[ "rec-rsc-6", "Resource Recover - multiple - restart" ],
[ "rec-rsc-7", "Resource Recover - multiple - stop" ],
[ "rec-rsc-8", "Resource Recover - multiple - block" ],
[ "rec-rsc-9", "Resource Recover - group/group" ],
[ "stop-unexpected", "Recover multiply active group with stop_unexpected" ],
[ "stop-unexpected-2", "Resource multiply active primitve with stop_unexpected" ],
[ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ],
[ "stop-failure-no-quorum", "Stop failure without quorum" ],
[ "stop-failure-no-fencing", "Stop failure without fencing available" ],
[ "stop-failure-with-fencing", "Stop failure with fencing available" ],
[ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ],
[ "multiple-monitor-one-failed",
"Consider resource failed if any of the configured monitor operations failed" ],
],
[
[ "quorum-1", "No quorum - ignore" ],
[ "quorum-2", "No quorum - freeze" ],
[ "quorum-3", "No quorum - stop" ],
[ "quorum-4", "No quorum - start anyway" ],
[ "quorum-5", "No quorum - start anyway (group)" ],
[ "quorum-6", "No quorum - start anyway (clone)" ],
[ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ],
[ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ],
[ "suicide-not-needed-initial-quorum",
"no-quorum-policy=suicide: suicide not necessary at initial quorum" ],
[ "suicide-not-needed-never-quorate",
"no-quorum-policy=suicide: suicide not necessary if never quorate" ],
[ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ],
],
[
[ "rec-node-1", "Node Recover - Startup - no fence" ],
[ "rec-node-2", "Node Recover - Startup - fence" ],
[ "rec-node-3", "Node Recover - HA down - no fence" ],
[ "rec-node-4", "Node Recover - HA down - fence" ],
[ "rec-node-5", "Node Recover - CRM down - no fence" ],
[ "rec-node-6", "Node Recover - CRM down - fence" ],
[ "rec-node-7", "Node Recover - no quorum - ignore" ],
[ "rec-node-8", "Node Recover - no quorum - freeze" ],
[ "rec-node-9", "Node Recover - no quorum - stop" ],
[ "rec-node-10", "Node Recover - no quorum - stop w/fence" ],
[ "rec-node-11", "Node Recover - CRM down w/ group - fence" ],
[ "rec-node-12", "Node Recover - nothing active - fence" ],
[ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ],
[ "rec-node-15", "Node Recover - unknown lrm section" ],
[ "rec-node-14", "Serialize all stonith's" ],
],
[
[ "multi1", "Multiple Active (stop/start)" ],
],
[
[ "migrate-begin", "Normal migration" ],
[ "migrate-success", "Completed migration" ],
[ "migrate-partial-1", "Completed migration, missing stop on source" ],
[ "migrate-partial-2", "Successful migrate_to only" ],
[ "migrate-partial-3", "Successful migrate_to only, target down" ],
[ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ],
[ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ],
[ "migrate-fail-2", "Failed migrate_from" ],
[ "migrate-fail-3", "Failed migrate_from + stop on source" ],
[ "migrate-fail-4",
"Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-5", "Failed migrate_from + stop on source and target" ],
[ "migrate-fail-6", "Failed migrate_to" ],
[ "migrate-fail-7", "Failed migrate_to + stop on source" ],
[ "migrate-fail-8",
"Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ],
[ "migrate-fail-9", "Failed migrate_to + stop on source and target" ],
[ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ],
[ "migrate-stop", "Migration in a stopping stack" ],
[ "migrate-start", "Migration in a starting stack" ],
[ "migrate-stop_start", "Migration in a restarting stack" ],
[ "migrate-stop-complex", "Migration in a complex stopping stack" ],
[ "migrate-start-complex", "Migration in a complex starting stack" ],
[ "migrate-stop-start-complex", "Migration in a complex moving stack" ],
[ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ],
[ "migrate-1", "Migrate (migrate)" ],
[ "migrate-2", "Migrate (stable)" ],
[ "migrate-3", "Migrate (failed migrate_to)" ],
[ "migrate-4", "Migrate (failed migrate_from)" ],
[ "novell-252693", "Migration in a stopping stack" ],
[ "novell-252693-2", "Migration in a starting stack" ],
[ "novell-252693-3", "Non-Migration in a starting and stopping stack" ],
[ "bug-1820", "Migration in a group" ],
[ "bug-1820-1", "Non-migration in a group" ],
[ "migrate-5", "Primitive migration with a clone" ],
[ "migrate-fencing", "Migration after Fencing" ],
[ "migrate-both-vms", "Migrate two VMs that have no colocation" ],
[ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ],
[ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ],
[ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ],
[ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ],
[ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ],
[ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ],
[ "6-migrate-group", "Advanced migrate logic, migrate a group" ],
[ "7-migrate-group-one-unmigratable",
"Advanced migrate logic, migrate group mixed with allow-migrate true/false" ],
[ "8-am-then-bm-a-migrating-b-stopping",
"Advanced migrate logic, A then B, A migrating, B stopping" ],
[ "9-am-then-bm-b-migrating-a-stopping",
"Advanced migrate logic, A then B, B migrate, A stopping" ],
[ "10-a-then-bm-b-move-a-clone",
"Advanced migrate logic, A clone then B, migrate B while stopping A" ],
[ "11-a-then-bm-b-move-a-clone-starting",
"Advanced migrate logic, A clone then B, B moving while A is start/stopping" ],
[ "a-promote-then-b-migrate", "A promote then B start. migrate B" ],
[ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ],
[ "probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins" ],
[ "probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed" ],
[ "partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration" ],
[ "migration-intermediary-cleaned",
"Probe live-migration intermediary with no history"
],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
[ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ],
[ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ],
[ "clone-anon-failcount", "Merge failcounts for anonymous clones" ],
[ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ],
[ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ],
[ "inc0", "Incarnation start" ],
[ "inc1", "Incarnation start order" ],
[ "inc2", "Incarnation silent restart, stop, move" ],
[ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ],
[ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ],
[ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ],
[ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ],
[ "inc7", "Clone colocation" ],
[ "inc8", "Clone anti-colocation" ],
[ "inc9", "Non-unique clone" ],
[ "inc10", "Non-unique clone (stop)" ],
[ "inc11", "Primitive colocation with clones" ],
[ "inc12", "Clone shutdown" ],
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
[ "clone-recover-no-shuffle-1",
"Don't shuffle instances when starting a new primitive instance" ],
[ "clone-recover-no-shuffle-2",
"Don't shuffle instances when starting a new group instance" ],
[ "clone-recover-no-shuffle-3",
"Don't shuffle instances when starting a new bundle instance" ],
[ "clone-recover-no-shuffle-4",
"Don't shuffle instances when starting a new primitive instance with "
"location preference "],
[ "clone-recover-no-shuffle-5",
"Don't shuffle instances when starting a new group instance with "
"location preference" ],
[ "clone-recover-no-shuffle-6",
"Don't shuffle instances when starting a new bundle instance with "
"location preference" ],
[ "clone-recover-no-shuffle-7",
"Don't shuffle instances when starting a new primitive instance that "
"will be promoted" ],
[ "clone-recover-no-shuffle-8",
"Don't shuffle instances when starting a new group instance that "
"will be promoted " ],
[ "clone-recover-no-shuffle-9",
"Don't shuffle instances when starting a new bundle instance that "
"will be promoted " ],
[ "clone-recover-no-shuffle-10",
"Don't shuffle instances when starting a new primitive instance that "
"won't be promoted" ],
[ "clone-recover-no-shuffle-11",
"Don't shuffle instances when starting a new group instance that "
"won't be promoted " ],
[ "clone-recover-no-shuffle-12",
"Don't shuffle instances when starting a new bundle instance that "
"won't be promoted " ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
[ "bug-lf-2160", "Don't shuffle clones due to colocation" ],
[ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ],
[ "bug-lf-2153", "Clone ordering constraints" ],
[ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ],
[ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ],
[ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ],
[ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ],
[ "bug-lf-2544", "Balanced clone placement" ],
[ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ],
[ "bug-lf-2574", "Avoid clone shuffle" ],
[ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ],
[ "bug-cl-5168", "Don't shuffle clones" ],
[ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ],
[ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ],
[ "clone-interleave-1",
"Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "clone-interleave-3",
"Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ],
[ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ],
[ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ],
[ "clone-requires-quorum",
"Clone with requires=quorum with presumed-inactive instance on failed node" ],
],
[
[ "cloned_start_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_start_two", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_one", "order first clone then clone... first clone_min=2" ],
[ "cloned_stop_two", "order first clone then clone... first clone_min=2" ],
[ "clone_min_interleave_start_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_start_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_one",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_interleave_stop_two",
"order first clone then clone... first clone_min=2 and then has interleave=true" ],
[ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ],
[ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ],
],
[
[ "unfence-startup", "Clean unfencing" ],
[ "unfence-definition", "Unfencing when the agent changes" ],
[ "unfence-parameters", "Unfencing when the agent parameters changes" ],
[ "unfence-device", "Unfencing when a cluster has only fence devices" ],
],
[
[ "promoted-0", "Stopped -> Unpromoted" ],
[ "promoted-1", "Stopped -> Promote" ],
[ "promoted-2", "Stopped -> Promote : notify" ],
[ "promoted-3", "Stopped -> Promote : promoted location" ],
[ "promoted-4", "Started -> Promote : promoted location" ],
[ "promoted-5", "Promoted -> Promoted" ],
[ "promoted-6", "Promoted -> Promoted (2)" ],
[ "promoted-7", "Promoted -> Fenced" ],
[ "promoted-8", "Promoted -> Fenced -> Moved" ],
[ "promoted-9", "Stopped + Promotable + No quorum" ],
[ "promoted-10", "Stopped -> Promotable : notify with monitor" ],
[ "promoted-11", "Stopped -> Promote : colocation" ],
[ "novell-239082", "Demote/Promote ordering" ],
[ "novell-239087", "Stable promoted placement" ],
[ "promoted-12", "Promotion based solely on rsc_location constraints" ],
[ "promoted-13", "Include preferences of colocated resources when placing promoted" ],
[ "promoted-demote", "Ordering when actions depends on demoting an unpromoted resource" ],
[ "promoted-ordering", "Prevent resources from starting that need a promoted" ],
[ "bug-1765", "Verify promoted-with-promoted colocation does not stop unpromoted instances" ],
[ "promoted-group", "Promotion of cloned groups" ],
[ "bug-lf-1852", "Don't shuffle promotable instances unnecessarily" ],
[ "promoted-failed-demote", "Don't retry failed demote actions" ],
[ "promoted-failed-demote-2", "Don't retry failed demote actions (notify=false)" ],
[ "promoted-depend",
"Ensure resources that depend on promoted instance don't get allocated until that does" ],
[ "promoted-reattach", "Re-attach to a running promoted" ],
[ "promoted-allow-start", "Don't include promoted score if it would prevent allocation" ],
[ "promoted-colocation",
"Allow promoted instances placemaker to be influenced by colocation constraints" ],
[ "promoted-pseudo", "Make sure promote/demote pseudo actions are created correctly" ],
[ "promoted-role", "Prevent target-role from promoting more than promoted-max instances" ],
[ "bug-lf-2358", "Anti-colocation of promoted instances" ],
[ "promoted-promotion-constraint", "Mandatory promoted colocation constraints" ],
[ "unmanaged-promoted", "Ensure role is preserved for unmanaged resources" ],
[ "promoted-unmanaged-monitor", "Start correct monitor for unmanaged promoted instances" ],
[ "promoted-demote-2", "Demote does not clear past failure" ],
[ "promoted-move", "Move promoted based on failure of colocated group" ],
[ "promoted-probed-score", "Observe the promotion score of probed resources" ],
[ "colocation_constraint_stops_promoted",
"cl#5054 - Ensure promoted is demoted when stopped by colocation constraint" ],
[ "colocation_constraint_stops_unpromoted",
"cl#5054 - Ensure unpromoted is not demoted when stopped by colocation constraint" ],
[ "order_constraint_stops_promoted",
"cl#5054 - Ensure promoted is demoted when stopped by order constraint" ],
[ "order_constraint_stops_unpromoted",
"cl#5054 - Ensure unpromoted is not demoted when stopped by order constraint" ],
[ "promoted_monitor_restart", "cl#5072 - Ensure promoted monitor operation will start after promotion" ],
[ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ],
[ "bug-5143-ms-shuffle", "Prevent promoted instance shuffling due to promotion score" ],
[ "promoted-demote-block", "Block promotion if demote fails with on-fail=block" ],
[ "promoted-dependent-ban",
"Don't stop instances from being active because a dependent is banned from that host" ],
[ "promoted-stop", "Stop instances due to location constraint with role=Started" ],
[ "promoted-partially-demoted-group", "Allow partially demoted group to finish demoting" ],
[ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ],
[ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ],
[ "promoted-asymmetrical-order",
"Fix the behaviors of multi-state resources with asymmetrical ordering" ],
[ "promoted-notify", "Promotion with notifications" ],
[ "promoted-score-startup", "Use permanent promoted scores without LRM history" ],
[ "failed-demote-recovery", "Recover resource in unpromoted role after demote fails" ],
[ "failed-demote-recovery-promoted", "Recover resource in promoted role after demote fails" ],
[ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
[ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
[ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
[ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
[ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
[ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ],
[ "leftover-pending-monitor", "Prevent a leftover pending monitor from causing unexpected stop of other instances" ],
],
[
[ "history-1", "Correctly parse stateful-1 resource state" ],
],
[
[ "managed-0", "Managed (reference)" ],
[ "managed-1", "Not managed - down" ],
[ "managed-2", "Not managed - up" ],
[ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ],
[ "bug-5028-detach", "Ensure detach still works" ],
[ "bug-5028-bottom",
"Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ],
[ "unmanaged-stop-1",
"cl#5155 - Block the stop of resources if any depending resource is unmanaged" ],
[ "unmanaged-stop-2",
"cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ],
[ "unmanaged-stop-3",
"cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ],
[ "unmanaged-stop-4",
"cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ],
[ "unmanaged-block-restart",
"Block restart of resources if any dependent resource in a group is unmanaged" ],
],
[
[ "interleave-0", "Interleave (reference)" ],
[ "interleave-1", "coloc - not interleaved" ],
[ "interleave-2", "coloc - interleaved" ],
[ "interleave-3", "coloc - interleaved (2)" ],
[ "interleave-pseudo-stop", "Interleaved clone during stonith" ],
[ "interleave-stop", "Interleaved clone during stop" ],
[ "interleave-restart", "Interleaved clone during dependency restart" ],
],
[
[ "notify-0", "Notify reference" ],
[ "notify-1", "Notify simple" ],
[ "notify-2", "Notify simple, confirm" ],
[ "notify-3", "Notify move, confirm" ],
[ "novell-239079", "Notification priority" ],
#[ "notify-2", "Notify - 764" ],
[ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ],
[ "route-remote-notify", "Route remote notify actions through correct cluster node" ],
[ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ],
],
[
[ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ],
[ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ],
[ "696", "OSDL #696 - CRM starts stonith RA without monitor" ],
[ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ],
[ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ],
[ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ],
[ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ],
[ "829", "OSDL #829" ],
[ "994",
"OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ],
[ "994-2", "OSDL #994 - with a dependent resource" ],
[ "1360", "OSDL #1360 - Clone stickiness" ],
[ "1484", "OSDL #1484 - on_fail=stop" ],
[ "1494", "OSDL #1494 - Clone stability" ],
[ "unrunnable-1", "Unrunnable" ],
[ "unrunnable-2", "Unrunnable 2" ],
[ "stonith-0", "Stonith loop - 1" ],
[ "stonith-1", "Stonith loop - 2" ],
[ "stonith-2", "Stonith loop - 3" ],
[ "stonith-3", "Stonith startup" ],
[ "stonith-4", "Stonith node state" ],
[ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ],
[ "bug-1572-1", "Recovery of groups depending on promotable role" ],
[ "bug-1572-2", "Recovery of groups depending on promotable role when promoted is not re-promoted" ],
[ "bug-1685", "Depends-on-promoted ordering" ],
[ "bug-1822", "Don't promote partially active groups" ],
[ "bug-pm-11", "New resource added to a m/s group" ],
[ "bug-pm-12", "Recover only the failed portion of a cloned group" ],
[ "bug-n-387749", "Don't shuffle clone instances" ],
[ "bug-n-385265",
"Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ],
[ "bug-n-385265-2",
"Ensure groups are migrated instead of remaining partially active on the current node" ],
[ "bug-lf-1920", "Correctly handle probes that find active resources" ],
[ "bnc-515172", "Location constraint with multiple expressions" ],
[ "colocate-primitive-with-clone", "Optional colocation with a clone" ],
[ "use-after-free-merge", "Use-after-free in native_merge_weights" ],
[ "bug-lf-2551", "STONITH ordering for stop" ],
[ "bug-lf-2606", "Stonith implies demote" ],
[ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ],
[ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ],
[ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ],
[ "bug-5014-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using symmetric=false" ],
[ "bug-5014-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ],
[ "bug-5014-CthenAthenB-C-stopped",
"Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ],
[ "bug-5014-CLONE-A-start-B-start",
"Verify when A starts B starts using clone resources with symmetric=false" ],
[ "bug-5014-CLONE-A-stop-B-started",
"Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ],
[ "bug-5014-GROUP-A-start-B-start",
"Verify when A starts B starts when using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-started",
"Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ],
[ "bug-5014-GROUP-A-stopped-B-stopped",
"Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ],
[ "bug-5014-ordered-set-symmetrical-false",
"Verify ordered sets work with symmetrical=false" ],
[ "bug-5014-ordered-set-symmetrical-true",
"Verify ordered sets work with symmetrical=true" ],
[ "clbz5007-promotable-colocation",
"Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ],
[ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ],
[ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ],
[ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ],
[ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ],
[ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ],
[ "failcount", "Ensure failcounts are correctly expired" ],
[ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ],
[ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ],
[ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ],
[ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ],
[ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ],
[ "bug-5059", "No need to restart p_stateful1:*" ],
[ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ],
[ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ],
[ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ],
[ "expire-non-blocked-failure",
"Ignore failure-timeout only if the failed operation has on-fail=block" ],
[ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ],
[ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ],
[ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ],
[ "order-expired-failure", "Order failcount cleanup after remote fencing" ],
[ "expired-stop-1", "Expired stop failure should not block resource" ],
[ "ignore_stonith_rsc_order1",
"cl#5056- Ignore order constraint between stonith and non-stonith rsc" ],
[ "ignore_stonith_rsc_order2",
"cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ],
[ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ],
[ "ignore_stonith_rsc_order4",
"cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ],
[ "honor_stonith_rsc_order1",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ],
[ "honor_stonith_rsc_order2",
"cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ],
[ "honor_stonith_rsc_order3",
"cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ],
[ "honor_stonith_rsc_order4",
"cl#5056- Honor order constraint, between two native stonith rscs" ],
[ "multiply-active-stonith", "Multiply active stonith" ],
[ "probe-timeout", "cl#5099 - Default probe timeout" ],
[ "order-first-probes",
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
[ "pending-node-no-uname", "Do not fence a pending node that doesn't have an uname in node state yet" ],
[ "node-pending-timeout", "Fence a pending node that has reached `node-pending-timeout`" ],
],
[
[ "systemhealth1", "System Health () #1" ],
[ "systemhealth2", "System Health () #2" ],
[ "systemhealth3", "System Health () #3" ],
[ "systemhealthn1", "System Health (None) #1" ],
[ "systemhealthn2", "System Health (None) #2" ],
[ "systemhealthn3", "System Health (None) #3" ],
[ "systemhealthm1", "System Health (Migrate On Red) #1" ],
[ "systemhealthm2", "System Health (Migrate On Red) #2" ],
[ "systemhealthm3", "System Health (Migrate On Red) #3" ],
[ "systemhealtho1", "System Health (Only Green) #1" ],
[ "systemhealtho2", "System Health (Only Green) #2" ],
[ "systemhealtho3", "System Health (Only Green) #3" ],
[ "systemhealthp1", "System Health (Progessive) #1" ],
[ "systemhealthp2", "System Health (Progessive) #2" ],
[ "systemhealthp3", "System Health (Progessive) #3" ],
[ "allow-unhealthy-nodes", "System Health (migrate-on-red + allow-unhealth-nodes)" ],
],
[
[ "utilization", "Placement Strategy - utilization" ],
[ "minimal", "Placement Strategy - minimal" ],
[ "balanced", "Placement Strategy - balanced" ],
],
[
[ "placement-stickiness", "Optimized Placement Strategy - stickiness" ],
[ "placement-priority", "Optimized Placement Strategy - priority" ],
[ "placement-location", "Optimized Placement Strategy - location" ],
[ "placement-capacity", "Optimized Placement Strategy - capacity" ],
],
[
[ "utilization-order1", "Utilization Order - Simple" ],
[ "utilization-order2", "Utilization Order - Complex" ],
[ "utilization-order3", "Utilization Order - Migrate" ],
[ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ],
[ "utilization-complex", "Utilization with complex relationships" ],
[ "utilization-shuffle",
"Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ],
[ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ],
[ "load-stopped-loop-2",
"cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ],
],
[
[ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ],
[ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ],
[ "colocated-utilization-group", "Colocated Utilization - Group" ],
[ "colocated-utilization-clone", "Colocated Utilization - Clone" ],
[ "utilization-check-allowed-nodes",
"Only check the capacities of the nodes that can run the resource" ],
],
[
[ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ],
[ "node-maintenance-1", "cl#5128 - Node maintenance" ],
[ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ],
[ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ],
[ "rsc-maintenance", "Per-resource maintenance" ],
],
[
[ "not-installed-agent", "The resource agent is missing" ],
[ "not-installed-tools", "Something the resource agent needs is missing" ],
],
[
[ "stopped-monitor-00", "Stopped Monitor - initial start" ],
[ "stopped-monitor-01", "Stopped Monitor - failed started" ],
[ "stopped-monitor-02", "Stopped Monitor - started multi-up" ],
[ "stopped-monitor-03", "Stopped Monitor - stop started" ],
[ "stopped-monitor-04", "Stopped Monitor - failed stop" ],
[ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ],
[ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ],
[ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ],
[ "stopped-monitor-08", "Stopped Monitor - migrate" ],
[ "stopped-monitor-09", "Stopped Monitor - unmanage started" ],
[ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ],
[ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ],
[ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ],
[ "stopped-monitor-20", "Stopped Monitor - initial stop" ],
[ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ],
[ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ],
[ "stopped-monitor-23", "Stopped Monitor - start stopped" ],
[ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ],
[ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ],
[ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ],
[ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ],
[ "stopped-monitor-30", "Stopped Monitor - new node started" ],
[ "stopped-monitor-31", "Stopped Monitor - new node stopped" ],
],
[
# This is a combo test to check:
# - probe timeout defaults to the minimum-interval monitor's
# - duplicate recurring operations are ignored
# - if timeout spec is bad, the default timeout is used
# - failure is blocked with on-fail=block even if ISO8601 interval is specified
# - started/stopped role monitors are started/stopped on right nodes
[ "intervals", "Recurring monitor interval handling" ],
],
[
[ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ],
[ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ],
[ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ],
[ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ],
[ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ],
[ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ],
[ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ],
[ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ],
[ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ],
[ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ],
[ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ],
[ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ],
[ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ],
[ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ],
[ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ],
[ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ],
[ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ],
[ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ],
[ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ],
[ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ],
[ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ],
[ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ],
[ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ],
[ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ],
[ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ],
[ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ],
[ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ],
[ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ],
[ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ],
[ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ],
[ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ],
[ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ],
[ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ],
[ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ],
[ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ],
[ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ],
[ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ],
[ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ],
[ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ],
[ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ],
[ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ],
[ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ],
[ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ],
[ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ],
[ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ],
[ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ],
[ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ],
[ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ],
[ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ],
[ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ],
[ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ],
[ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ],
[ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ],
[ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ],
[ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ],
[ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ],
[ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ],
[ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ],
[ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ],
[ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ],
[ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ],
[ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ],
[ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ],
[ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ],
[ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ],
[ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ],
[ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ],
[ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ],
[ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ],
[ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-promoted-1", "Ticket - Promoted (loss-policy=stop, initial)" ],
[ "ticket-promoted-2", "Ticket - Promoted (loss-policy=stop, granted)" ],
[ "ticket-promoted-3", "Ticket - Promoted (loss-policy-stop, revoked)" ],
[ "ticket-promoted-4", "Ticket - Promoted (loss-policy=demote, initial)" ],
[ "ticket-promoted-5", "Ticket - Promoted (loss-policy=demote, granted)" ],
[ "ticket-promoted-6", "Ticket - Promoted (loss-policy=demote, revoked)" ],
[ "ticket-promoted-7", "Ticket - Promoted (loss-policy=fence, initial)" ],
[ "ticket-promoted-8", "Ticket - Promoted (loss-policy=fence, granted)" ],
[ "ticket-promoted-9", "Ticket - Promoted (loss-policy=fence, revoked)" ],
[ "ticket-promoted-10", "Ticket - Promoted (loss-policy=freeze, initial)" ],
[ "ticket-promoted-11", "Ticket - Promoted (loss-policy=freeze, granted)" ],
[ "ticket-promoted-12", "Ticket - Promoted (loss-policy=freeze, revoked)" ],
[ "ticket-promoted-13", "Ticket - Promoted (loss-policy=stop, standby, granted)" ],
[ "ticket-promoted-14", "Ticket - Promoted (loss-policy=stop, granted, standby)" ],
[ "ticket-promoted-15", "Ticket - Promoted (loss-policy=stop, standby, revoked)" ],
[ "ticket-promoted-16", "Ticket - Promoted (loss-policy=demote, standby, granted)" ],
[ "ticket-promoted-17", "Ticket - Promoted (loss-policy=demote, granted, standby)" ],
[ "ticket-promoted-18", "Ticket - Promoted (loss-policy=demote, standby, revoked)" ],
[ "ticket-promoted-19", "Ticket - Promoted (loss-policy=fence, standby, granted)" ],
[ "ticket-promoted-20", "Ticket - Promoted (loss-policy=fence, granted, standby)" ],
[ "ticket-promoted-21", "Ticket - Promoted (loss-policy=fence, standby, revoked)" ],
[ "ticket-promoted-22", "Ticket - Promoted (loss-policy=freeze, standby, granted)" ],
[ "ticket-promoted-23", "Ticket - Promoted (loss-policy=freeze, granted, standby)" ],
[ "ticket-promoted-24", "Ticket - Promoted (loss-policy=freeze, standby, revoked)" ],
],
[
[ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ],
[ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ],
[ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ],
[ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ],
[ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ],
[ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ],
[ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ],
[ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ],
[ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ],
[ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ],
[ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ],
[ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ],
[ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ],
[ "site-specific-params", "Site-specific instance attributes based on rules" ],
],
[
[ "template-1", "Template - 1" ],
[ "template-2", "Template - 2" ],
[ "template-3", "Template - 3 (merge operations)" ],
[ "template-coloc-1", "Template - Colocation 1" ],
[ "template-coloc-2", "Template - Colocation 2" ],
[ "template-coloc-3", "Template - Colocation 3" ],
[ "template-order-1", "Template - Order 1" ],
[ "template-order-2", "Template - Order 2" ],
[ "template-order-3", "Template - Order 3" ],
[ "template-ticket", "Template - Ticket" ],
[ "template-rsc-sets-1", "Template - Resource Sets 1" ],
[ "template-rsc-sets-2", "Template - Resource Sets 2" ],
[ "template-rsc-sets-3", "Template - Resource Sets 3" ],
[ "template-rsc-sets-4", "Template - Resource Sets 4" ],
[ "template-clone-primitive", "Cloned primitive from template" ],
[ "template-clone-group", "Cloned group from template" ],
[ "location-sets-templates", "Resource sets and templates - Location" ],
[ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ],
[ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ],
[ "tags-location", "Tags - Location" ],
[ "tags-ticket", "Tags - Ticket" ],
],
[
[ "container-1", "Container - initial" ],
[ "container-2", "Container - monitor failed" ],
[ "container-3", "Container - stop failed" ],
[ "container-4", "Container - reached migration-threshold" ],
[ "container-group-1", "Container in group - initial" ],
[ "container-group-2", "Container in group - monitor failed" ],
[ "container-group-3", "Container in group - stop failed" ],
[ "container-group-4", "Container in group - reached migration-threshold" ],
[ "container-is-remote-node", "Place resource within container when container is remote-node" ],
[ "bug-rh-1097457", "Kill user defined container/contents ordering" ],
[ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ],
[ "bundle-order-startup", "Bundle startup ordering" ],
[ "bundle-order-partial-start",
"Bundle startup ordering when some dependencies are already running" ],
[ "bundle-order-partial-start-2",
"Bundle startup ordering when some dependencies and the container are already running" ],
[ "bundle-order-stop", "Bundle stop ordering" ],
[ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ],
[ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ],
[ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ],
[ "bundle-order-startup-clone-2", "Bundle startup with clones" ],
[ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ],
[ "bundle-interleave-start", "Interleave bundle starts" ],
[ "bundle-interleave-promote", "Interleave bundle promotes" ],
[ "bundle-nested-colocation", "Colocation of nested connection resources" ],
[ "bundle-order-fencing",
"Order pseudo bundle fencing after parent node fencing if both are happening" ],
[ "bundle-probe-order-1", "order 1" ],
[ "bundle-probe-order-2", "order 2" ],
[ "bundle-probe-order-3", "order 3" ],
[ "bundle-probe-remotes", "Ensure remotes get probed too" ],
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "bundle-connection-with-container", "Don't move a container due to connection preferences" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
[ "bundle-promoted-location-1",
"Promotable bundle, positive location" ],
[ "bundle-promoted-location-2",
"Promotable bundle, negative location" ],
[ "bundle-promoted-location-3",
"Promotable bundle, positive location for promoted role" ],
[ "bundle-promoted-location-4",
"Promotable bundle, negative location for promoted role" ],
[ "bundle-promoted-location-5",
"Promotable bundle, positive location for unpromoted role" ],
[ "bundle-promoted-location-6",
"Promotable bundle, negative location for unpromoted role" ],
[ "bundle-promoted-colocation-1",
"Primary promoted bundle, dependent primitive (mandatory coloc)" ],
[ "bundle-promoted-colocation-2",
"Primary promoted bundle, dependent primitive (optional coloc)" ],
[ "bundle-promoted-colocation-3",
"Dependent promoted bundle, primary primitive (mandatory coloc)" ],
[ "bundle-promoted-colocation-4",
"Dependent promoted bundle, primary primitive (optional coloc)" ],
[ "bundle-promoted-colocation-5",
"Primary and dependent promoted bundle instances (mandatory coloc)" ],
[ "bundle-promoted-colocation-6",
"Primary and dependent promoted bundle instances (optional coloc)" ],
[ "bundle-promoted-anticolocation-1",
"Primary promoted bundle, dependent primitive (mandatory anti)" ],
[ "bundle-promoted-anticolocation-2",
"Primary promoted bundle, dependent primitive (optional anti)" ],
[ "bundle-promoted-anticolocation-3",
"Dependent promoted bundle, primary primitive (mandatory anti)" ],
[ "bundle-promoted-anticolocation-4",
"Dependent promoted bundle, primary primitive (optional anti)" ],
[ "bundle-promoted-anticolocation-5",
"Primary and dependent promoted bundle instances (mandatory anti)" ],
[ "bundle-promoted-anticolocation-6",
"Primary and dependent promoted bundle instances (optional anti)" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
[ "whitebox-fail2", "Fail cluster connection to guest node" ],
[ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ],
[ "whitebox-start", "Start whitebox container with resources assigned to it" ],
[ "whitebox-stop", "Stop whitebox container with resources assigned to it" ],
[ "whitebox-move", "Move whitebox container with resources assigned to it" ],
[ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ],
[ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ],
[ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ],
[ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ],
[ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ],
[ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ],
[ "whitebox-migrate1", "Migrate both container and connection resource" ],
[ "whitebox-imply-stop-on-fence",
"imply stop action on container node rsc when host node is fenced" ],
[ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ],
[ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ],
[ "guest-node-cleanup", "Order guest node connection recovery after container probe" ],
[ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ],
],
[
[ "remote-startup-probes", "Baremetal remote-node startup probes" ],
[ "remote-startup", "Startup a newly discovered remote-nodes with no status" ],
[ "remote-fence-unclean", "Fence unclean baremetal remote-node" ],
[ "remote-fence-unclean2",
"Fence baremetal remote-node after cluster node fails and connection can not be recovered" ],
[ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ],
[ "remote-move", "Move remote-node connection resource" ],
[ "remote-disable", "Disable a baremetal remote-node" ],
[ "remote-probe-disable", "Probe then stop a baremetal remote-node" ],
[ "remote-orphaned", "Properly shutdown orphaned connection resource" ],
[ "remote-orphaned2",
"verify we can handle orphaned remote connections with active resources on the remote" ],
[ "remote-recover", "Recover connection resource after cluster-node fails" ],
[ "remote-stale-node-entry",
"Make sure we properly handle leftover remote-node entries in the node section" ],
[ "remote-partial-migrate",
"Make sure partial migrations are handled before ops on the remote node" ],
[ "remote-partial-migrate2",
"Make sure partial migration target is prefered for remote connection" ],
[ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ],
[ "remote-start-fail",
"Make sure a start failure does not result in fencing if no active resources are on remote" ],
[ "remote-unclean2",
"Make monitor failure always results in fencing, even if no rsc are active on remote" ],
[ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ],
[ "remote-recovery", "Recover remote connections before attempting demotion" ],
[ "remote-recover-connection", "Optimistically recovery of only the connection" ],
[ "remote-recover-all", "Fencing when the connection has no home" ],
[ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ],
[ "remote-recover-unknown",
"Fencing when the connection has no home and the remote has no operation history" ],
[ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ],
[ "remote-connection-unrecoverable",
"Remote connection host must be fenced, with connection unrecoverable" ],
[ "remote-connection-shutdown", "Remote connection shutdown" ],
[ "cancel-behind-moving-remote",
"Route recurring monitor cancellations through original node of a moving remote connection" ],
],
[
[ "resource-discovery", "Exercises resource-discovery location constraint option" ],
[ "rsc-discovery-per-node", "Disable resource discovery per node" ],
[ "shutdown-lock", "Ensure shutdown lock works properly" ],
[ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
],
[
[ "op-defaults", "Test op_defaults conditional expressions" ],
[ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
[ "op-defaults-3", "Test op_defaults precedence" ],
[ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
[ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
],
[ [ "stop-all-resources", "Test stop-all-resources=true "],
],
[ [ "ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK" ],
[ "ocf_degraded_promoted-remap-ocf_ok", "Test degraded promoted remapped to OK"],
],
]
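# A test entry may include an optional third element: a list of extra
# command-line arguments for crm_simulate. A hypothetical example:
#   [ "my-test", "My description", [ "--show-utilization" ] ]
# run_all() passes that list to run_one() as test_args.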
TESTS_64BIT = [
[
[ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ],
],
]
def is_executable(path):
""" Check whether a file at a given path is executable. """
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
return False
def diff(file1, file2, **kwargs):
""" Call diff on two files """
return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
"--ignore-blank-lines", file1, file2 ], **kwargs)
def sort_file(filename):
""" Sort a file alphabetically """
with io.open(filename, "rt") as f:
lines = sorted(f)
with io.open(filename, "wt") as f:
f.writelines(lines)
def remove_files(filenames):
""" Remove a list of files """
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def normalize(filename):
""" Remove text from a file that isn't important for comparison """
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
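# For example, crm_feature_set="3.17.0" and batch-limit="30" (version
# string illustrative) vary by build and configuration, so both
# attributes are blanked out before graphs are compared.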
if os.path.isfile(filename):
with io.open(filename, "rt") as f:
lines = f.readlines()
with io.open(filename, "wt") as f:
for line in lines:
for pattern in normalize.patterns:
line = pattern.sub("", line)
f.write(line)
def cat(filename, dest=sys.stdout):
""" Copy a file to a destination file descriptor """
with io.open(filename, "rt") as f:
shutil.copyfileobj(f, dest)
class CtsScheduler(object):
""" Regression tests for Pacemaker's scheduler """
def _parse_args(self, argv):
""" Parse command-line arguments """
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
# argparse can't handle "everything after --run TEST", so grab that
self.single_test_args = []
narg = 0
for arg in argv:
narg = narg + 1
if arg == '--run':
(argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
break
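# For example, "cts-scheduler --run simple1 --show-scores" is split so
# that argparse sees ["--run", "simple1"] while self.single_test_args
# becomes ["--show-scores"], which run_one() later hands to crm_simulate.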
self.args = parser.parse_args(argv[1:])
def _error(self, s):
print(" * ERROR: %s" % s)
def _failed(self, s):
print(" * FAILED: %s" % s)
def _get_valgrind_cmd(self):
""" Return command arguments needed (or not) to run valgrind """
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"-q",
"--gen-suppressions=all",
"--time-stamp=yes",
"--trace-children=no",
"--show-reachable=no",
"--leak-check=full",
"--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home)
]
if self.args.valgrind_dhat:
os.environ['G_SLICE'] = "always-malloc"
return [
"valgrind",
"--tool=exp-dhat",
"--time-stamp=yes",
"--trace-children=no",
"--show-top-n=100",
"--num-callers=4"
]
return []
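# With --valgrind, run_one() ends up executing something like:
#   valgrind -q --leak-check=full ... \
#       --log-file=<out-dir>/valgrind/<test>.valgrind crm_simulate ...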
def _get_simulator_cmd(self):
""" Locate the simulation binary """
if self.args.binary is None:
self.args.binary = BuildOptions._BUILD_DIR + "/tools/crm_simulate"
if not is_executable(self.args.binary):
self.args.binary = BuildOptions.SBIN_DIR + "/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
self._error("Test binary " + self.args.binary + " not found")
sys.exit(ExitStatus.NOT_INSTALLED)
return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
""" Ensure schema directory environment variable is set, if possible """
try:
return os.environ['PCMK_schema_directory']
except KeyError:
for d in [ os.path.join(BuildOptions._BUILD_DIR, "xml"),
BuildOptions.SCHEMA_DIR ]:
if os.path.isdir(d):
os.environ['PCMK_schema_directory'] = d
return d
return None
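# Resolution order: an explicit PCMK_schema_directory wins; otherwise a
# source checkout's xml/ build directory is preferred, falling back to
# the installed schema directory (typically /usr/share/pacemaker).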
def __init__(self, argv=sys.argv):
# Ensure all command output is in portable locale for comparison
os.environ['LC_ALL'] = "C"
self._parse_args(argv)
# Where this executable lives
self.test_home = os.path.dirname(os.path.realpath(argv[0]))
# Where test data resides
if self.args.io_dir is None:
self.args.io_dir = os.path.join(self.test_home, "scheduler")
self.xml_input_dir = os.path.join(self.args.io_dir, "xml")
self.expected_dir = os.path.join(self.args.io_dir, "exp")
self.dot_expected_dir = os.path.join(self.args.io_dir, "dot")
self.scores_dir = os.path.join(self.args.io_dir, "scores")
self.summary_dir = os.path.join(self.args.io_dir, "summary")
self.stderr_expected_dir = os.path.join(self.args.io_dir, "stderr")
# Create a temporary directory to store diff file
self.failed_dir = tempfile.mkdtemp(prefix='cts-scheduler_')
# Where to store generated files
if self.args.out_dir is None:
self.args.out_dir = self.args.io_dir
self.failed_filename = os.path.join(self.failed_dir, "test-output.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, "test-output.diff")
os.environ['CIB_shadow_dir'] = self.args.out_dir
self.failed_file = None
self.outfile_out_dir = os.path.join(self.args.out_dir, "out")
self.dot_out_dir = os.path.join(self.args.out_dir, "dot")
self.scores_out_dir = os.path.join(self.args.out_dir, "scores")
self.summary_out_dir = os.path.join(self.args.out_dir, "summary")
self.stderr_out_dir = os.path.join(self.args.out_dir, "stderr")
self.valgrind_out_dir = os.path.join(self.args.out_dir, "valgrind")
# Single test mode (if requested)
try:
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
pass # --run was not specified
self.set_schema_env()
# Arguments needed (or not) to run commands
self.valgrind_args = self._get_valgrind_cmd()
self.simulate_args = self._get_simulator_cmd()
# Test counters
self.num_failed = 0
self.num_tests = 0
# Ensure that the main output directory exists
# We don't want to create it with os.makedirs below
if not os.path.isdir(self.args.out_dir):
self._error("Output directory missing; can't create output files")
sys.exit(ExitStatus.CANTCREAT)
# Create output subdirectories if they don't exist
try:
os.makedirs(self.outfile_out_dir, 0o755, True)
os.makedirs(self.dot_out_dir, 0o755, True)
os.makedirs(self.scores_out_dir, 0o755, True)
os.makedirs(self.summary_out_dir, 0o755, True)
os.makedirs(self.stderr_out_dir, 0o755, True)
if self.valgrind_args:
os.makedirs(self.valgrind_out_dir, 0o755, True)
except OSError as ex:
self._error("Unable to create output subdirectory: %s" % ex)
remove_files([
self.outfile_out_dir,
self.dot_out_dir,
self.scores_out_dir,
self.summary_out_dir,
self.stderr_out_dir,
])
sys.exit(ExitStatus.CANTCREAT)
def _compare_files(self, filename1, filename2):
""" Add any file differences to failed results """
if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL)
self.failed_file.write("\n")
return True
return False
def run_one(self, test_name, test_desc, test_args=None):
""" Run one scheduler test """
if test_args is None:
test_args = [] # avoid a shared mutable default argument
print(" Test %-25s %s" % ((test_name + ":"), test_desc))
did_fail = False
self.num_tests = self.num_tests + 1
# Test inputs
input_filename = os.path.join(
self.xml_input_dir, "%s.xml" % test_name)
expected_filename = os.path.join(
self.expected_dir, "%s.exp" % test_name)
dot_expected_filename = os.path.join(
self.dot_expected_dir, "%s.dot" % test_name)
scores_filename = os.path.join(
self.scores_dir, "%s.scores" % test_name)
summary_filename = os.path.join(
self.summary_dir, "%s.summary" % test_name)
stderr_expected_filename = os.path.join(
self.stderr_expected_dir, "%s.stderr" % test_name)
# (Intermediate) test outputs
output_filename = os.path.join(
self.outfile_out_dir, "%s.out" % test_name)
dot_output_filename = os.path.join(
self.dot_out_dir, "%s.dot.pe" % test_name)
score_output_filename = os.path.join(
self.scores_out_dir, "%s.scores.pe" % test_name)
summary_output_filename = os.path.join(
self.summary_out_dir, "%s.summary.pe" % test_name)
stderr_output_filename = os.path.join(
self.stderr_out_dir, "%s.stderr.pe" % test_name)
valgrind_output_filename = os.path.join(
self.valgrind_out_dir, "%s.valgrind" % test_name)
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ]
test_cmd = test_cmd + self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
self.num_failed = self.num_failed + 1
return ExitStatus.NOINPUT
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return ExitStatus.NOINPUT
# Run simulation to generate summary output
if self.args.run: # Single test mode
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
print(" ".join(test_cmd_full))
else:
# @TODO Why isn't test_args added here?
test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ]
with io.open(summary_output_filename, "wt") as f:
simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
# This makes diff happy regardless of --enable-compat-2.0.
# Use sed -E to make Linux and BSD special characters more compatible.
sed = subprocess.Popen(["sed", "-E",
"-e", "s/ocf::/ocf:/g",
"-e", r"s/Masters:/Promoted:/",
"-e", r"s/Slaves:/Unpromoted:/",
"-e", r"s/ Master( |\[|$)/ Promoted\1/",
"-e", r"s/ Slave / Unpromoted /",
], stdin=simulation.stdout, stdout=f,
stderr=subprocess.STDOUT)
simulation.stdout.close()
sed.communicate()
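# For example, an older build's summary line "* Masters: [ node1 ]"
# becomes "* Promoted: [ node1 ]", matching the role names used in
# the stored expected summaries.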
if self.args.run:
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
test_cmd_full = test_cmd + [
'-x', input_filename,
'-D', dot_output_filename,
'-G', output_filename,
'-sSQ' ] + test_args
with io.open(stderr_output_filename, "wt") as f_stderr, \
io.open(score_output_filename, "wt") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
if rc != ExitStatus.OK:
self._failed("Test returned: %d" % rc)
did_fail = True
print(" ".join(test_cmd_full))
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
if os.stat(valgrind_output_filename).st_size > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
remove_files([ valgrind_output_filename ])
# Check for core dump
if os.path.isfile("core"):
self._failed("Core-file detected: core." + test_name)
did_fail = True
os.rename("core", "%s/core.%s" % (self.test_home, test_name))
# Check any stderr output
if os.path.isfile(stderr_expected_filename):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
elif os.stat(stderr_output_filename).st_size > 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
remove_files([ stderr_output_filename ])
# Check whether output graph exists, and normalize it
if (not os.path.isfile(output_filename)
or os.stat(output_filename).st_size == 0):
self._error("No graph produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ output_filename ])
return ExitStatus.ERROR
normalize(output_filename)
# Check whether dot output exists, and sort it
if (not os.path.isfile(dot_output_filename) or
os.stat(dot_output_filename).st_size == 0):
self._error("No dot-file summary produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ dot_output_filename, output_filename ])
return ExitStatus.ERROR
with io.open(dot_output_filename, "rt") as f:
first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
last_line = lines[-1] # closing brace
del lines[-1]
lines = sorted(set(lines)) # unique sort
with io.open(dot_output_filename, "wt") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
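# Unique-sorting the dot file body makes the comparison independent of
# the order in which crm_simulate emits edges; e.g. duplicate
# '"A" -> "B" [ style = bold]' lines collapse to a single entry.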
# Check whether score output exists, and sort it
if (not os.path.isfile(score_output_filename)
or os.stat(score_output_filename).st_size == 0):
self._error("No allocation scores produced")
did_fail = True
self.num_failed = self.num_failed + 1
remove_files([ score_output_filename, output_filename ])
return ExitStatus.ERROR
else:
sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
shutil.copyfile(dot_output_filename, dot_expected_filename)
shutil.copyfile(score_output_filename, scores_filename)
shutil.copyfile(summary_output_filename, summary_filename)
print(" Updated expected outputs")
if self._compare_files(summary_filename, summary_output_filename):
self._failed("summary changed")
did_fail = True
if self._compare_files(dot_expected_filename, dot_output_filename):
self._failed("dot-file summary changed")
did_fail = True
else:
remove_files([ dot_output_filename ])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
did_fail = True
if self._compare_files(scores_filename, score_output_filename):
self._failed("scores-file changed")
did_fail = True
remove_files([ output_filename,
dot_output_filename,
score_output_filename,
summary_output_filename])
if did_fail:
self.num_failed = self.num_failed + 1
return ExitStatus.ERROR
return ExitStatus.OK
def run_all(self):
""" Run all defined tests """
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group:
try:
args = test[2]
except IndexError:
args = []
self.run_one(test[0], test[1], args)
print()
def _print_summary(self):
""" Print a summary of parameters for this test run """
print("Test home is:\t" + self.test_home)
print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
if self.valgrind_args:
print("Activating memory testing with valgrind")
print()
def _test_results(self):
if self.num_failed == 0:
shutil.rmtree(self.failed_dir)
return ExitStatus.OK
if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
if self.args.verbose:
self._error("Results of %d failed tests (out of %d):" %
(self.num_failed, self.num_tests))
cat(self.failed_filename)
else:
self._error("Results of %d failed tests (out of %d) are in %s" %
(self.num_failed, self.num_tests, self.failed_filename))
self._error("Use -V to display them after running the tests")
else:
self._error("%d (of %d) tests failed (no diff results)" %
(self.num_failed, self.num_tests))
if os.path.isfile(self.failed_filename):
shutil.rmtree(self.failed_dir)
return ExitStatus.ERROR
def run(self):
""" Run test(s) as specified """
# Check for pre-existing core so we don't think it's from us
if os.path.exists("core"):
self._failed("Can't run with core already present in " + self.test_home)
return ExitStatus.OSFILE
self._print_summary()
# Zero out the error log
self.failed_file = io.open(self.failed_filename, "wt")
if self.args.run is None:
print("Performing the following tests from " + self.args.io_dir)
print()
self.run_all()
print()
self.failed_file.close()
rc = self._test_results()
else:
rc = self.run_one(self.args.run, "Single shot", self.single_test_args)
self.failed_file.close()
if self.num_failed > 0:
print("\nFailures:\nThese have also been written to: " + self.failed_filename + "\n")
cat(self.failed_filename)
shutil.rmtree(self.failed_dir)
return rc
if __name__ == "__main__":
sys.exit(CtsScheduler().run())
# vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120:
diff --git a/cts/scheduler/dot/timeout-by-node.dot b/cts/scheduler/dot/timeout-by-node.dot
new file mode 100644
index 0000000000..b4c0b9727d
--- /dev/null
+++ b/cts/scheduler/dot/timeout-by-node.dot
@@ -0,0 +1,40 @@
+ digraph "g" {
+"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:0_start_0 node2" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:1_start_0 node3" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:2_start_0 node4" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:3_start_0 node5" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:4_start_0 node1" [ style = bold]
+"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:0_monitor_0 node2" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:0_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:0_start_0 node2" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:0_start_0 node2" -> "rsc1:0_monitor_10000 node2" [ style = bold]
+"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_monitor_0 node3" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:1_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_start_0 node3" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:1_start_0 node3" -> "rsc1:1_monitor_10000 node3" [ style = bold]
+"rsc1:1_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_monitor_0 node4" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:2_monitor_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_monitor_10000 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node4" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:2_start_0 node4" -> "rsc1:2_monitor_10000 node4" [ style = bold]
+"rsc1:2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_monitor_0 node5" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:3_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_monitor_10000 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_start_0 node5" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:3_start_0 node5" -> "rsc1:3_monitor_10000 node5" [ style = bold]
+"rsc1:3_start_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_monitor_0 node1" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:4_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_start_0 node1" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:4_start_0 node1" -> "rsc1:4_monitor_10000 node1" [ style = bold]
+"rsc1:4_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/exp/timeout-by-node.exp b/cts/scheduler/exp/timeout-by-node.exp
new file mode 100644
index 0000000000..19d1afcca7
--- /dev/null
+++ b/cts/scheduler/exp/timeout-by-node.exp
@@ -0,0 +1,228 @@
+ [228 lines of transition-graph XML lost in extraction]
diff --git a/cts/scheduler/scores/timeout-by-node.scores b/cts/scheduler/scores/timeout-by-node.scores
new file mode 100644
index 0000000000..adb96a5d21
--- /dev/null
+++ b/cts/scheduler/scores/timeout-by-node.scores
@@ -0,0 +1,61 @@
+
+pcmk__clone_assign: rsc1-clone allocation score on node1: 0
+pcmk__clone_assign: rsc1-clone allocation score on node2: 0
+pcmk__clone_assign: rsc1-clone allocation score on node3: 0
+pcmk__clone_assign: rsc1-clone allocation score on node4: 0
+pcmk__clone_assign: rsc1-clone allocation score on node5: 0
+pcmk__clone_assign: rsc1:0 allocation score on node1: 0
+pcmk__clone_assign: rsc1:0 allocation score on node2: 0
+pcmk__clone_assign: rsc1:0 allocation score on node3: 0
+pcmk__clone_assign: rsc1:0 allocation score on node4: 0
+pcmk__clone_assign: rsc1:0 allocation score on node5: 0
+pcmk__clone_assign: rsc1:1 allocation score on node1: 0
+pcmk__clone_assign: rsc1:1 allocation score on node2: 0
+pcmk__clone_assign: rsc1:1 allocation score on node3: 0
+pcmk__clone_assign: rsc1:1 allocation score on node4: 0
+pcmk__clone_assign: rsc1:1 allocation score on node5: 0
+pcmk__clone_assign: rsc1:2 allocation score on node1: 0
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc1:2 allocation score on node4: 0
+pcmk__clone_assign: rsc1:2 allocation score on node5: 0
+pcmk__clone_assign: rsc1:3 allocation score on node1: 0
+pcmk__clone_assign: rsc1:3 allocation score on node2: 0
+pcmk__clone_assign: rsc1:3 allocation score on node3: 0
+pcmk__clone_assign: rsc1:3 allocation score on node4: 0
+pcmk__clone_assign: rsc1:3 allocation score on node5: 0
+pcmk__clone_assign: rsc1:4 allocation score on node1: 0
+pcmk__clone_assign: rsc1:4 allocation score on node2: 0
+pcmk__clone_assign: rsc1:4 allocation score on node3: 0
+pcmk__clone_assign: rsc1:4 allocation score on node4: 0
+pcmk__clone_assign: rsc1:4 allocation score on node5: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node4: 0
+pcmk__primitive_assign: Fencing allocation score on node5: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:3 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:3 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:4 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:4 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node5: -INFINITY
diff --git a/cts/scheduler/summary/timeout-by-node.summary b/cts/scheduler/summary/timeout-by-node.summary
new file mode 100644
index 0000000000..78f4fcdc8b
--- /dev/null
+++ b/cts/scheduler/summary/timeout-by-node.summary
@@ -0,0 +1,43 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Clone Set: rsc1-clone [rsc1]:
+ * Stopped: [ node1 node2 node3 node4 node5 ]
+
+Transition Summary:
+ * Start rsc1:0 ( node2 )
+ * Start rsc1:1 ( node3 )
+ * Start rsc1:2 ( node4 )
+ * Start rsc1:3 ( node5 )
+ * Start rsc1:4 ( node1 )
+
+Executing Cluster Transition:
+ * Resource action: rsc1:0 monitor on node2
+ * Resource action: rsc1:1 monitor on node3
+ * Resource action: rsc1:2 monitor on node4
+ * Resource action: rsc1:3 monitor on node5
+ * Resource action: rsc1:4 monitor on node1
+ * Pseudo action: rsc1-clone_start_0
+ * Resource action: rsc1:0 start on node2
+ * Resource action: rsc1:1 start on node3
+ * Resource action: rsc1:2 start on node4
+ * Resource action: rsc1:3 start on node5
+ * Resource action: rsc1:4 start on node1
+ * Pseudo action: rsc1-clone_running_0
+ * Resource action: rsc1:0 monitor=10000 on node2
+ * Resource action: rsc1:1 monitor=10000 on node3
+ * Resource action: rsc1:2 monitor=10000 on node4
+ * Resource action: rsc1:3 monitor=10000 on node5
+ * Resource action: rsc1:4 monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Clone Set: rsc1-clone [rsc1]:
+ * Started: [ node1 node2 node3 node4 node5 ]
diff --git a/cts/scheduler/xml/timeout-by-node.xml b/cts/scheduler/xml/timeout-by-node.xml
new file mode 100644
index 0000000000..221885b000
--- /dev/null
+++ b/cts/scheduler/xml/timeout-by-node.xml
@@ -0,0 +1,139 @@
+ [139 lines of CIB XML test input lost in extraction]
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 5cb2b77f79..71beb2fdae 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,754 +1,758 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
enum pe__clone_flags {
// Whether instances should be started sequentially
pe__clone_ordered = (1 << 0),
// Whether promotion scores have been added
pe__clone_promotion_added = (1 << 1),
// Whether promotion constraints have been added
pe__clone_promotion_constrained = (1 << 2),
};
bool pe__clone_is_ordered(const pe_resource_t *clone);
int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
bool pe__clone_flag_is_set(const pe_resource_t *clone, uint32_t flags);
enum pe__group_flags {
pe__group_ordered = (1 << 0), // Members start sequentially
pe__group_colocated = (1 << 1), // Members must be on same node
};
bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
pe_resource_t *pe__last_group_member(const pe_resource_t *group);
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args)
# define pe_err(fmt...) do { \
was_processing_error = TRUE; \
pcmk__config_err(fmt); \
} while (0)
# define pe_warn(fmt...) do { \
was_processing_warning = TRUE; \
pcmk__config_warn(fmt); \
} while (0)
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
#define pe__set_working_set_flags(working_set, flags_to_set) do { \
(working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \
(working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_resource_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_action_flags(action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags(action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
action_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action", action_name, \
(action_flags), \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
action_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", action_name, \
(action_flags), \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_action_flags_as(function, line, action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_order_flags(order_flags, flags_to_set) do { \
order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_order_flags(order_flags, flags_to_clear) do { \
order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_clear), \
#flags_to_clear); \
} while (0)
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = (1 << 0),
pe_wo_restart_type = (1 << 1),
pe_wo_role_after = (1 << 2),
pe_wo_poweroff = (1 << 3),
pe_wo_require_all = (1 << 4),
pe_wo_order_score = (1 << 5),
pe_wo_neg_threshold = (1 << 6),
pe_wo_remove_after = (1 << 7),
pe_wo_ping_node = (1 << 8),
pe_wo_order_inst = (1 << 9),
pe_wo_coloc_inst = (1 << 10),
pe_wo_group_order = (1 << 11),
pe_wo_group_coloc = (1 << 12),
pe_wo_upstart = (1 << 13),
pe_wo_nagios = (1 << 14),
pe_wo_set_ordering = (1 << 15),
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Warn-once", "logging", pe_wo, \
(pe_wo_bit), #pe_wo_bit); \
} \
} while (0);
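/* Example: pe_warn_once(pe_wo_ping_node,
 *                       "Support for ping nodes is deprecated");
 * emits the warning only the first time this code path is hit.
 */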
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pe_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GList *node_list_rh; // List of pe_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
uint32_t flags; // Group of enum pe_ordering flags
void *lh_opaque;
pe_resource_t *lh_rsc;
pe_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pe_resource_t *rh_rsc;
pe_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
bool include_bundle);
int pe__clone_max(const pe_resource_t *clone);
int pe__clone_node_max(const pe_resource_t *clone);
int pe__clone_promoted_max(const pe_resource_t *clone);
int pe__clone_promoted_node_max(const pe_resource_t *clone);
void pe__create_clone_notifications(pe_resource_t *clone);
void pe__free_clone_notification_data(pe_resource_t *clone);
void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
pe_action_t *start, pe_action_t *started,
pe_action_t *stop, pe_action_t *stopped);
pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
bool optional, bool runnable);
void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
bool any_demoting);
bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
/*!
* \internal
* \enum pe__rsc_node
* \brief Type of resource location lookup to perform
*/
enum pe__rsc_node {
pe__rsc_node_assigned = 0, //!< Where resource is assigned
pe__rsc_node_current = 1, //!< Where resource is running
// @COMPAT: Use in native_location() at a compatibility break
pe__rsc_node_pending = 2, //!< Where resource is pending
};
char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set);
pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
void pe_metadata(pcmk__output_t *out);
void verify_pe_options(GHashTable * options);
void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
int flags);
gboolean native_active(pe_resource_t * rsc, gboolean all);
gboolean group_active(pe_resource_t * rsc, gboolean all);
gboolean clone_active(pe_resource_t * rsc, gboolean all);
gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
//! \deprecated This function will be removed in a future release
void native_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void group_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
const pe_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
char *pe__node_display_name(pe_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
void pe__order_notifs_after_fencing(const pe_action_t *action,
pe_resource_t *rsc,
pe_action_t *stonith_op);
static inline const char *
pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_default(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_default(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
void native_free(pe_resource_t * rsc);
void group_free(pe_resource_t * rsc);
void clone_free(pe_resource_t * rsc);
void pe__free_bundle(pe_resource_t *rsc);
enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
gboolean current);
void pe__count_common(pe_resource_t *rsc);
void pe__count_bundle(pe_resource_t *rsc);
void common_free(pe_resource_t * rsc);
pe_node_t *pe__copy_node(const pe_node_t *this_node);
extern time_t get_effective_time(pe_working_set_t * data_set);
/* Failure handling utilities (from failcounts.c) */
// bit flags for fail count handling options
enum pe_fc_flags_e {
pe_fc_default = (1 << 0),
pe_fc_effective = (1 << 1), // don't count expired failures
pe_fc_fillers = (1 << 2), // if container, include filler failures in count
};
int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
const char *reason,
pe_working_set_t *data_set);
/* Functions for finding/counting a resource's active nodes */
bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
pe_node_t **active, unsigned int *count_all,
unsigned int *count_clean);
pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
unsigned int *count);
static inline pe_node_t *
pe__current_node(const pe_resource_t *rsc)
{
return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
}
/* Binary-like operators for lists of nodes */
GHashTable *pe__node_list2table(const GList *list);
extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
void pe__show_node_scores_as(const char *file, const char *function,
int line, bool to_log, const pe_resource_t *rsc,
const char *comment, GHashTable *nodes,
pe_working_set_t *data_set);
#define pe__show_node_scores(level, rsc, text, nodes, data_set) \
pe__show_node_scores_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (data_set))
xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
+GHashTable *pcmk__unpack_action_meta(pe_resource_t *rsc, const pe_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config);
+
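+/* A sketch of likely usage (hypothetical caller; the ownership contract is
+ * an assumption, not documented in this header):
+ *
+ *     GHashTable *meta = pcmk__unpack_action_meta(rsc, node, "monitor",
+ *                                                 10000, op_xml);
+ *     const char *timeout = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ *     g_hash_table_destroy(meta);
+ */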
pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
const pe_node_t *on_node, gboolean optional,
gboolean foo, pe_working_set_t *data_set);
# define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \
optional, TRUE, rsc->cluster);
# define stopped_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOPPED, 0)
# define stopped_action(rsc, node, optional) custom_action( \
rsc, stopped_key(rsc), PCMK_ACTION_STOPPED, node, \
optional, TRUE, rsc->cluster);
# define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), PCMK_ACTION_STOP, node, \
optional, TRUE, rsc->cluster);
# define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), PCMK_ACTION_START, node, \
optional, TRUE, rsc->cluster)
# define started_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RUNNING, 0)
# define started_action(rsc, node, optional) custom_action( \
rsc, started_key(rsc), PCMK_ACTION_RUNNING, node, \
optional, TRUE, rsc->cluster)
# define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node, \
optional, TRUE, rsc->cluster)
# define promoted_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTED, 0)
# define promoted_action(rsc, node, optional) custom_action( \
rsc, promoted_key(rsc), PCMK_ACTION_PROMOTED, node, \
optional, TRUE, rsc->cluster)
# define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \
optional, TRUE, rsc->cluster)
# define demoted_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTED, 0)
# define demoted_action(rsc, node, optional) custom_action( \
rsc, demoted_key(rsc), PCMK_ACTION_DEMOTED, node, \
optional, TRUE, rsc->cluster)
extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
pe_working_set_t *data_set);
pe_action_t *find_first_action(const GList *input, const char *uuid,
const char *task, const pe_node_t *on_node);
enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pe_node_t *on_node);
GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pe_action_t * action);
void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
const char *tag, pe_working_set_t *data_set);
extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
const char *why);
pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
const char *sub_id);
extern void destroy_ticket(gpointer data);
extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(const pe_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return false;
}
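/* For example, if rsc->id is "rsc1:2", pe_base_name_end() points at the
 * final character of the base name "rsc1", so pe_base_name_eq(rsc, "rsc1")
 * returns true and pe_base_name_eq(rsc, "rsc1:2") returns false.
 */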
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
enum rsc_digest_cmp_val {
/*! Digests are the same */
RSC_DIGEST_MATCH = 0,
/*! Params that require a restart changed */
RSC_DIGEST_RESTART,
/*! Some parameter changed. */
RSC_DIGEST_ALL,
/*! rsc op didn't have a digest associated with it, so
* it is unknown if parameters changed or not. */
RSC_DIGEST_UNKNOWN,
};
typedef struct op_digest_cache_s {
enum rsc_digest_cmp_val rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
guint *interval_ms,
const pe_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
pe_working_set_t *data_set);
void pe__free_digests(gpointer ptr);
op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
const xmlNode *xml_op,
pe_node_t *node,
pe_working_set_t *data_set);
pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pe_working_set_t *data_set);
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
const char *reason, pe_action_t *dependency,
pe_working_set_t *data_set);
char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
void pe__add_action_expected_result(pe_action_t *action, int expected_result);
void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
//! \deprecated This function will be removed in a future release
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set);
//! \deprecated This function will be removed in a future release
void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
const pe_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
//! A single instance of a bundle
typedef struct {
int offset; //!< 0-origin index of this instance in bundle
char *ipaddr; //!< IP address associated with this instance
pe_node_t *node; //!< Node created for this instance
pe_resource_t *ip; //!< IP address resource for ipaddr
pe_resource_t *child; //!< Instance of bundled resource
pe_resource_t *container; //!< Container associated with this instance
pe_resource_t *remote; //!< Pacemaker Remote connection into container
} pe__bundle_replica_t;
GList *pe__bundle_containers(const pe_resource_t *bundle);
int pe__bundle_max(const pe_resource_t *rsc);
bool pe__node_is_bundle_instance(const pe_resource_t *bundle,
const pe_node_t *node);
pe_resource_t *pe__bundled_resource(const pe_resource_t *rsc);
const pe_resource_t *pe__get_rsc_in_container(const pe_resource_t *instance);
pe_resource_t *pe__first_container(const pe_resource_t *bundle);
void pe__foreach_bundle_replica(pe_resource_t *bundle,
bool (*fn)(pe__bundle_replica_t *, void *),
void *user_data);
void pe__foreach_const_bundle_replica(const pe_resource_t *bundle,
bool (*fn)(const pe__bundle_replica_t *,
void *),
void *user_data);
pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
const pe_node_t *node);
bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
pe_working_set_t *data_set,
xmlNode *xml, const char *field);
const char *pe__node_attribute_calculated(const pe_node_t *node,
const char *name,
const pe_resource_t *rsc,
enum pe__rsc_node node_type,
bool force_host);
const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
bool pe__is_universal_clone(const pe_resource_t *rsc,
const pe_working_set_t *data_set);
void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
pe_node_t *node, enum pcmk__check_parameters,
pe_working_set_t *data_set);
void pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*,
const xmlNode*,
enum pcmk__check_parameters));
void pe__free_param_checks(pe_working_set_t *data_set);
bool pe__shutdown_requested(const pe_node_t *node);
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
/*!
* \internal
* \brief Register xml formatting message functions.
*
* \param[in,out] out Output object to register messages with
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
gboolean overwrite, pe_working_set_t *data_set);
bool pe__resource_is_disabled(const pe_resource_t *rsc);
pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
const pe_node_t *node,
pe_working_set_t *data_set);
GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node);
bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
const char *pe__clone_child_id(const pe_resource_t *rsc);
int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
int pe__node_health(pe_node_t *node);
static inline enum pcmk__health_strategy
pe__health_strategy(pe_working_set_t *data_set)
{
return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY));
}
static inline int
pe__health_score(const char *option, pe_working_set_t *data_set)
{
return char2score(pe_pref(data_set->config_hash, option));
}
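/* Illustrative (option name is an example): pe__health_score() looks up a
 * node-health option such as "node-health-red" in the cluster configuration
 * and converts it with char2score(), so a configured value of "-INFINITY"
 * becomes the internal minimum score.
 */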
/*!
* \internal
* \brief Return a string suitable for logging as a node name
*
* \param[in] node Node to return a node name string for
*
* \return Node name if available, otherwise node ID if available,
* otherwise "unspecified node" if node is NULL or "unidentified node"
* if node has neither a name nor ID.
*/
static inline const char *
pe__node_name(const pe_node_t *node)
{
if (node == NULL) {
return "unspecified node";
} else if (node->details->uname != NULL) {
return node->details->uname;
} else if (node->details->id != NULL) {
return node->details->id;
} else {
return "unidentified node";
}
}
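/* Usage sketch: because pe__node_name() never returns NULL, callers can log
 * a node unconditionally, for example:
 *
 *     crm_info("Processing %s", pe__node_name(node)); // node may be NULL
 */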
/*!
* \internal
* \brief Check whether two node objects refer to the same node
*
* \param[in] node1 First node object to compare
* \param[in] node2 Second node object to compare
*
* \return true if \p node1 and \p node2 refer to the same node
*/
static inline bool
pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
{
return (node1 != NULL) && (node2 != NULL)
&& (node1->details == node2->details);
}
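/* Node objects are routinely copied (see pe__copy_node()), so pointer
 * comparison is not a reliable identity test. A minimal sketch of the
 * intended use:
 *
 *     pe_node_t *copy = pe__copy_node(node);
 *     CRM_ASSERT(pe__same_node(copy, node)); // same details, so same node
 */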
/*!
* \internal
* \brief Get the operation key from an action history entry
*
* \param[in] xml Action history entry
*
* \return Entry's operation key
*/
static inline const char *
pe__xe_history_key(const xmlNode *xml)
{
if (xml == NULL) {
return NULL;
} else {
/* @COMPAT Pacemaker <= 1.1.5 did not add the key, and used the ID
* instead. Checking for that allows us to process old saved CIBs,
* including some regression tests.
*/
const char *key = crm_element_value(xml, XML_LRM_ATTR_TASK_KEY);
return pcmk__str_empty(key)? ID(xml) : key;
}
}
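/* Illustrative example: for a history entry such as
 *     <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_stop_0" ... />
 * this returns "rsc1_stop_0"; for an entry saved by Pacemaker <= 1.1.5,
 * which lacks operation_key, it falls back to the id attribute instead.
 */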
#endif
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index 4dfb46a4b9..abffd198ef 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,1745 +1,1782 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include "pe_status_private.h"
static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
const pe_resource_t *container, guint interval_ms);
static void
add_singleton(pe_working_set_t *data_set, pe_action_t *action)
{
if (data_set->singletons == NULL) {
data_set->singletons = pcmk__strkey_table(NULL, NULL);
}
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
static pe_action_t *
lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
{
if (data_set->singletons == NULL) {
return NULL;
}
return g_hash_table_lookup(data_set->singletons, action_uuid);
}
/*!
* \internal
* \brief Find an existing action that matches arguments
*
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
* \param[in] data_set Cluster working set
*
* \return Existing action that matches arguments (or NULL if none)
*/
static pe_action_t *
find_existing_action(const char *key, const pe_resource_t *rsc,
const pe_node_t *node, const pe_working_set_t *data_set)
{
GList *matches = NULL;
pe_action_t *action = NULL;
/* When rsc is NULL, it would be quicker to check data_set->singletons,
* but checking all data_set->actions takes the node into account.
*/
matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
}
CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
action = matches->data;
g_list_free(matches);
return action;
}
static xmlNode *
find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
gboolean include_disabled)
{
guint interval_ms = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *interval_spec = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if ((strstr(key, PCMK_ACTION_MIGRATE_TO) != NULL)
|| (strstr(key, PCMK_ACTION_MIGRATE_FROM) != NULL)) {
local_key = pcmk__op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = pcmk__op_key(rsc->id, PCMK_ACTION_NOTIFY, 0);
key = local_key;
goto retry;
}
return NULL;
}
xmlNode *
find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
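/* Usage sketch: lookup keys use the pcmk__op_key() form
 * "<resource>_<action>_<interval-ms>", so the configuration for a 10-second
 * monitor of a hypothetical resource rsc1 could be found with:
 *
 *     char *key = pcmk__op_key("rsc1", PCMK_ACTION_MONITOR, 10000);
 *     xmlNode *op = find_rsc_op_entry(rsc, key);
 *     free(key);
 */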
/*!
* \internal
* \brief Create a new action object
*
* \param[in] key Action key
* \param[in] task Action name
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
static pe_action_t *
new_action(char *key, const char *task, pe_resource_t *rsc,
const pe_node_t *node, bool optional, bool for_graph,
pe_working_set_t *data_set)
{
pe_action_t *action = calloc(1, sizeof(pe_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
action->extra = pcmk__strkey_table(free, free);
- action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
pe__set_action_flags(action, pe_action_dc);
}
pe__set_action_flags(action, pe_action_runnable);
if (optional) {
pe__set_action_flags(action, pe_action_optional);
} else {
pe__clear_action_flags(action, pe_action_optional);
}
- if (rsc != NULL) {
+ if (rsc == NULL) {
+ action->meta = pcmk__strkey_table(free, free);
+ } else {
guint interval_ms = 0;
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
unpack_operation(action, action->op_entry, rsc->container, interval_ms);
}
if (for_graph) {
pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
(optional? "optional" : "required"),
data_set->action_id, key, task,
((rsc == NULL)? "no resource" : rsc->id),
pe__node_name(node));
action->id = data_set->action_id++;
data_set->actions = g_list_prepend(data_set->actions, action);
if (rsc == NULL) {
add_singleton(data_set, action);
} else {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
return action;
}
/*!
* \internal
* \brief Evaluate node attribute values for an action
*
* \param[in,out] action Action to unpack attributes for
* \param[in,out] data_set Cluster working set
*/
static void
unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
{
if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
&& (action->op_entry != NULL)) {
pe_rule_eval_data_t rule_data = {
.node_hash = action->node->details->attrs,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe__set_action_flags(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
&rule_data, action->extra, NULL,
FALSE, data_set);
}
}
/*!
* \internal
* \brief Update an action's optional flag
*
* \param[in,out] action Action to update
* \param[in] optional Requested optional status
*/
static void
update_action_optional(pe_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
&& !pcmk_is_set(action->flags, pe_action_pseudo)
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
pe__set_action_flags(action, pe_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
pe__clear_action_flags(action, pe_action_optional);
}
}
static enum pe_quorum_policy
effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
{
enum pe_quorum_policy policy = data_set->no_quorum_policy;
if (pcmk_is_set(data_set->flags, pcmk_sched_quorate)) {
policy = pcmk_no_quorum_ignore;
} else if (data_set->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
case pcmk_role_promoted:
case pcmk_role_unpromoted:
if (rsc->next_role > pcmk_role_unpromoted) {
pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
policy = pcmk_no_quorum_ignore;
break;
default:
policy = pcmk_no_quorum_stop;
break;
}
}
return policy;
}
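/* For example, when quorum is lost with no-quorum-policy=demote: a resource
 * in the promoted or unpromoted role has its next role capped at unpromoted
 * and is then treated as pcmk_no_quorum_ignore, while a resource in any
 * other role (e.g. a started primitive) is handled as pcmk_no_quorum_stop.
 */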
/*!
* \internal
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_working_set_t *data_set)
{
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
pe__clear_action_flags(action, pe_action_runnable);
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& for_graph
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
pe_fence_node(data_set, action->node, "stop is unrunnable", false);
}
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& action->node->details->pending) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
} else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(data_set, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
* host is unclean and cannot be fenced.
*/
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
pe__set_action_flags(action, pe_action_runnable);
}
} else {
switch (effective_quorum_policy(action->rsc, data_set)) {
case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
pe__set_action_flags(action, pe_action_runnable);
break;
}
}
}
/*!
* \internal
* \brief Update a resource object's flags for a new action on it
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] action New action
*/
static void
update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
{
/* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
* within Pacemaker, and will eventually be removed
*/
if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pcmk_rsc_stopping);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (pcmk_is_set(action->flags, pe_action_runnable)) {
pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
static bool
valid_stop_on_fail(const char *value)
{
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
/*!
* \internal
* \brief Validate (and possibly reset) resource action's on_fail meta-attribute
*
* \param[in] rsc Resource that action is for
* \param[in] action_name Action name
* \param[in] action_config Action configuration XML from CIB (if any)
* \param[in,out] meta Table of action meta-attributes
- *
- * \return (Possibly new) value of on-fail meta-attribute
*/
-static const char *
+static void
validate_on_fail(const pe_resource_t *rsc, const char *action_name,
const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
char *key = NULL;
char *new_value = NULL;
// Stop actions can only use certain on-fail values
if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
"allowed for stop", rsc->id, value);
g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
- return NULL;
+ return;
}
/* Demote actions default on-fail to the on-fail value for the first
* recurring monitor for the promoted role (if any).
*/
if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
&& (value == NULL)) {
/* @TODO This does not consider promote options set in a meta-attribute
* block (which may have rules that need to be evaluated) rather than
* XML properties.
*/
for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
const char *promote_on_fail = NULL;
/* We only care about explicit on-fail (if promote uses default, so
* can demote)
*/
promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
if (promote_on_fail == NULL) {
continue;
}
// We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
|| !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
}
// We only care about enabled monitors
if ((pcmk__xe_get_bool_attr(operation, "enabled",
&enabled) == pcmk_rc_ok) && !enabled) {
continue;
}
// Demote actions can't default to on-fail="demote"
if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
// Use value from first applicable promote action found
key = strdup(XML_OP_ATTR_ON_FAIL);
new_value = strdup(promote_on_fail);
CRM_ASSERT((key != NULL) && (new_value != NULL));
g_hash_table_insert(meta, key, new_value);
- return g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
}
- return NULL;
+ return;
}
if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
&& !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
key = strdup(XML_OP_ATTR_ON_FAIL);
new_value = strdup("ignore");
CRM_ASSERT((key != NULL) && (new_value != NULL));
g_hash_table_insert(meta, key, new_value);
- return g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ return;
}
// on-fail="demote" is allowed only for certain actions
if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
name = crm_element_value(action_config, "name");
role = crm_element_value(action_config, "role");
interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
&& (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
|| !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
"allowed for it", rsc->id, name);
g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
- return NULL;
+ return;
}
}
-
- return value;
}
static int
unpack_timeout(const char *value)
{
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
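/* Illustrative: crm_get_msec() accepts interval specifications such as
 * "30s" or "2min", so unpack_timeout("30s") yields 30000, while a missing
 * or unparsable value (negative result) falls back to
 * PCMK_DEFAULT_ACTION_TIMEOUT_MS.
 */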
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, const xmlNode *xml_obj,
guint interval_ms, const crm_time_t *now,
long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
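/* Worked example (hypothetical values): with a 60-second interval and an
 * origin 110 seconds before now, 110 % 60 = 50 seconds of the current
 * interval have elapsed, leaving 60 - 50 = 10 seconds until the next run,
 * so *start_delay is set to 10000 (milliseconds).
 */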
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
pcmk__itoa(start_delay));
}
}
return start_delay;
}
static xmlNode *
find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
{
guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
const char *name = NULL;
const char *interval_spec = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms && (interval_ms < min_interval_ms)) {
min_interval_ms = interval_ms;
op = operation;
}
}
}
return op;
}
/*!
* \internal
- * \brief Unpack action configuration
+ * \brief Unpack action meta-attributes
+ *
+ * \param[in,out] rsc Resource that action is for
+ * \param[in] node Node that action is on
+ * \param[in] action_name Action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] action_config Action XML configuration from CIB (if any)
*
* Unpack a resource action's meta-attributes (normalizing the interval,
- * timeout, and start delay values as integer milliseconds), requirements, and
- * failure policy from its CIB XML configuration (including defaults).
+ * timeout, and start delay values as integer milliseconds) from its CIB XML
+ * configuration (including defaults).
*
- * \param[in,out] action Resource action to unpack into
- * \param[in] xml_obj Action configuration XML (NULL for defaults only)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in] interval_ms How frequently to perform the operation
+ * \return Newly allocated hash table with normalized action meta-attributes
*/
-static void
-unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container, guint interval_ms)
+GHashTable *
+pcmk__unpack_action_meta(pe_resource_t *rsc, const pe_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config)
{
- int timeout_ms = 0;
- const char *value = NULL;
- bool is_probe = false;
+ GHashTable *meta = NULL;
+ char *name = NULL;
+ char *value = NULL;
+ const char *timeout_spec = NULL;
+ const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
- .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
- .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
- .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
- .op_name = action->task,
- .interval = interval_ms
+ .op_name = action_name,
+ .interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
+ .node_hash = (node == NULL)? NULL : node->details->attrs,
.role = pcmk_role_unknown,
- .now = action->rsc->cluster->now,
+ .now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
- .op_data = &op_rule_data
+ .op_data = &op_rule_data,
};
- CRM_CHECK(action && action->rsc, return);
-
- is_probe = pcmk_is_probe(action->task, interval_ms);
+ meta = pcmk__strkey_table(free, free);
// Cluster-wide
- pe__unpack_dataset_nvpairs(action->rsc->cluster->op_defaults,
- XML_TAG_META_SETS, &rule_data, action->meta,
- NULL, FALSE, action->rsc->cluster);
-
- // Determine probe default timeout differently
- if (is_probe) {
- xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
-
- if (min_interval_mon) {
- value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
- if (value) {
- crm_trace("\t%s: Setting default timeout to minimum-interval "
- "monitor's timeout '%s'", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
+ &rule_data, meta, NULL, FALSE, rsc->cluster);
+
+ // Derive default timeout for probes from recurring monitor timeouts
+ if (pcmk_is_probe(action_name, interval_ms)) {
+ xmlNode *min_interval_mon = find_min_interval_mon(rsc, FALSE);
+
+ if (min_interval_mon != NULL) {
+ /* @TODO This does not consider timeouts set in meta_attributes
+ * blocks (which may also have rules that need to be evaluated).
+ */
+ timeout_spec = crm_element_value(min_interval_mon,
+ XML_ATTR_TIMEOUT);
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting default timeout for %s probe to "
+ "most frequent monitor's timeout '%s'",
+ rsc->id, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
}
- if (xml_obj) {
- xmlAttrPtr xIter = NULL;
-
+ if (action_config != NULL) {
// take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE,
- action->rsc->cluster);
+ pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
+ meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
+ * (See below for the only exception, fence device start/probe timeout.)
*/
- for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
- const char *prop_name = (const char *)xIter->name;
- const char *prop_value = pcmk__xml_attr_value(xIter);
+ for (xmlAttrPtr attr = action_config->properties;
+ attr != NULL; attr = attr->next) {
+ name = strdup((const char *) attr->name);
+ value = strdup(pcmk__xml_attr_value(attr));
- g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
- g_hash_table_remove(action->meta, "id");
+ g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
- g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
- crm_strdup_printf("%u", interval_ms));
+ name = strdup(XML_LRM_ATTR_INTERVAL);
+ CRM_ASSERT(name != NULL);
+ value = crm_strdup_printf("%u", interval_ms);
+ g_hash_table_insert(meta, name, value);
} else {
- g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
- }
-
- /*
- * Timeout order of precedence:
- * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
- * and task is start or a probe; pcmk_monitor_timeout works
- * by default for a recurring monitor)
- * 2. explicit op timeout on the primitive
- * 3. default op timeout
- * a. if probe, then min-interval monitor's timeout
- * b. else, in XML_CIB_TAG_OPCONFIG
+ g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /* Timeout order of precedence (highest to lowest):
+ * 1. pcmk_monitor_timeout resource parameter (only for starts and probes
+ * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
+ * monitors via the executor instead)
+ * 2. timeout configured in <op> (with <op timeout> taking precedence over
+ * <op> <meta_attributes>)
+ * 3. timeout configured in <op_defaults> <meta_attributes>
* 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
- *
- * #1 overrides general rule of XML property having highest
- * precedence.
*/
+
+ // Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
- && (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)
- || is_probe)) {
-
- GHashTable *params = pe_rsc_params(action->rsc, action->node,
- action->rsc->cluster);
+ && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
+ || pcmk_is_probe(action_name, interval_ms))) {
+
+ GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
+
+ timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting timeout for %s %s to "
+ "pcmk_monitor_timeout (%s)",
+ rsc->id, action_name, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
+ }
+ }
- value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ // Normalize timeout to positive milliseconds
+ name = strdup(XML_ATTR_TIMEOUT);
+ CRM_ASSERT(name != NULL);
+ timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
+
+ // Ensure on-fail has a valid value
+ validate_on_fail(rsc, action_name, action_config, meta);
+
+ // Normalize start-delay
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
+ if (str != NULL) {
+ unpack_start_delay(str, meta);
+ } else {
+ long long start_delay = 0;
- if (value) {
- crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
- "overriding default", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(str, action_config, interval_ms,
+ rsc->cluster->now, &start_delay)) {
+ name = strdup(XML_OP_ATTR_START_DELAY);
+ CRM_ASSERT(name != NULL);
+ g_hash_table_insert(meta, name,
+ crm_strdup_printf("%lld", start_delay));
}
}
+ return meta;
+}
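+/* Usage sketch (hypothetical caller): unpack the effective meta-attributes
+ * of a 10-second monitor without creating an action object:
+ *
+ *     GHashTable *meta = pcmk__unpack_action_meta(rsc, node,
+ *                                                 PCMK_ACTION_MONITOR,
+ *                                                 10000, op_xml);
+ *     const char *timeout = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ *     g_hash_table_destroy(meta);
+ */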
- // Normalize timeout to positive milliseconds
- value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
- timeout_ms = unpack_timeout(value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- pcmk__itoa(timeout_ms));
+/*!
+ * \internal
+ * \brief Unpack action configuration
+ *
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
+ *
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
+ * \param[in] container Resource that contains affected resource, if any
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
+ const pe_resource_t *container, guint interval_ms)
+{
+ const char *value = NULL;
+
+ action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
+ action->task, interval_ms, xml_obj);
if (!pcmk__strcase_any_of(action->task, PCMK_ACTION_START,
PCMK_ACTION_PROMOTE, NULL)) {
action->needs = pcmk_requires_nothing;
value = "nothing (not start or promote)";
} else if (pcmk_is_set(action->rsc->flags, pcmk_rsc_needs_fencing)) {
action->needs = pcmk_requires_fencing;
value = "fencing";
} else if (pcmk_is_set(action->rsc->flags, pcmk_rsc_needs_quorum)) {
action->needs = pcmk_requires_quorum;
value = "quorum";
} else {
action->needs = pcmk_requires_nothing;
value = "nothing";
}
pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
- value = validate_on_fail(action->rsc, action->task, xml_obj, action->meta);
-
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (value == NULL) {
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_block;
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "node fencing";
if (!pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_standby_node;
value = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
action->on_fail = pcmk_on_fail_ignore;
value = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_ban;
value = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
if (container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL)
|| !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed))
&& pe__resource_is_remote_conn(action->rsc)
&& !(pcmk__str_eq(action->task, PCMK_ACTION_MONITOR,
pcmk__str_casei)
&& (interval_ms == 0))
&& !pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop unmanaged remote node (enforcing default)";
} else {
if (pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = pcmk_role_stopped;
}
action->on_fail = pcmk_on_fail_reset_remote;
}
} else if ((value == NULL)
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP,
pcmk__str_casei)) {
if (pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "resource fence (default)";
} else {
action->on_fail = pcmk_on_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "%s failure handling: %s",
action->uuid, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pe_wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == pcmk_role_unknown) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == pcmk_role_unknown) {
if (pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
action->fail_role = pcmk_role_unpromoted;
} else {
action->fail_role = pcmk_role_started;
}
}
pe_rsc_trace(action->rsc, "%s failure results in: %s",
action->uuid, role2text(action->fail_role));
-
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
- if (value) {
- unpack_start_delay(value, action->meta);
- } else {
- long long start_delay = 0;
-
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms,
- action->rsc->cluster->now, &start_delay)) {
- g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
- crm_strdup_printf("%lld", start_delay));
- }
- }
}
/*!
* \brief Create or update an action object
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in,out] key Action key (must be non-NULL)
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] save_action Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \return Action object corresponding to arguments
* \note This function takes ownership of (and might free) \p key. If
* \p save_action is true, \p data_set will own the returned action,
* otherwise it is the caller's responsibility to free the return value
* with pe_free_action().
*/
pe_action_t *
custom_action(pe_resource_t *rsc, char *key, const char *task,
const pe_node_t *on_node, gboolean optional, gboolean save_action,
pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
if (save_action) {
action = find_existing_action(key, rsc, on_node, data_set);
}
if (action == NULL) {
action = new_action(key, task, rsc, on_node, optional, save_action,
data_set);
} else {
free(key);
}
update_action_optional(action, optional);
if (rsc != NULL) {
if (action->node != NULL) {
unpack_action_node_attributes(action, data_set);
}
update_resource_action_runnable(action, save_action, data_set);
if (save_action) {
update_resource_flags_for_action(rsc, action);
}
}
return action;
}
pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
pe_action_t *op = lookup_singleton(data_set, name);
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
}
return op;
}
static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
pe_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
} else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
PCMK_STONITH_PROVIDES),
PCMK__VALUE_UNFENCING,
pcmk__str_casei)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
static int
node_priority_fencing_delay(const pe_node_t *node,
const pe_working_set_t *data_set)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
if (data_set->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = gIter->data;
if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if we have more than half of the cluster members
if (online_count > member_count / 2) {
return 0;
}
/* All the nodes have equal priority.
* Any configured corresponding `pcmk_delay_base/max` will be applied. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return data_set->priority_fencing_delay;
}
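/* Worked example (hypothetical cluster): with three members of priority
 * 1/2/3 and only the priority-3 node offline, online_count (2) exceeds
 * member_count / 2 (1), so no delay is requested. If two members were
 * offline and the target held the top priority, the configured
 * priority-fencing-delay would be returned instead.
 */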
pe_action_t *
pe_fence_op(pe_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay, pe_working_set_t *data_set)
{
char *op_key = NULL;
pe_action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s",
PCMK_ACTION_STONITH, node->details->uname, op);
stonith_op = lookup_singleton(data_set, op_key);
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pcmk_is_set(data_set->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
GList *matches = find_unfencing_devices(data_set->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
pe_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = pe__compare_fencing_digest(match, agent, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
if (!pcmk__is_daemon && data_set->priv != NULL) {
pcmk__output_t *out = data_set->priv;
out->info(out,
"notice: Unfencing node %s because the "
"definition of %s changed",
pe__node_name(node), match->id);
}
}
pcmk__g_strcat(digests_all,
match->id, ":", agent, ":",
data->digest_all_calc, ",", NULL);
pcmk__g_strcat(digests_secure,
match->id, ":", agent, ":",
data->digest_secure_calc, ",", NULL);
}
key = strdup(XML_OP_ATTR_DIGESTS_ALL);
value = strdup((const char *) digests_all->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_all, TRUE);
key = strdup(XML_OP_ATTR_DIGESTS_SECURE);
value = strdup((const char *) digests_secure->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_secure, TRUE);
}
} else {
free(op_key);
}
if (data_set->priority_fencing_delay > 0
/* It's a suitable case where `priority-fencing-delay` applies.
* At least add `priority-fencing-delay` field as an indicator. */
&& (priority_delay
/* The priority delay needs to be recalculated if this function has
* been called by schedule_fencing_and_shutdowns() after node
* priority has already been calculated by native_add_running().
*/
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
/* Add `priority-fencing-delay` to the fencing op even if it's 0 for
* the targeting node. So that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe__clear_action_flags(stonith_op, pe_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
void
pe_free_action(pe_action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
int
pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
const char *timeout_spec = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
pcmk__str_casei)) {
timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout_spec == NULL && data_set->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE, data_set);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
enum action_tasks
get_complex_task(const pe_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
case pcmk_action_stopped:
case pcmk_action_started:
case pcmk_action_demoted:
case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
break;
default:
break;
}
}
return task;
}
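/* Illustrative: for a primitive, text2task("stopped") is folded back to
 * pcmk_action_stop; the enum is laid out so each "-ed" pseudo-task directly
 * follows its atomic counterpart, which is what makes --task sufficient.
 * For collective resources (groups, clones, bundles) the task is returned
 * unchanged.
 */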
/*!
* \internal
* \brief Find first matching action in a list
*
* \param[in] input List of actions to search
* \param[in] uuid If not NULL, action must have this UUID
* \param[in] task If not NULL, action must have this action name
* \param[in] on_node If not NULL, action must be on this node
*
* \return First action in list that matches criteria, or NULL if none
*/
pe_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
const pe_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
} else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
GList *
find_actions(GList *input, const char *key, const pe_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, pe__node_name(on_node));
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
&& pcmk__str_eq(on_node->details->id, action->node->details->id,
pcmk__str_casei)) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, NULL node or action node will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
/*!
* \internal
* \brief Create an action reason string based on the action itself
*
* \param[in] action Action to create reason string for
* \param[in] flag Action flag that was cleared
*
* \return Newly allocated string suitable for use as action reason
* \note It is the caller's responsibility to free() the result.
*/
char *
pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
case pe_action_runnable:
change = "unrunnable";
break;
case pe_action_migrate_runnable:
change = "unmigrateable";
break;
case pe_action_optional:
change = "required";
break;
default:
// Bug: caller passed unsupported flag
CRM_CHECK(change != NULL, change = "");
break;
}
return crm_strdup_printf("%s%s%s %s", change,
(action->rsc == NULL)? "" : " ",
(action->rsc == NULL)? "" : action->rsc->id,
action->task);
}
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, pcmk__s(reason, "(none)"));
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, pcmk__s(reason, "(none)"));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
pcmk__str_update(&action->reason, reason);
}
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
* \param[in,out] data_set Cluster working set
*
* \return New action to clear resource history
*/
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
char *key = NULL;
CRM_ASSERT(rsc && node);
key = pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0);
return custom_action(rsc, key, PCMK_ACTION_LRM_DELETE, node, FALSE, TRUE,
data_set);
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
int
pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET);
const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET);
bool same_node = true;
/* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via
* 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c).
*
* In case that any of the lrm_rsc_op entries doesn't have on_node
* attribute, we need to explicitly tell whether the two operations are on
* the same node.
*/
if (a_node == NULL || b_node == NULL) {
same_node = same_node_default;
} else {
same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
}
if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
/* We have duplicate lrm_rsc_op entries in the status
* section which is unlikely to be a good thing
* - we can handle it easily enough, but we need to get
* to the bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (a_call_id >= 0 && b_call_id >= 0
&& (!same_node || a_call_id == b_call_id)) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
return pe__is_newer_op(xml_a, xml_b, true);
}
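/* Usage sketch: sort a list of resource history entries from oldest to
 * newest (hypothetical variable):
 *
 *     op_list = g_list_sort(op_list, sort_op_by_callid);
 */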
/*!
* \internal
* \brief Create a new pseudo-action for a resource
*
* \param[in,out] rsc Resource to create action for
* \param[in] task Action name
* \param[in] optional Whether action should be considered optional
* \param[in] runnable Whether action should be considered runnable
*
* \return New action object corresponding to arguments
*/
pe_action_t *
pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
pe_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
optional, TRUE, rsc->cluster);
pe__set_action_flags(action, pe_action_pseudo);
if (runnable) {
pe__set_action_flags(action, pe_action_runnable);
}
return action;
}
/*!
* \internal
* \brief Add the expected result to an action
*
* \param[in,out] action Action to add expected result to
* \param[in] expected_result Expected result to add
*
* \note This is more efficient than calling add_hash_param().
*/
void
pe__add_action_expected_result(pe_action_t *action, int expected_result)
{
char *name = NULL;
CRM_ASSERT((action != NULL) && (action->meta != NULL));
name = strdup(XML_ATTR_TE_TARGET_RC);
CRM_ASSERT (name != NULL);
g_hash_table_insert(action->meta, name, pcmk__itoa(expected_result));
}
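/* Usage sketch (hypothetical): a probe of a resource that is expected to be
 * inactive could record "not running" as its expected result:
 *
 *     pe__add_action_expected_result(probe, PCMK_OCF_NOT_RUNNING);
 */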
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index dd6ec4254a..50f9f64b4a 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -1,1313 +1,1319 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
CRM_TRACE_INIT_DATA(pe_rules);
/*!
* \brief Evaluate any rules contained by given XML element
*
* \param[in,out] ruleset XML element possibly containing rule sub-elements
* \param[in] node_hash Node attributes to use to evaluate expressions
* \param[in] now Time to use when evaluating expressions
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if there are no rules, or if any rule present is in effect, else FALSE
*/
gboolean
pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_rules(ruleset, &rule_data, next_change);
}
gboolean
pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_expr(rule, &rule_data, next_change);
}
/*!
* \brief Evaluate one rule subelement (pass/fail)
*
* A rule element may contain another rule, a node attribute expression, or a
* date expression. Given any one of those, evaluate it and return whether it
* passed.
*
* \param[in,out] expr Rule subelement XML
* \param[in] node_hash Node attributes to use when evaluating expression
* \param[in] role Resource role to use when evaluating expression
* \param[in] now Time to use when evaluating expression
* \param[out] next_change If not NULL, set to when evaluation will change
* \param[in] match_data If not NULL, resource back-references and params
*
* \return TRUE if expression is in effect under given conditions, else FALSE
*/
gboolean
pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, crm_time_t *next_change,
pe_match_data_t *match_data)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = role,
.now = now,
.match_data = match_data,
.rsc_data = NULL,
.op_data = NULL
};
return pe_eval_subexpr(expr, &rule_data, next_change);
}
enum expression_type
find_expression_type(xmlNode * expr)
{
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
if (pcmk__xe_is(expr, PCMK_XE_DATE_EXPRESSION)) {
return time_expr;
} else if (pcmk__xe_is(expr, PCMK_XE_RSC_EXPRESSION)) {
return rsc_expr;
} else if (pcmk__xe_is(expr, PCMK_XE_OP_EXPRESSION)) {
return op_expr;
} else if (pcmk__xe_is(expr, XML_TAG_RULE)) {
return nested_rule;
} else if (!pcmk__xe_is(expr, XML_TAG_EXPRESSION)) {
return not_expr;
} else if (pcmk__str_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
return loc_expr;
} else if (pcmk__str_eq(attr, CRM_ATTR_ROLE, pcmk__str_none)) {
return role_expr;
}
return attr_expr;
}
/* As per the nethack rules:
*
* moon period = 29.53058 days ~= 30, year = 365.2422 days
* days moon phase advances on first day of year compared to preceding year
* = 365.2422 - 12*29.53058 ~= 11
* years in Metonic cycle (time until same phases fall on the same days of
* the month) = 18.6 ~= 19
* moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30
* (29 as initial condition)
* current phase in days = first day phase + days elapsed in year
* 6 moons ~= 177 days
* 177 ~= 8 reported phases * 22
* + 11/22 for rounding
*
* 0-7, with 0: new, 4: full
*/
static int
phase_of_the_moon(const crm_time_t *now)
{
uint32_t epact, diy, goldn;
uint32_t y;
crm_time_get_ordinal(now, &y, &diy);
goldn = (y % 19) + 1;
epact = (11 * goldn + 18) % 30;
if ((epact == 25 && goldn > 11) || epact == 24)
epact++;
return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7);
}
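/* Worked example (illustrative date): year 2023, day-of-year 100.
 *   goldn = (2023 % 19) + 1 = 10
 *   epact = (11 * 10 + 18) % 30 = 8          (neither correction applies)
 *   phase = ((((100 + 8) * 6) + 11) % 177) / 22 & 7
 *         = (659 % 177) / 22 = 128 / 22 = 5  (just past full)
 */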
static int
check_one(const xmlNode *cron_spec, const char *xml_field, uint32_t time_field)
{
int rc = pcmk_rc_undetermined;
const char *value = crm_element_value(cron_spec, xml_field);
long long low, high;
if (value == NULL) {
/* Return pcmk_rc_undetermined if the field is missing. */
goto bail;
}
if (pcmk__parse_ll_range(value, &low, &high) != pcmk_rc_ok) {
goto bail;
} else if (low == high) {
/* A single number was given, not a range. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low != -1 && high != -1) {
/* This is a range with both bounds. */
if (time_field < low) {
rc = pcmk_rc_before_range;
} else if (time_field > high) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
}
} else if (low == -1) {
/* This is a range with no starting value. */
rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range;
} else if (high == -1) {
/* This is a range with no ending value. */
rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range;
}
bail:
if (rc == pcmk_rc_within_range) {
crm_debug("Condition '%s' in %s: passed", value, xml_field);
} else {
crm_debug("Condition '%s' in %s: failed", value, xml_field);
}
return rc;
}
static gboolean
check_passes(int rc) {
/* _within_range is obvious. _undetermined is a pass because
* this is the return value if a field is not given. In this
* case, we just want to ignore it and check other fields to
* see if they place some restriction on what can pass.
*/
return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined;
}
#define CHECK_ONE(spec, name, var) do { \
int subpart_rc = check_one(spec, name, var); \
if (check_passes(subpart_rc) == FALSE) { \
return subpart_rc; \
} \
} while (0)
int
pe_cron_range_satisfied(const crm_time_t *now, const xmlNode *cron_spec)
{
uint32_t h, m, s, y, d, w;
CRM_CHECK(now != NULL, return pcmk_rc_op_unsatisfied);
crm_time_get_gregorian(now, &y, &m, &d);
CHECK_ONE(cron_spec, "years", y);
CHECK_ONE(cron_spec, "months", m);
CHECK_ONE(cron_spec, "monthdays", d);
crm_time_get_timeofday(now, &h, &m, &s);
CHECK_ONE(cron_spec, "hours", h);
CHECK_ONE(cron_spec, "minutes", m);
CHECK_ONE(cron_spec, "seconds", s);
crm_time_get_ordinal(now, &y, &d);
CHECK_ONE(cron_spec, "yeardays", d);
crm_time_get_isoweek(now, &y, &w, &d);
CHECK_ONE(cron_spec, "weekyears", y);
CHECK_ONE(cron_spec, "weeks", w);
CHECK_ONE(cron_spec, "weekdays", d);
CHECK_ONE(cron_spec, "moon", phase_of_the_moon(now));
if (crm_element_value(cron_spec, "moon") != NULL) {
pcmk__config_warn("Support for 'moon' in date_spec elements "
"(such as %s) is deprecated and will be removed "
"in a future release of Pacemaker", ID(cron_spec));
}
/* If we get here, either no fields were specified (which is success), or all
* the fields that were specified had their conditions met (which is also a
* success). Thus, the result is success.
*/
return pcmk_rc_ok;
}
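/* For example, a date_spec like this hypothetical CIB snippet passes only on
 * weekdays (ISO: 1=Monday) between 09:00:00 and 16:59:59; each absent field
 * yields pcmk_rc_undetermined, which check_passes() treats as a pass:
 *
 *     <date_spec id="business-hours" weekdays="1-5" hours="9-16"/>
 */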
static void
update_field(crm_time_t *t, const xmlNode *xml, const char *attr,
void (*time_fn)(crm_time_t *, int))
{
long long value;
if ((pcmk__scan_ll(crm_element_value(xml, attr), &value, 0LL) == pcmk_rc_ok)
&& (value != 0LL) && (value >= INT_MIN) && (value <= INT_MAX)) {
time_fn(t, (int) value);
}
}
static crm_time_t *
parse_xml_duration(const crm_time_t *start, const xmlNode *duration_spec)
{
crm_time_t *end = pcmk_copy_time(start);
update_field(end, duration_spec, "years", crm_time_add_years);
update_field(end, duration_spec, "months", crm_time_add_months);
update_field(end, duration_spec, "weeks", crm_time_add_weeks);
update_field(end, duration_spec, "days", crm_time_add_days);
update_field(end, duration_spec, "hours", crm_time_add_hours);
update_field(end, duration_spec, "minutes", crm_time_add_minutes);
update_field(end, duration_spec, "seconds", crm_time_add_seconds);
return end;
}
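/* For instance (hypothetical snippet), given start = 2023-06-01 00:00:00 and
 *
 *     <duration id="shift" days="2" hours="3"/>
 *
 * the computed end is 2023-06-03 03:00:00: each nonzero field is added to a
 * copy of the start time via the matching crm_time_add_*() call.
 */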
// Set next_change to t if t is earlier
static void
crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t)
{
if ((next_change != NULL) && (t != NULL)) {
if (!crm_time_is_defined(next_change)
|| (crm_time_compare(t, next_change) < 0)) {
crm_time_set(next_change, t);
}
}
}
// Information about a block of nvpair elements
typedef struct sorted_set_s {
int score; // This block's score for sorting
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
+ gboolean overwrite; // Whether existing values will be overwritten
} sorted_set_t;
static gint
sort_pairs(gconstpointer a, gconstpointer b)
{
const sorted_set_t *pair_a = a;
const sorted_set_t *pair_b = b;
if (a == NULL && b == NULL) {
return 0;
} else if (a == NULL) {
return 1;
} else if (b == NULL) {
return -1;
}
if (pcmk__str_eq(pair_a->name, pair_a->special_name, pcmk__str_casei)) {
return -1;
} else if (pcmk__str_eq(pair_b->name, pair_a->special_name, pcmk__str_casei)) {
return 1;
}
+ /* If we're overwriting values, we want lowest score first, so the highest
+ * score is processed last; if we're not overwriting values, we want highest
+ * score first, so nothing else overwrites it.
+ */
if (pair_a->score < pair_b->score) {
- return 1;
+ return pair_a->overwrite? -1 : 1;
} else if (pair_a->score > pair_b->score) {
- return -1;
+ return pair_a->overwrite? 1 : -1;
}
return 0;
}
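/* Illustration (hypothetical blocks A and B with scores 10 and 20): without
 * overwrite, B sorts first and wins because populate_hash() keeps the first
 * value it sees for a name; with overwrite, A sorts first and B is processed
 * last, replacing A's values. Either way the higher-score block prevails.
 */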
static void
populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlNode * top)
{
const char *name = NULL;
const char *value = NULL;
const char *old_value = NULL;
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
if (pcmk__xe_is(list->children, XML_TAG_ATTRS)) {
list = list->children;
}
for (an_attr = pcmk__xe_first_child(list); an_attr != NULL;
an_attr = pcmk__xe_next(an_attr)) {
if (pcmk__str_eq((const char *)an_attr->name, XML_CIB_TAG_NVPAIR, pcmk__str_none)) {
xmlNode *ref_nvpair = expand_idref(an_attr, top);
name = crm_element_value(an_attr, XML_NVPAIR_ATTR_NAME);
if (name == NULL) {
name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME);
}
value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE);
if (value == NULL) {
value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE);
}
if (name == NULL || value == NULL) {
continue;
}
old_value = g_hash_table_lookup(hash, name);
if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
if (old_value) {
crm_trace("Letting %s default (removing explicit value \"%s\")",
name, value);
g_hash_table_remove(hash, name);
}
continue;
} else if (old_value == NULL) {
crm_trace("Setting %s=\"%s\"", name, value);
g_hash_table_insert(hash, strdup(name), strdup(value));
} else if (overwrite) {
crm_trace("Setting %s=\"%s\" (overwriting old value \"%s\")",
name, value, old_value);
g_hash_table_replace(hash, strdup(name), strdup(value));
}
}
}
}
typedef struct unpack_data_s {
gboolean overwrite;
void *hash;
crm_time_t *next_change;
const pe_rule_eval_data_t *rule_data;
xmlNode *top;
} unpack_data_t;
static void
unpack_attr_set(gpointer data, gpointer user_data)
{
sorted_set_t *pair = data;
unpack_data_t *unpack_data = user_data;
if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data,
unpack_data->next_change)) {
return;
}
crm_trace("Adding attributes from %s (score %d) %s overwrite",
pair->name, pair->score,
(unpack_data->overwrite? "with" : "without"));
populate_hash(pair->attr_set, unpack_data->hash, unpack_data->overwrite, unpack_data->top);
}
/*!
* \internal
* \brief Create a sorted list of nvpair blocks
*
* \param[in,out] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only get blocks of this element
* \param[in] always_first If not NULL, sort block with this ID as first
+ * \param[in]     overwrite     Whether existing values will be overwritten
*
* \return List of sorted_set_t entries for nvpair blocks
*/
static GList *
make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
- const char *always_first)
+ const char *always_first, gboolean overwrite)
{
GList *unsorted = NULL;
if (xml_obj == NULL) {
return NULL;
}
for (xmlNode *attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
attr_set = pcmk__xe_next(attr_set)) {
if (pcmk__str_eq(set_name, (const char *) attr_set->name,
pcmk__str_null_matches)) {
const char *score = NULL;
sorted_set_t *pair = NULL;
xmlNode *expanded_attr_set = expand_idref(attr_set, top);
if (expanded_attr_set == NULL) {
// Schema (if not "none") prevents this
continue;
}
pair = calloc(1, sizeof(sorted_set_t));
pair->name = ID(expanded_attr_set);
pair->special_name = always_first;
pair->attr_set = expanded_attr_set;
+ pair->overwrite = overwrite;
score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
unsorted = g_list_prepend(unsorted, pair);
}
}
return g_list_sort(unsorted, sort_pairs);
}
/*!
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in,out] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only use blocks of this element
* \param[in] rule_data Matching parameters to use when unpacking
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with the same name
* \param[out] next_change If not NULL, set to when evaluation will change
*/
void
pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
- GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
+ GList *pairs = make_pairs(top, xml_obj, set_name, always_first, overwrite);
if (pairs) {
unpack_data_t data = {
.hash = hash,
.overwrite = overwrite,
.next_change = next_change,
.top = top,
.rule_data = rule_data
};
g_list_foreach(pairs, unpack_attr_set, &data);
g_list_free_full(pairs, free);
}
}
/*!
* \brief Extract nvpair blocks contained by an XML element into a hash table
*
* \param[in,out] top XML document root (used to expand id-ref's)
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name Element name to identify nvpair blocks
* \param[in] node_hash Node attributes to use when evaluating rules
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with the same name
* \param[in] now Time to use when evaluating rules
* \param[out] next_change If not NULL, set to when evaluation will change
*/
void
pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now, crm_time_t *next_change)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash,
always_first, overwrite, next_change);
}
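/* Usage sketch -- a minimal example, assuming cib_root, rsc_xml, and
 * node_attrs already exist (all three identifiers are hypothetical;
 * pcmk__strkey_table() is the internal string-keyed hash-table helper):
 *
 *     GHashTable *params = pcmk__strkey_table(free, free);
 *     crm_time_t *now = crm_time_new(NULL);  // current time
 *
 *     pe_unpack_nvpairs(cib_root, rsc_xml, XML_TAG_ATTR_SETS, node_attrs,
 *                       params, NULL, FALSE, now, NULL);
 */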
/*!
* \brief Expand any regular expression submatches (%0-%9) in a string
*
* \param[in] string String possibly containing submatch variables
* \param[in] match_data If not NULL, regular expression matches
*
* \return Newly allocated string identical to \p string with submatches
* expanded, or NULL if there were no matches
*/
char *
pe_expand_re_matches(const char *string, const pe_re_match_data_t *match_data)
{
size_t len = 0;
int i;
const char *p, *last_match_index;
char *p_dst, *result = NULL;
if (pcmk__str_empty(string) || !match_data) {
return NULL;
}
p = last_match_index = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
len += p - last_match_index + (match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so);
last_match_index = p + 2;
}
p++;
}
p++;
}
len += p - last_match_index + 1;
/* FIXME: Excessive? */
if (len - 1 <= 0) {
return NULL;
}
p_dst = result = calloc(1, len);
p = string;
while (*p) {
if (*p == '%' && *(p + 1) && isdigit(*(p + 1))) {
i = *(p + 1) - '0';
if (match_data->nregs >= i && match_data->pmatch[i].rm_so != -1 &&
match_data->pmatch[i].rm_eo > match_data->pmatch[i].rm_so) {
/* rm_eo can be equal to rm_so, but then there is nothing to do */
int match_len = match_data->pmatch[i].rm_eo - match_data->pmatch[i].rm_so;
memcpy(p_dst, match_data->string + match_data->pmatch[i].rm_so, match_len);
p_dst += match_len;
}
p++;
} else {
*(p_dst) = *(p);
p_dst++;
}
p++;
}
return result;
}
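/* Usage sketch (pattern and subject are illustrative). The pmatch array
 * filled in by regexec() maps directly onto pe_re_match_data_t:
 *
 *     regex_t re;
 *     regmatch_t pmatch[10];
 *     const char *subject = "ping-node3";
 *
 *     if ((regcomp(&re, "^ping-(.*)$", REG_EXTENDED) == 0)
 *         && (regexec(&re, subject, 10, pmatch, 0) == 0)) {
 *         pe_re_match_data_t re_data = {
 *             .string = (char *) subject, .nregs = 10, .pmatch = pmatch,
 *         };
 *         char *expanded = pe_expand_re_matches("attr-%1", &re_data);
 *         // expanded is now "attr-node3"; the caller must free() it
 *         free(expanded);
 *         regfree(&re);
 *     }
 */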
/*!
* \brief Evaluate rules
*
* \param[in,out] ruleset XML possibly containing rule sub-elements
* \param[in] rule_data Matching parameters to evaluate rules with
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if there are no rules, or if any rule is in effect, else FALSE
*/
gboolean
pe_eval_rules(xmlNode *ruleset, const pe_rule_eval_data_t *rule_data,
crm_time_t *next_change)
{
// If there are no rules, pass by default
gboolean ruleset_default = TRUE;
for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE);
rule != NULL; rule = crm_next_same_xml(rule)) {
ruleset_default = FALSE;
if (pe_eval_expr(rule, rule_data, next_change)) {
/* Only the deprecated "lifetime" element of location constraints
* may contain more than one rule at the top level -- the schema
* limits a block of nvpairs to a single top-level rule. So, this
* effectively means that a lifetime is active if any rule it
* contains is active.
*/
return TRUE;
}
}
return ruleset_default;
}
/*!
* \brief Evaluate all of a rule's expressions
*
* \param[in,out] rule XML containing a rule definition or its id-ref
* \param[in] rule_data Matching parameters to check against rule
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if \p rule_data passes \p rule, otherwise FALSE
*/
gboolean
pe_eval_expr(xmlNode *rule, const pe_rule_eval_data_t *rule_data,
crm_time_t *next_change)
{
xmlNode *expr = NULL;
gboolean test = TRUE;
gboolean empty = TRUE;
gboolean passed = TRUE;
gboolean do_and = TRUE;
const char *value = NULL;
rule = expand_idref(rule, NULL);
value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP);
if (pcmk__str_eq(value, "or", pcmk__str_casei)) {
do_and = FALSE;
passed = FALSE;
}
crm_trace("Testing rule %s", ID(rule));
for (expr = pcmk__xe_first_child(rule); expr != NULL;
expr = pcmk__xe_next(expr)) {
test = pe_eval_subexpr(expr, rule_data, next_change);
empty = FALSE;
if (test && do_and == FALSE) {
crm_trace("Expression %s/%s passed", ID(rule), ID(expr));
return TRUE;
} else if (test == FALSE && do_and) {
crm_trace("Expression %s/%s failed", ID(rule), ID(expr));
return FALSE;
}
}
if (empty) {
crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule));
}
crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed");
return passed;
}
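/* For example (hypothetical rule), boolean-op="or" means one passing
 * expression is enough; the default is "and", requiring all to pass:
 *
 *     <rule id="r1" boolean-op="or">
 *       <expression id="r1-e1" attribute="#kind" operation="eq"
 *                   value="remote"/>
 *       <date_expression id="r1-e2" operation="gt" start="2023-01-01"/>
 *     </rule>
 */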
/*!
* \brief Evaluate a single rule expression, including any subexpressions
*
* \param[in,out] expr XML containing a rule expression
* \param[in] rule_data Matching parameters to check against expression
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return TRUE if \p rule_data passes \p expr, otherwise FALSE
*/
gboolean
pe_eval_subexpr(xmlNode *expr, const pe_rule_eval_data_t *rule_data,
crm_time_t *next_change)
{
gboolean accept = FALSE;
const char *uname = NULL;
switch (find_expression_type(expr)) {
case nested_rule:
accept = pe_eval_expr(expr, rule_data, next_change);
break;
case attr_expr:
case loc_expr:
/* these expressions can never succeed if there is
* no node to compare with
*/
if (rule_data->node_hash != NULL) {
accept = pe__eval_attr_expr(expr, rule_data);
}
break;
case time_expr:
switch (pe__eval_date_expr(expr, rule_data, next_change)) {
case pcmk_rc_within_range:
case pcmk_rc_ok:
accept = TRUE;
break;
default:
accept = FALSE;
break;
}
break;
case role_expr:
accept = pe__eval_role_expr(expr, rule_data);
break;
case rsc_expr:
accept = pe__eval_rsc_expr(expr, rule_data);
break;
case op_expr:
accept = pe__eval_op_expr(expr, rule_data);
break;
default:
CRM_CHECK(FALSE /* bad type */ , return FALSE);
accept = FALSE;
}
if (rule_data->node_hash) {
uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME);
}
crm_trace("Expression %s %s on %s",
ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes");
return accept;
}
/*!
* \internal
* \brief Compare two values in a rule's node attribute expression
*
* \param[in] l_val Value on left-hand side of comparison
* \param[in] r_val Value on right-hand side of comparison
* \param[in] type How to interpret the values (allowed values:
* \c "string", \c "integer", \c "number",
* \c "version", \c NULL)
* \param[in] op Type of comparison
*
* \return -1 if (l_val < r_val),
* 0 if (l_val == r_val),
* 1 if (l_val > r_val)
*/
static int
compare_attr_expr_vals(const char *l_val, const char *r_val, const char *type,
const char *op)
{
int cmp = 0;
if (l_val != NULL && r_val != NULL) {
if (type == NULL) {
if (pcmk__strcase_any_of(op, "lt", "lte", "gt", "gte", NULL)) {
if (pcmk__char_in_any_str('.', l_val, r_val, NULL)) {
type = "number";
} else {
type = "integer";
}
} else {
type = "string";
}
crm_trace("Defaulting to %s based comparison for '%s' op", type, op);
}
if (pcmk__str_eq(type, "string", pcmk__str_casei)) {
cmp = strcasecmp(l_val, r_val);
} else if (pcmk__str_eq(type, "integer", pcmk__str_casei)) {
long long l_val_num;
int rc1 = pcmk__scan_ll(l_val, &l_val_num, 0LL);
long long r_val_num;
int rc2 = pcmk__scan_ll(r_val, &r_val_num, 0LL);
if ((rc1 == pcmk_rc_ok) && (rc2 == pcmk_rc_ok)) {
if (l_val_num < r_val_num) {
cmp = -1;
} else if (l_val_num > r_val_num) {
cmp = 1;
} else {
cmp = 0;
}
} else {
crm_debug("Integer parse error. Comparing %s and %s as strings",
l_val, r_val);
cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
}
} else if (pcmk__str_eq(type, "number", pcmk__str_casei)) {
double l_val_num;
double r_val_num;
int rc1 = pcmk__scan_double(l_val, &l_val_num, NULL, NULL);
int rc2 = pcmk__scan_double(r_val, &r_val_num, NULL, NULL);
if (rc1 == pcmk_rc_ok && rc2 == pcmk_rc_ok) {
if (l_val_num < r_val_num) {
cmp = -1;
} else if (l_val_num > r_val_num) {
cmp = 1;
} else {
cmp = 0;
}
} else {
crm_debug("Floating-point parse error. Comparing %s and %s as "
"strings", l_val, r_val);
cmp = compare_attr_expr_vals(l_val, r_val, "string", op);
}
} else if (pcmk__str_eq(type, "version", pcmk__str_casei)) {
cmp = compare_version(l_val, r_val);
}
} else if (l_val == NULL && r_val == NULL) {
cmp = 0;
} else if (r_val == NULL) {
cmp = 1;
} else { // l_val == NULL && r_val != NULL
cmp = -1;
}
return cmp;
}
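/* Example of the type defaulting: comparing "1.5" with "1.10" under op "lt"
 * and no explicit type picks "number" (a '.' is present), so 1.5 > 1.10 and
 * the result is 1; with type="version", 1.10 is the newer version and the
 * result is -1.
 */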
/*!
* \internal
* \brief Check whether an attribute expression evaluates to \c true
*
* \param[in] l_val Value on left-hand side of comparison
* \param[in] r_val Value on right-hand side of comparison
* \param[in] type How to interpret the values (allowed values:
* \c "string", \c "integer", \c "number",
* \c "version", \c NULL)
* \param[in] op Type of comparison.
*
* \return \c true if expression evaluates to \c true, \c false
* otherwise
*/
static bool
accept_attr_expr(const char *l_val, const char *r_val, const char *type,
const char *op)
{
int cmp;
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
return (l_val != NULL);
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
return (l_val == NULL);
}
cmp = compare_attr_expr_vals(l_val, r_val, type, op);
if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
return (cmp == 0);
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
return (cmp != 0);
} else if (l_val == NULL || r_val == NULL) {
// The comparison is meaningless from this point on
return false;
} else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
return (cmp < 0);
} else if (pcmk__str_eq(op, "lte", pcmk__str_casei)) {
return (cmp <= 0);
} else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
return (cmp > 0);
} else if (pcmk__str_eq(op, "gte", pcmk__str_casei)) {
return (cmp >= 0);
}
return false; // Should never reach this point
}
/*!
* \internal
* \brief Get correct value according to value-source
*
* \param[in] value value given in rule expression
* \param[in] value_source value-source given in rule expressions
* \param[in] match_data If not NULL, resource back-references and params
*/
static const char *
expand_value_source(const char *value, const char *value_source,
const pe_match_data_t *match_data)
{
GHashTable *table = NULL;
if (pcmk__str_empty(value)) {
return NULL; // value_source is irrelevant
} else if (pcmk__str_eq(value_source, "param", pcmk__str_casei)) {
table = match_data->params;
} else if (pcmk__str_eq(value_source, "meta", pcmk__str_casei)) {
table = match_data->meta;
} else { // literal
return value;
}
if (table == NULL) {
return NULL;
}
return (const char *) g_hash_table_lookup(table, value);
}
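/* For example (hypothetical snippet), value-source="param" makes the rule
 * compare against the matched resource's "host_list" parameter value rather
 * than the literal string "host_list":
 *
 *     <expression id="e1" attribute="#uname" operation="eq"
 *                 value="host_list" value-source="param"/>
 */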
/*!
* \internal
* \brief Evaluate a node attribute expression based on #uname, #id, #kind,
* or a generic node attribute
*
* \param[in] expr XML of rule expression
* \param[in] rule_data The match_data and node_hash members are used
*
* \return TRUE if rule_data satisfies the expression, FALSE otherwise
*/
gboolean
pe__eval_attr_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
{
gboolean attr_allocated = FALSE;
const char *h_val = NULL;
const char *op = NULL;
const char *type = NULL;
const char *attr = NULL;
const char *value = NULL;
const char *value_source = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE);
if (attr == NULL) {
pe_err("Expression %s invalid: " XML_EXPR_ATTR_ATTRIBUTE
" not specified", pcmk__s(ID(expr), "without ID"));
return FALSE;
    } else if (op == NULL) {
        pe_err("Expression %s invalid: " XML_EXPR_ATTR_OPERATION
               " not specified", pcmk__s(ID(expr), "without ID"));
        return FALSE;
    }
if (rule_data->match_data != NULL) {
// Expand any regular expression submatches (%0-%9) in attribute name
if (rule_data->match_data->re != NULL) {
char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re);
if (resolved_attr != NULL) {
attr = (const char *) resolved_attr;
attr_allocated = TRUE;
}
}
// Get value appropriate to value-source
value = expand_value_source(value, value_source, rule_data->match_data);
}
if (rule_data->node_hash != NULL) {
h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr);
}
if (attr_allocated) {
free((char *)attr);
attr = NULL;
}
return accept_attr_expr(h_val, value, type, op);
}
/*!
* \internal
* \brief Evaluate a date_expression
*
* \param[in] expr XML of rule expression
* \param[in] rule_data Only the now member is used
* \param[out] next_change If not NULL, set to when evaluation will change
*
* \return Standard Pacemaker return code
*/
int
pe__eval_date_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data,
crm_time_t *next_change)
{
crm_time_t *start = NULL;
crm_time_t *end = NULL;
const char *value = NULL;
const char *op = crm_element_value(expr, "operation");
xmlNode *duration_spec = NULL;
xmlNode *date_spec = NULL;
// "undetermined" will also be returned for parsing errors
int rc = pcmk_rc_undetermined;
crm_trace("Testing expression: %s", ID(expr));
duration_spec = first_named_child(expr, "duration");
date_spec = first_named_child(expr, "date_spec");
value = crm_element_value(expr, "start");
if (value != NULL) {
start = crm_time_new(value);
}
value = crm_element_value(expr, "end");
if (value != NULL) {
end = crm_time_new(value);
}
if (start != NULL && end == NULL && duration_spec != NULL) {
end = parse_xml_duration(start, duration_spec);
}
if (pcmk__str_eq(op, "in_range", pcmk__str_null_matches | pcmk__str_casei)) {
if ((start == NULL) && (end == NULL)) {
// in_range requires at least one of start or end
} else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) {
rc = pcmk_rc_before_range;
crm_time_set_if_earlier(next_change, start);
} else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) {
rc = pcmk_rc_after_range;
} else {
rc = pcmk_rc_within_range;
if (end && next_change) {
// Evaluation doesn't change until second after end
crm_time_add_seconds(end, 1);
crm_time_set_if_earlier(next_change, end);
}
}
} else if (pcmk__str_eq(op, "date_spec", pcmk__str_casei)) {
rc = pe_cron_range_satisfied(rule_data->now, date_spec);
// @TODO set next_change appropriately
} else if (pcmk__str_eq(op, "gt", pcmk__str_casei)) {
if (start == NULL) {
// gt requires start
} else if (crm_time_compare(rule_data->now, start) > 0) {
rc = pcmk_rc_within_range;
} else {
rc = pcmk_rc_before_range;
// Evaluation doesn't change until second after start
crm_time_add_seconds(start, 1);
crm_time_set_if_earlier(next_change, start);
}
} else if (pcmk__str_eq(op, "lt", pcmk__str_casei)) {
if (end == NULL) {
// lt requires end
} else if (crm_time_compare(rule_data->now, end) < 0) {
rc = pcmk_rc_within_range;
crm_time_set_if_earlier(next_change, end);
} else {
rc = pcmk_rc_after_range;
}
}
crm_time_free(start);
crm_time_free(end);
return rc;
}
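/* Hypothetical examples of the three supported operations:
 *
 *     <date_expression id="d1" operation="gt" start="2023-06-01"/>
 *     <date_expression id="d2" operation="in_range"
 *                      start="2023-06-01" end="2023-06-30"/>
 *     <date_expression id="d3" operation="date_spec">
 *       <date_spec id="d3-spec" hours="9-16"/>
 *     </date_expression>
 */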
gboolean
pe__eval_op_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
{
const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME);
const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL);
guint interval;
crm_trace("Testing op_defaults expression: %s", ID(expr));
if (rule_data->op_data == NULL) {
crm_trace("No operations data provided");
return FALSE;
}
interval = crm_parse_interval_spec(interval_s);
if (interval == 0 && errno != 0) {
crm_trace("Could not parse interval: %s", interval_s);
return FALSE;
}
if (interval_s != NULL && interval != rule_data->op_data->interval) {
crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval);
return FALSE;
}
if (!pcmk__str_eq(name, rule_data->op_data->op_name, pcmk__str_none)) {
crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name);
return FALSE;
}
return TRUE;
}
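/* For example (hypothetical snippet), this matches only 10-second-interval
 * monitor operations when evaluating op_defaults:
 *
 *     <op_expression id="o1" name="monitor" interval="10s"/>
 */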
/*!
* \internal
* \brief Evaluate a node attribute expression based on #role
*
* \param[in] expr XML of rule expression
* \param[in] rule_data Only the role member is used
*
* \return TRUE if rule_data->role satisfies the expression, FALSE otherwise
*/
gboolean
pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
{
gboolean accept = FALSE;
const char *op = NULL;
const char *value = NULL;
if (rule_data->role == pcmk_role_unknown) {
return accept;
}
value = crm_element_value(expr, XML_EXPR_ATTR_VALUE);
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
if (rule_data->role > pcmk_role_started) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
if ((rule_data->role > pcmk_role_unknown)
&& (rule_data->role < pcmk_role_unpromoted)) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "eq", pcmk__str_casei)) {
if (text2role(value) == rule_data->role) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
if ((rule_data->role > pcmk_role_unknown)
&& (rule_data->role < pcmk_role_unpromoted)) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
accept = TRUE;
}
}
return accept;
}
gboolean
pe__eval_rsc_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
{
const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS);
const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER);
const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE);
crm_trace("Testing rsc_defaults expression: %s", ID(expr));
if (rule_data->rsc_data == NULL) {
crm_trace("No resource data provided");
return FALSE;
}
if (class != NULL &&
!pcmk__str_eq(class, rule_data->rsc_data->standard, pcmk__str_none)) {
crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard);
return FALSE;
}
if ((provider == NULL && rule_data->rsc_data->provider != NULL) ||
(provider != NULL && rule_data->rsc_data->provider == NULL) ||
!pcmk__str_eq(provider, rule_data->rsc_data->provider, pcmk__str_none)) {
crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider);
return FALSE;
}
if (type != NULL &&
!pcmk__str_eq(type, rule_data->rsc_data->agent, pcmk__str_none)) {
crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent);
return FALSE;
}
return TRUE;
}
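/* For example (hypothetical snippet), this matches only the IPaddr2 agent
 * when evaluating rsc_defaults:
 *
 *     <rsc_expression id="r1" class="ocf" provider="heartbeat"
 *                     type="IPaddr2"/>
 */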
// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START
#include
gboolean
test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now)
{
return pe_evaluate_rules(ruleset, node_hash, now, NULL);
}
gboolean
test_rule(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_rule(rule, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_rule_re(xmlNode * rule, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_rule(rule, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_rule_full(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role,
crm_time_t *now, pe_match_data_t *match_data)
{
return pe_test_rule(rule, node_hash, role, now, NULL, match_data);
}
gboolean
test_expression(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now)
{
return pe_test_expression(expr, node_hash, role, now, NULL, NULL);
}
gboolean
pe_test_expression_re(xmlNode * expr, GHashTable * node_hash, enum rsc_role_e role, crm_time_t * now, pe_re_match_data_t * re_match_data)
{
pe_match_data_t match_data = {
.re = re_match_data,
.params = NULL,
.meta = NULL,
};
return pe_test_expression(expr, node_hash, role, now, NULL, &match_data);
}
gboolean
pe_test_expression_full(xmlNode *expr, GHashTable *node_hash,
enum rsc_role_e role, crm_time_t *now,
pe_match_data_t *match_data)
{
return pe_test_expression(expr, node_hash, role, now, NULL, match_data);
}
void
unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
GHashTable *node_hash, GHashTable *hash,
const char *always_first, gboolean overwrite,
crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
.role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash, always_first,
overwrite, NULL);
}
// LCOV_EXCL_STOP
// End deprecated API