diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 7b300bdb7e..6fad9b088d 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1289 +1,1291 @@ #!@BASH_PATH@ # # Copyright 2004-2018 Andrew Beekhof # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # USAGE_TEXT="Usage: cts-scheduler [<options>] Options: --help Display this text, then exit -V, --verbose Display any differences from expected output --run TEST Run only single specified test --update Update expected results with actual results -b, --binary PATH Specify path to crm_simulate -i, --io-dir PATH Specify path to regression test data directory -v, --valgrind Run all commands under valgrind --valgrind-dhat Run all commands under valgrind with heap analyzer --valgrind-skip-output If running under valgrind, don't display output --testcmd-options Additional options for command under test" SBINDIR="@sbindir@" BUILDDIR="@abs_top_builddir@" CRM_SCHEMA_DIRECTORY="@CRM_SCHEMA_DIRECTORY@" # If readlink supports -e (i.e. GNU), use it readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then test_home="$(dirname "$(readlink -e "$0")")" else test_home="$(dirname "$0")" fi io_dir="$test_home/scheduler" failed="$test_home/.regression.failed.diff" test_binary= testcmd_options= single_test= verbose=0 num_failed=0 num_tests=0 VALGRIND_CMD="" VALGRIND_OPTS="-q --gen-suppressions=all --log-file=%q{valgrind_output} --time-stamp=yes --trace-children=no --show-reachable=no --leak-check=full --num-callers=20 --suppressions=$test_home/valgrind-pcmk.suppressions" VALGRIND_DHAT_OPTS="--tool=exp-dhat --log-file=%q{valgrind_output} --time-stamp=yes --trace-children=no --show-top-n=100 --num-callers=4" diff_opts="--ignore-all-space --ignore-blank-lines -u -N" # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_NOT_INSTALLED=5 CRM_EX_USAGE=64 CRM_EX_NOINPUT=66 EXITCODE=$CRM_EX_OK function info() { printf "$*\n" } function error() { printf " * ERROR: $*\n" } function failed() { printf " * FAILED: $*\n" } function show_test() { name=$1; shift printf " Test %-25s $*\n" "$name:" } # Normalize scheduler output for comparison normalize() { for NORMALIZE_FILE in "$@"; do # sed -i is not portable :-( sed -e 's/crm_feature_set="[^"]*"//' \ -e 's/batch-limit="[0-9]*"//' \ "$NORMALIZE_FILE" > "${NORMALIZE_FILE}.$$" mv -- "${NORMALIZE_FILE}.$$" "$NORMALIZE_FILE" done } info "Test home is:\t$test_home" create_mode="false" while [ $# -gt 0 ] ; do case "$1" in -V|--verbose) verbose=1 shift ;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind $VALGRIND_OPTS" shift ;; --valgrind-dhat) VALGRIND_CMD="valgrind $VALGRIND_DHAT_OPTS" shift ;; --valgrind-skip-output) VALGRIND_SKIP_OUTPUT=1 shift ;; --update) create_mode="true" shift ;; --run) single_test=$(basename "$2" ".xml") shift 2 break # any remaining arguments will be passed to test command ;; -b|--binary) test_binary="$2" shift 2 ;; -i|--io-dir) io_dir="$2" shift 2 ;; --help) echo "$USAGE_TEXT" exit $CRM_EX_OK ;; --testcmd-options) testcmd_options=$2 shift 2 ;; *) error "unknown option: $1" exit $CRM_EX_USAGE ;; esac done if [ -z "$PCMK_schema_directory" ]; then if [ -d "$BUILDDIR/xml" ]; then export PCMK_schema_directory="$BUILDDIR/xml" elif [ -d "$CRM_SCHEMA_DIRECTORY" ]; then export PCMK_schema_directory="$CRM_SCHEMA_DIRECTORY" fi fi if [ -z "$test_binary" ]; then if [ -x "$BUILDDIR/tools/crm_simulate" ]; then test_binary="$BUILDDIR/tools/crm_simulate" elif [ -x "$SBINDIR/crm_simulate" ]; then
test_binary="$SBINDIR/crm_simulate" fi fi if [ ! -x "$test_binary" ]; then error "Test binary $test_binary not found" exit $CRM_EX_NOT_INSTALLED fi info "Test binary is:\t$test_binary" if [ -n "$PCMK_schema_directory" ]; then info "Schema home is:\t$PCMK_schema_directory" fi if [ "x$VALGRIND_CMD" != "x" ]; then info "Activating memory testing with valgrind"; fi info " " test_cmd="$VALGRIND_CMD $test_binary $testcmd_options" #echo $test_cmd if [ "$(whoami)" != "root" ]; then declare -x CIB_shadow_dir=/tmp fi do_test() { did_fail=0 expected_rc=0 num_tests=$(( $num_tests + 1 )) base=$1; shift name=$1; shift input=$io_dir/${base}.xml output=$io_dir/${base}.out expected=$io_dir/${base}.exp dot_expected=$io_dir/${base}.dot dot_output=$io_dir/${base}.pe.dot scores=$io_dir/${base}.scores score_output=$io_dir/${base}.scores.pe stderr_expected=$io_dir/${base}.stderr stderr_output=$io_dir/${base}.stderr.pe summary=$io_dir/${base}.summary summary_output=$io_dir/${base}.summary.pe valgrind_output=$io_dir/${base}.valgrind export valgrind_output if [ "x$1" = "x--rc" ]; then expected_rc=$2 shift; shift; fi show_test "$base" "$name" if [ ! -f $input ]; then error "No input"; did_fail=1 num_failed=$(( $num_failed + 1 )) return $CRM_EX_NOINPUT; fi if [ "$create_mode" != "true" ] && [ ! -f "$expected" ]; then error "no stored output"; return $CRM_EX_NOINPUT; fi # ../admin/crm_verify -X $input if [ ! -z "$single_test" ]; then echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -S" "$@" CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \ -G "$output" -S "$@" 2>&1 | tee "$summary_output" else CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -S &> "$summary_output" fi CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \ -G "$output" -SQ -s "$@" 2> "$stderr_output" > "$score_output" rc=$? if [ $rc -ne $expected_rc ]; then failed "Test returned: $rc"; did_fail=1 echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -SQ -s" "$@" fi if [ -z "$VALGRIND_SKIP_OUTPUT" ]; then if [ -s "${valgrind_output}" ]; then error "Valgrind reported errors"; did_fail=1 cat ${valgrind_output} fi rm -f ${valgrind_output} fi if [ -s core ]; then error "Core-file detected: core.${base}"; did_fail=1 rm -f $test_home/core.$base mv core $test_home/core.$base fi if [ -e "$stderr_expected" ]; then diff $diff_opts $stderr_expected $stderr_output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "stderr changed"; diff $diff_opts $stderr_expected $stderr_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi elif [ -s "$stderr_output" ]; then error "Output was written to stderr" did_fail=1 cat $stderr_output fi rm -f $stderr_output if [ ! -s $output ]; then error "No graph produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm -f $output return $CRM_EX_ERROR; fi if [ ! -s $dot_output ]; then error "No dot-file summary produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm -f $output return $CRM_EX_ERROR; else echo "digraph \"g\" {" > $dot_output.sort LC_ALL=POSIX sort -u $dot_output | grep -v -e '^}$' -e digraph >> $dot_output.sort echo "}" >> $dot_output.sort mv -f $dot_output.sort $dot_output fi if [ ! 
-s $score_output ]; then error "No allocation scores produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm $output return $CRM_EX_ERROR; else LC_ALL=POSIX sort $score_output > $score_output.sorted mv -f $score_output.sorted $score_output fi if [ "$create_mode" = "true" ]; then cp "$output" "$expected" cp "$dot_output" "$dot_expected" cp "$score_output" "$scores" cp "$summary_output" "$summary" info " Updated expected outputs" fi diff $diff_opts $summary $summary_output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "summary changed"; diff $diff_opts $summary $summary_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi diff $diff_opts $dot_expected $dot_output >/dev/null rc=$? if [ $rc -ne 0 ]; then failed "dot-file summary changed"; diff $diff_opts $dot_expected $dot_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 else rm -f $dot_output fi normalize "$expected" "$output" diff $diff_opts $expected $output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "xml-file changed"; diff $diff_opts $expected $output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi diff $diff_opts $scores $score_output >/dev/null rc=$? if [ $rc -ne 0 ]; then failed "scores-file changed"; diff $diff_opts $scores $score_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi rm -f $output $score_output $summary_output if [ $did_fail -eq 1 ]; then num_failed=$(( $num_failed + 1 )) return $CRM_EX_ERROR fi return $CRM_EX_OK } function test_results { if [ $num_failed -ne 0 ]; then if [ -s "$failed" ]; then if [ $verbose -eq 1 ]; then error "Results of $num_failed failed tests (out of $num_tests)...." cat $failed else error "Results of $num_failed failed tests (out of $num_tests) are in $failed...." error "Use $0 -V to display them automatically." fi else error "$num_failed (of $num_tests) tests failed (no diff results)" rm $failed fi EXITCODE=$CRM_EX_ERROR fi } # zero out the error log true > $failed if [ -n "$single_test" ]; then do_test "$single_test" "Single shot" "$@" TEST_RC=$? 
cat "$failed" exit $TEST_RC fi DO_VERSIONED_TESTS=0 info Performing the following tests from $io_dir echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "Negative group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" do_test group-fail "Ensure stop order is preserved for partially active groups" do_test group-unmanaged "No need to restart r115 because r114 is unmanaged" do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails" do_test group-dependents "Account for the location preferences of things colocated with a group" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" do_test nvpair-id-ref "Support id-ref in nvpair with optional name" do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed" do_test reload-becomes-restart "Cancel reload if restart becomes required" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can 
start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" do_test bug-5140-require-all-false "Allow basegrp:0 to stop" do_test clone-require-all-1 "clone B starts node 3 and 4" do_test clone-require-all-2 "clone B remains stopped everywhere" do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere" do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining." do_test clone-require-all-5 "clone B starts on node 1 3 and 4" do_test clone-require-all-6 "clone B remains active after shutting down instances of A" do_test clone-require-all-7 "clone A and B both start at the same time. all instances of A start before B." do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B" do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B" do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another." do_test one-or-more-unrunnable-instances "Avoid dependencies on instances that won't ever be started" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (mandatory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependent clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" do_test clone-order-16instances "Verify ordering of 16 cloned resources" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings." do_test ordered-set-natural "Allow natural set ordering" do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." 
do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on the same node" do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations" do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations" do_test enforce-colo1 "Always enforce B with A INFINITY." do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)" echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " do_test per-node-attrs "Per node resource parameters" echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor" do_test stop-failure-no-quorum "Stop failure without quorum" do_test stop-failure-no-fencing "Stop failure without fencing available" do_test stop-failure-with-fencing "Stop failure with fencing available" do_test multiple-active-block-group "Support of multiple-active=block for resource groups" do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze" do_test suicide-needed-inquorate "no-quorum-policy=suicide: suicide necessary" do_test suicide-not-needed-initial-quorum "no-quorum-policy=suicide: suicide not necessary at initial quorum" do_test suicide-not-needed-never-quorate "no-quorum-policy=suicide: suicide not necessary if never quorate" do_test suicide-not-needed-quorate "no-quorum-policy=suicide: suicide not necessary if quorate" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum -
ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" do_test migrate-both-vms "Migrate two VMs that have no colocation" do_test migration-behind-migrating-remote "Migrate resource behind migrating remote connection" do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B." do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B" do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both" do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable" do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B. 
move both, a not migratable" do_test 6-migrate-group "Advanced migrate logic, migrate a group" do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false" do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping" do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping" do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A" do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping" do_test a-promote-then-b-migrate "A promote then B start. migrate B" do_test a-demote-then-b-migrate "A demote then B stop. migrate B" if [ $DO_VERSIONED_TESTS -eq 1 ]; then do_test migrate-versioned "Disable migration for versioned resources" fi #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" +do_test force-anon-clone-max "Update clone-max properly when forcing a clone to be anonymous" +do_test anon-instance-pending "Assign anonymous clone instance numbers properly when action pending" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder" do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Don't shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" do_test 
bug-cl-5168 "Don't shuffle clones" do_test bug-cl-5170 "Prevent clone from starting with on-fail=block" do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block" do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" do_test rebalance-unique-clones "Rebalance unique clone instances with no stickiness" do_test clone-requires-quorum-recovery "Clone with requires=quorum on failed node needing recovery" do_test clone-requires-quorum "Clone with requires=quorum with presumed-inactive instance on failed node" echo "" do_test cloned_start_one "order first clone then clone... first clone_min=2" do_test cloned_start_two "order first clone then clone... first clone_min=2" do_test cloned_stop_one "order first clone then clone... first clone_min=2" do_test cloned_stop_two "order first clone then clone... first clone_min=2" do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_stop_one "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_stop_two "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_start_one "order first clone then primitive... first clone_min=2" do_test clone_min_start_two "order first clone then primitive... first clone_min=2" do_test clone_min_stop_all "order first clone then primitive... first clone_min=2" do_test clone_min_stop_one "order first clone then primitive... first clone_min=2" do_test clone_min_stop_two "order first clone then primitive... 
first clone_min=2" echo "" do_test unfence-startup "Clean unfencing" do_test unfence-definition "Unfencing when the agent changes" do_test unfence-parameters "Unfencing when the agent parameters changes" do_test unfence-device "Unfencing when a cluster has only fence devices" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Don't retry failed demote actions" do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion." 
do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive" do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score" do_test master-demote-block "Block promotion if demote fails with on-fail=block" do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host" do_test master-stop "Stop instances due to location constraint with role=Started" do_test master-partially-demoted-group "Allow partially demoted group to finish demoting" do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced" do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted" do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering" do_test master-notify "Master promotion with notifies" do_test master-score-startup "Use permanent master scores without LRM history" do_test failed-demote-recovery "Recover resource in slave role after demote fails" do_test failed-demote-recovery-master "Recover resource in master role after demote fails" echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any depending resource is unmanaged " do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged " do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged " do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged " do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged" echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependency restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" do_test notifs-for-unrunnable "Don't schedule notifications for an unrunnable action" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL 
#994 - with a dependent resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test unrunnable-2 "Unrunnable 2" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test stonith-4 "Stonith node state" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Don't promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." 
do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed." do_test failcount "Ensure failcounts are correctly expired" do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present" do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent" do_test on-fail-ignore "Ensure on-fail=ignore works even beyond migration-threshold" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure with on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled." do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections" do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block" do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources" do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing" do_test order-expired-failure "Order failcount cleanup after remote fencing" do_test ignore_stonith_rsc_order1 "cl#5056 - Ignore order constraint between stonith and non-stonith rsc." do_test ignore_stonith_rsc_order2 "cl#5056 - Ignore order constraint with group rsc containing mixed stonith and non-stonith." do_test ignore_stonith_rsc_order3 "cl#5056 - Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056 - Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (single rsc)." do_test honor_stonith_rsc_order2 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056 - Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056 - Honor order constraint, between two native stonith rscs."
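# (Illustrative sketch: besides a name and description, do_test accepts an
# expected return code via --rc and forwards any further arguments to
# crm_simulate, as the date-spec tests above do with -t. The test name
# "some-test" is hypothetical.)
#
#   do_test some-test "Expect nonzero exit" --rc 1
#   do_test some-test "Fixed clock" -t "2005-020"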
do_test multiply-active-stonith do_test probe-timeout "cl#5099 - Default probe timeout" do_test concurrent-fencing "Allow performing fencing operations in parallel" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progressive) #1" do_test systemhealthp2 "System Health (Progressive) #2" do_test systemhealthp3 "System Health (Progressive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" echo "" do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive" do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node" do_test colocated-utilization-group "Colocated Utilization - Group" do_test colocated-utilization-clone "Colocated Utilization - Clone" do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" do_test node-maintenance-1 "cl#5128 - Node maintenance" do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)" do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly" do_test rsc-maintenance "Per-resource maintenance" echo "" do_test not-installed-agent "The resource agent is missing" do_test not-installed-tools "Something the resource agent needs is missing" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test stopped-monitor-09 "Stopped Monitor - unmanage
started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo "" # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes do_test intervals "Recurring monitor interval handling" echo"" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy-stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy-stop, revoked)" do_test 
ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master
(loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" do_test cluster-specific-params "Cluster-specific instance attributes based on rules" do_test site-specific-params "Site-specific instance attributes based on rules" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test 
template-rsc-sets-4 "Template - Resource Sets 4" do_test template-clone-primitive "Cloned primitive from template" do_test template-clone-group "Cloned group from template" do_test location-sets-templates "Resource sets and templates - Location" do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)" do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)" do_test tags-location "Tags - Location" do_test tags-ticket "Tags - Ticket" echo "" do_test container-1 "Container - initial" do_test container-2 "Container - monitor failed" do_test container-3 "Container - stop failed" do_test container-4 "Container - reached migration-threshold" do_test container-group-1 "Container in group - initial" do_test container-group-2 "Container in group - monitor failed" do_test container-group-3 "Container in group - stop failed" do_test container-group-4 "Container in group - reached migration-threshold" do_test container-is-remote-node "Place resource within container when container is remote-node" do_test bug-rh-1097457 "Kill user-defined container/contents ordering" do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container" do_test bundle-order-startup "Bundle startup ordering" do_test bundle-order-partial-start "Bundle startup ordering when some dependencies are already running" do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependencies and the container are already running" do_test bundle-order-stop "Bundle stop ordering" do_test bundle-order-partial-stop "Bundle startup ordering when some dependencies are already stopped" do_test bundle-order-stop-on-remote "Stop nested resource after bringing up the connection" do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted" do_test bundle-order-startup-clone-2 "Bundle startup with clones" do_test bundle-order-stop-clone "Stop bundle because clone is stopping" do_test bundle-nested-colocation "Colocation of nested connection resources" do_test bundle-order-fencing "Order pseudo bundle fencing after parent node fencing if both are happening" do_test bundle-probe-order-1 "order 1" do_test bundle-probe-order-2 "order 2" do_test bundle-probe-order-3 "order 3" do_test bundle-probe-remotes "Ensure remotes get probed too" do_test bundle-replicas-change "Change bundle from 1 replica to multiple" echo "" do_test whitebox-fail1 "Fail whitebox container rsc." do_test whitebox-fail2 "Fail cluster connection to guest node" do_test whitebox-fail3 "Failed containers should not run nested on remote nodes." do_test whitebox-start "Start whitebox container with resources assigned to it" do_test whitebox-stop "Stop whitebox container with resources assigned to it" do_test whitebox-move "Move whitebox container with resources assigned to it" do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource" do_test whitebox-ms-ordering "Verify promote/demote cannot occur before connection is established" do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container" do_test whitebox-orphaned "Properly shut down orphaned whitebox container" do_test whitebox-orphan-ms "Properly tear down orphan ms resources on remote-nodes" do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start."
do_test whitebox-migrate1 "Migrate both container and connection resource" do_test whitebox-imply-stop-on-fence "Imply stop action on container node rsc when host node is fenced" do_test whitebox-nested-group "Verify guest remote-node works nested in a group" do_test guest-node-host-dies "Verify guest node is recovered if host goes away" echo "" do_test remote-startup-probes "Baremetal remote-node startup probes" do_test remote-startup "Start up a newly discovered remote-node with no status." do_test remote-fence-unclean "Fence unclean baremetal remote-node" do_test remote-fence-unclean2 "Fence baremetal remote-node after cluster node fails and connection cannot be recovered" do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)" do_test remote-move "Move remote-node connection resource" do_test remote-disable "Disable a baremetal remote-node" do_test remote-probe-disable "Probe then stop a baremetal remote-node" do_test remote-orphaned "Properly shut down orphaned connection resource" do_test remote-orphaned2 "Verify we can handle orphaned remote connections with active resources on the remote" do_test remote-recover "Recover connection resource after cluster-node fails." do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section" do_test remote-partial-migrate "Make sure partial migrations are handled before ops on the remote node." do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection." do_test remote-recover-fail "Make sure start failure causes fencing if rsc are active on remote." do_test remote-start-fail "Make sure a start failure does not result in fencing if no active resources are on remote." do_test remote-unclean2 "Make sure monitor failure always results in fencing, even if no rsc are active on remote." do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure" do_test remote-recovery "Recover remote connections before attempting demotion" do_test remote-recover-connection "Optimistic recovery of only the connection" do_test remote-recover-all "Fencing when the connection has no home" do_test remote-recover-no-resources "Fencing when the connection has no home and no active resources" do_test remote-recover-unknown "Fencing when the connection has no home and the remote has no operation history" do_test remote-reconnect-delay "Waiting for remote reconnect interval to expire" do_test remote-connection-unrecoverable "Remote connection host must be fenced, with connection unrecoverable" echo "" do_test resource-discovery "Exercises resource-discovery location constraint option."
do_test rsc-discovery-per-node "Disable resource discovery per node" if [ "$DO_VERSIONED_TESTS" -eq 1 ]; then echo "" do_test versioned-resources "Start resources with #ra-version rules" do_test restart-versioned "Restart resources on #ra-version change" do_test reload-versioned "Reload resources on #ra-version change" echo "" do_test versioned-operations-1 "Use #ra-version to configure operations of native resources" do_test versioned-operations-2 "Use #ra-version to configure operations of stonith resources" do_test versioned-operations-3 "Use #ra-version to configure operations of master/slave resources" do_test versioned-operations-4 "Use #ra-version to configure operations of resource groups" fi echo "" test_results exit $EXITCODE diff --git a/cts/scheduler/735.dot b/cts/scheduler/735.dot index 228cc93f41..db194a0141 100644 --- a/cts/scheduler/735.dot +++ b/cts/scheduler/735.dot @@ -1,31 +1,31 @@ - digraph "g" { -"DcIPaddr_monitor_0 hadev3" [ style=bold color="green" fontcolor="black" ] -"DoFencing_running_0" [ style=bold color="green" fontcolor="orange" ] +digraph "g" { +"DcIPaddr_monitor_0 hadev3" [ style=bold color="green" fontcolor="black"] +"DoFencing_running_0" [ style=bold color="green" fontcolor="orange"] "DoFencing_start_0" -> "DoFencing_running_0" [ style = bold] "DoFencing_start_0" -> "child_DoFencing:0_start_0 hadev2" [ style = bold] "DoFencing_start_0" -> "child_DoFencing:1_start_0 hadev3" [ style = bold] -"DoFencing_start_0" [ style=bold color="green" fontcolor="orange" ] -"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"DoFencing_start_0" [ style=bold color="green" fontcolor="orange"] +"all_stopped" [ style=bold color="green" fontcolor="orange"] "child_DoFencing:0_monitor_0 hadev3" -> "DoFencing_start_0" [ style = bold] -"child_DoFencing:0_monitor_0 hadev3" [ style=bold color="green" fontcolor="black" ] -"child_DoFencing:0_monitor_5000 hadev2" [ style=bold color="green" fontcolor="black" ] +"child_DoFencing:0_monitor_0 hadev3" [ style=bold color="green" fontcolor="black"] +"child_DoFencing:0_monitor_5000 hadev2" [ style=bold color="green" fontcolor="black"] "child_DoFencing:0_start_0 hadev2" -> "DoFencing_running_0" [ style = bold] "child_DoFencing:0_start_0 hadev2" -> "child_DoFencing:0_monitor_5000 hadev2" [ style = bold] -"child_DoFencing:0_start_0 hadev2" [ style=bold color="green" fontcolor="black" ] -"child_DoFencing:1_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black" ] +"child_DoFencing:0_start_0 hadev2" [ style=bold color="green" fontcolor="black"] +"child_DoFencing:1_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black"] "child_DoFencing:1_start_0 hadev3" -> "DoFencing_running_0" [ style = bold] "child_DoFencing:1_start_0 hadev3" -> "child_DoFencing:1_monitor_5000 hadev3" [ style = bold] -"child_DoFencing:1_start_0 hadev3" [ style=bold color="green" fontcolor="black" ] +"child_DoFencing:1_start_0 hadev3" [ style=bold color="green" fontcolor="black"] "child_DoFencing:2_monitor_0 hadev3" -> "DoFencing_start_0" [ style = bold] -"child_DoFencing:2_monitor_0 hadev3" [ style=bold color="green" fontcolor="black" ] -"rsc_hadev1_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black" ] +"child_DoFencing:2_monitor_0 hadev3" [ style=bold color="green" fontcolor="black"] +"rsc_hadev1_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black"] "rsc_hadev1_start_0 hadev3" -> "rsc_hadev1_monitor_5000 hadev3" [ style = bold] -"rsc_hadev1_start_0 hadev3" [ style=bold color="green" fontcolor="black" ]
+"rsc_hadev1_start_0 hadev3" [ style=bold color="green" fontcolor="black"] "rsc_hadev1_stop_0 hadev2" -> "all_stopped" [ style = bold] "rsc_hadev1_stop_0 hadev2" -> "rsc_hadev1_start_0 hadev3" [ style = bold] -"rsc_hadev1_stop_0 hadev2" [ style=bold color="green" fontcolor="black" ] -"rsc_hadev2_monitor_0 hadev3" [ style=bold color="green" fontcolor="black" ] -"rsc_hadev3_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black" ] +"rsc_hadev1_stop_0 hadev2" [ style=bold color="green" fontcolor="black"] +"rsc_hadev2_monitor_0 hadev3" [ style=bold color="green" fontcolor="black"] +"rsc_hadev3_monitor_5000 hadev3" [ style=bold color="green" fontcolor="black"] "rsc_hadev3_start_0 hadev3" -> "rsc_hadev3_monitor_5000 hadev3" [ style = bold] -"rsc_hadev3_start_0 hadev3" [ style=bold color="green" fontcolor="black" ] +"rsc_hadev3_start_0 hadev3" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/735.exp b/cts/scheduler/735.exp index 3f84850547..4bafbb4198 100644 --- a/cts/scheduler/735.exp +++ b/cts/scheduler/735.exp @@ -1,193 +1,192 @@ - - - + + + - - - - - - + + + + + + - - + + - - - + + + - - + + - - - + + + - - - - + + + + - - - - - + + + + + - - - - - - + + + + + + - - - - + + + + - - - + + + - - + + - - + + - - - + + + - - - - + + + + - - - - - + + + + + - - + + - - - + + + - - - - + + + + - - - - - + + + + + - - - - + + + + - - - - - + + + + + - - - - + + + + - + - - - - + + + + - - + + - - + + - - - + + + - + - - + + - + - - - + + + - - - - - + + + + diff --git a/cts/scheduler/735.summary b/cts/scheduler/735.summary index 2db520ad39..b536c16e01 100644 --- a/cts/scheduler/735.summary +++ b/cts/scheduler/735.summary @@ -1,51 +1,51 @@ Current cluster status: Online: [ hadev2 hadev3 ] OFFLINE: [ hadev1 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 (ocf::heartbeat:IPaddr): Starting hadev2 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 - rsc_hadev3 (ocf::heartbeat:IPaddr): Starting + rsc_hadev3 (ocf::heartbeat:IPaddr): Starting hadev2 Clone Set: DoFencing [child_DoFencing] (unique) - child_DoFencing:0 (stonith:ssh): Starting - child_DoFencing:1 (stonith:ssh): Stopped - child_DoFencing:2 (stonith:ssh): Stopped + child_DoFencing:0 (stonith:ssh): Starting hadev2 + child_DoFencing:1 (stonith:ssh): Stopped + child_DoFencing:2 (stonith:ssh): Stopped Transition Summary: * Move rsc_hadev1 ( hadev2 -> hadev3 ) - * Start rsc_hadev3 (hadev3) - * Start child_DoFencing:0 (hadev2) - * Start child_DoFencing:1 (hadev3) + * Start rsc_hadev3 ( hadev3 ) + * Start child_DoFencing:0 ( hadev2 ) + * Start child_DoFencing:1 ( hadev3 ) Executing cluster transition: * Resource action: DcIPaddr monitor on hadev3 * Resource action: rsc_hadev1 stop on hadev2 * Resource action: rsc_hadev1 start on hadev3 * Resource action: rsc_hadev2 monitor on hadev3 * Resource action: rsc_hadev3 start on hadev3 * Resource action: child_DoFencing:0 monitor on hadev3 * Resource action: child_DoFencing:2 monitor on hadev3 * Pseudo action: DoFencing_start_0 * Pseudo action: all_stopped * Resource action: rsc_hadev1 monitor=5000 on hadev3 * Resource action: rsc_hadev3 monitor=5000 on hadev3 * Resource action: child_DoFencing:0 start on hadev2 * Resource action: child_DoFencing:1 start on hadev3 * Pseudo action: DoFencing_running_0 * Resource action: child_DoFencing:0 monitor=5000 on hadev2 * Resource action: child_DoFencing:1 monitor=5000 on hadev3 Revised cluster status: Online: [ hadev2 hadev3 ] OFFLINE: [ hadev1 ] DcIPaddr (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev1 
(ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev2 (ocf::heartbeat:IPaddr): Started hadev2 rsc_hadev3 (ocf::heartbeat:IPaddr): Started hadev3 Clone Set: DoFencing [child_DoFencing] (unique) child_DoFencing:0 (stonith:ssh): Started hadev2 child_DoFencing:1 (stonith:ssh): Started hadev3 - child_DoFencing:2 (stonith:ssh): Stopped + child_DoFencing:2 (stonith:ssh): Stopped diff --git a/cts/scheduler/anon-instance-pending.dot b/cts/scheduler/anon-instance-pending.dot new file mode 100644 index 0000000000..426ee75fb6 --- /dev/null +++ b/cts/scheduler/anon-instance-pending.dot @@ -0,0 +1,326 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange"] +"clone1_running_0" [ style=bold color="green" fontcolor="orange"] +"clone1_start_0" -> "clone1_running_0" [ style = bold] +"clone1_start_0" -> "clone1rsc:10_start_0 node8" [ style = bold] +"clone1_start_0" -> "clone1rsc:5_start_0 node10" [ style = bold] +"clone1_start_0" -> "clone1rsc:6_start_0 node11" [ style = bold] +"clone1_start_0" -> "clone1rsc:7_start_0 node5" [ style = bold] +"clone1_start_0" -> "clone1rsc:8_start_0 node6" [ style = bold] +"clone1_start_0" -> "clone1rsc:9_start_0 node7" [ style = bold] +"clone1_start_0" -> "clone1rsc_start_0 node4" [ style = bold] +"clone1_start_0" -> "clone1rsc_start_0 node9" [ style = bold] +"clone1_start_0" [ style=bold color="green" fontcolor="orange"] +"clone1rsc:10_monitor_10000 node8" [ style=bold color="green" fontcolor="black"] +"clone1rsc:10_start_0 node8" -> "clone1_running_0" [ style = bold] +"clone1rsc:10_start_0 node8" -> "clone1rsc:10_monitor_10000 node8" [ style = bold] +"clone1rsc:10_start_0 node8" [ style=bold color="green" fontcolor="black"] +"clone1rsc:5_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone1rsc:5_start_0 node10" -> "clone1_running_0" [ style = bold] +"clone1rsc:5_start_0 node10" -> "clone1rsc:5_monitor_10000 node10" [ style = bold] +"clone1rsc:5_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone1rsc:6_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone1rsc:6_start_0 node11" -> "clone1_running_0" [ style = bold] +"clone1rsc:6_start_0 node11" -> "clone1rsc:6_monitor_10000 node11" [ style = bold] +"clone1rsc:6_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone1rsc:7_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"clone1rsc:7_start_0 node5" -> "clone1_running_0" [ style = bold] +"clone1rsc:7_start_0 node5" -> "clone1rsc:7_monitor_10000 node5" [ style = bold] +"clone1rsc:7_start_0 node5" [ style=bold color="green" fontcolor="black"] +"clone1rsc:8_monitor_10000 node6" [ style=bold color="green" fontcolor="black"] +"clone1rsc:8_start_0 node6" -> "clone1_running_0" [ style = bold] +"clone1rsc:8_start_0 node6" -> "clone1rsc:8_monitor_10000 node6" [ style = bold] +"clone1rsc:8_start_0 node6" [ style=bold color="green" fontcolor="black"] +"clone1rsc:9_monitor_10000 node7" [ style=bold color="green" fontcolor="black"] +"clone1rsc:9_start_0 node7" -> "clone1_running_0" [ style = bold] +"clone1rsc:9_start_0 node7" -> "clone1rsc:9_monitor_10000 node7" [ style = bold] +"clone1rsc:9_start_0 node7" [ style=bold color="green" fontcolor="black"] +"clone1rsc_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone1rsc_monitor_10000 node9" [ style=bold color="green" fontcolor="black"] +"clone1rsc_start_0 node4" -> "clone1_running_0" [ style = bold] +"clone1rsc_start_0 node4" -> "clone1rsc_monitor_10000 node4" [ style = bold] +"clone1rsc_start_0 node4" [ 
style=bold color="green" fontcolor="black"] +"clone1rsc_start_0 node9" -> "clone1_running_0" [ style = bold] +"clone1rsc_start_0 node9" -> "clone1rsc_monitor_10000 node9" [ style = bold] +"clone1rsc_start_0 node9" [ style=bold color="green" fontcolor="black"] +"clone2_running_0" [ style=bold color="green" fontcolor="orange"] +"clone2_start_0" -> "clone2_running_0" [ style = bold] +"clone2_start_0" -> "clone2rsc:3_start_0 node11" [ style = bold] +"clone2_start_0" -> "clone2rsc:4_start_0 node3" [ style = bold] +"clone2_start_0" -> "clone2rsc_start_0 node10" [ style = bold] +"clone2_start_0" -> "clone2rsc_start_0 node4" [ style = bold] +"clone2_start_0" [ style=bold color="green" fontcolor="orange"] +"clone2rsc:3_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone2rsc:3_start_0 node11" -> "clone2_running_0" [ style = bold] +"clone2rsc:3_start_0 node11" -> "clone2rsc:3_monitor_10000 node11" [ style = bold] +"clone2rsc:3_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone2rsc:4_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"clone2rsc:4_start_0 node3" -> "clone2_running_0" [ style = bold] +"clone2rsc:4_start_0 node3" -> "clone2rsc:4_monitor_10000 node3" [ style = bold] +"clone2rsc:4_start_0 node3" [ style=bold color="green" fontcolor="black"] +"clone2rsc_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone2rsc_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone2rsc_start_0 node10" -> "clone2_running_0" [ style = bold] +"clone2rsc_start_0 node10" -> "clone2rsc_monitor_10000 node10" [ style = bold] +"clone2rsc_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone2rsc_start_0 node4" -> "clone2_running_0" [ style = bold] +"clone2rsc_start_0 node4" -> "clone2rsc_monitor_10000 node4" [ style = bold] +"clone2rsc_start_0 node4" [ style=bold color="green" fontcolor="black"] +"clone3_running_0" [ style=bold color="green" fontcolor="orange"] +"clone3_start_0" -> "clone3_running_0" [ style = bold] +"clone3_start_0" -> "clone3rsc:10_start_0 node4" [ style = bold] +"clone3_start_0" -> "clone3rsc:2_start_0 node6" [ style = bold] +"clone3_start_0" -> "clone3rsc:3_start_0 node7" [ style = bold] +"clone3_start_0" -> "clone3rsc:4_start_0 node8" [ style = bold] +"clone3_start_0" -> "clone3rsc:5_start_0 node9" [ style = bold] +"clone3_start_0" -> "clone3rsc:6_start_0 node1" [ style = bold] +"clone3_start_0" -> "clone3rsc:7_start_0 node10" [ style = bold] +"clone3_start_0" -> "clone3rsc:8_start_0 node11" [ style = bold] +"clone3_start_0" -> "clone3rsc:9_start_0 node2" [ style = bold] +"clone3_start_0" -> "clone3rsc_start_0 node5" [ style = bold] +"clone3_start_0" [ style=bold color="green" fontcolor="orange"] +"clone3rsc:10_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone3rsc:10_start_0 node4" -> "clone3_running_0" [ style = bold] +"clone3rsc:10_start_0 node4" -> "clone3rsc:10_monitor_10000 node4" [ style = bold] +"clone3rsc:10_start_0 node4" [ style=bold color="green" fontcolor="black"] +"clone3rsc:2_monitor_10000 node6" [ style=bold color="green" fontcolor="black"] +"clone3rsc:2_start_0 node6" -> "clone3_running_0" [ style = bold] +"clone3rsc:2_start_0 node6" -> "clone3rsc:2_monitor_10000 node6" [ style = bold] +"clone3rsc:2_start_0 node6" [ style=bold color="green" fontcolor="black"] +"clone3rsc:3_monitor_10000 node7" [ style=bold color="green" fontcolor="black"] +"clone3rsc:3_start_0 node7" -> "clone3_running_0" [ style = bold] +"clone3rsc:3_start_0 node7" -> 
"clone3rsc:3_monitor_10000 node7" [ style = bold] +"clone3rsc:3_start_0 node7" [ style=bold color="green" fontcolor="black"] +"clone3rsc:4_monitor_10000 node8" [ style=bold color="green" fontcolor="black"] +"clone3rsc:4_start_0 node8" -> "clone3_running_0" [ style = bold] +"clone3rsc:4_start_0 node8" -> "clone3rsc:4_monitor_10000 node8" [ style = bold] +"clone3rsc:4_start_0 node8" [ style=bold color="green" fontcolor="black"] +"clone3rsc:5_monitor_10000 node9" [ style=bold color="green" fontcolor="black"] +"clone3rsc:5_start_0 node9" -> "clone3_running_0" [ style = bold] +"clone3rsc:5_start_0 node9" -> "clone3rsc:5_monitor_10000 node9" [ style = bold] +"clone3rsc:5_start_0 node9" [ style=bold color="green" fontcolor="black"] +"clone3rsc:6_monitor_10000 node1" [ style=bold color="green" fontcolor="black"] +"clone3rsc:6_start_0 node1" -> "clone3_running_0" [ style = bold] +"clone3rsc:6_start_0 node1" -> "clone3rsc:6_monitor_10000 node1" [ style = bold] +"clone3rsc:6_start_0 node1" [ style=bold color="green" fontcolor="black"] +"clone3rsc:7_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone3rsc:7_start_0 node10" -> "clone3_running_0" [ style = bold] +"clone3rsc:7_start_0 node10" -> "clone3rsc:7_monitor_10000 node10" [ style = bold] +"clone3rsc:7_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone3rsc:8_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone3rsc:8_start_0 node11" -> "clone3_running_0" [ style = bold] +"clone3rsc:8_start_0 node11" -> "clone3rsc:8_monitor_10000 node11" [ style = bold] +"clone3rsc:8_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone3rsc:9_monitor_0 node2" -> "clone3_start_0" [ style = bold] +"clone3rsc:9_monitor_0 node2" [ style=bold color="green" fontcolor="black"] +"clone3rsc:9_monitor_10000 node2" [ style=bold color="green" fontcolor="black"] +"clone3rsc:9_start_0 node2" -> "clone3_running_0" [ style = bold] +"clone3rsc:9_start_0 node2" -> "clone3rsc:9_monitor_10000 node2" [ style = bold] +"clone3rsc:9_start_0 node2" [ style=bold color="green" fontcolor="black"] +"clone3rsc_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"clone3rsc_start_0 node5" -> "clone3_running_0" [ style = bold] +"clone3rsc_start_0 node5" -> "clone3rsc_monitor_10000 node5" [ style = bold] +"clone3rsc_start_0 node5" [ style=bold color="green" fontcolor="black"] +"clone4_stop_0" -> "clone4_stopped_0" [ style = bold] +"clone4_stop_0" -> "clone4rsc_stop_0 node9" [ style = bold] +"clone4_stop_0" [ style=bold color="green" fontcolor="orange"] +"clone4_stopped_0" [ style=bold color="green" fontcolor="orange"] +"clone4rsc_stop_0 node9" -> "all_stopped" [ style = bold] +"clone4rsc_stop_0 node9" -> "clone4_stopped_0" [ style = bold] +"clone4rsc_stop_0 node9" [ style=bold color="green" fontcolor="black"] +"clone5_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5_start_0" -> "clone5_running_0" [ style = bold] +"clone5_start_0" -> "clone5group:10_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:2_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:3_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:4_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:5_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:6_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:7_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:8_start_0" [ style = bold] +"clone5_start_0" -> "clone5group:9_start_0" [ style = bold] +"clone5_start_0" [ style=bold 
color="green" fontcolor="orange"] +"clone5group:10_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:10_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:10_start_0" -> "clone5group:10_running_0" [ style = bold] +"clone5group:10_start_0" -> "clone5rsc1:10_start_0 node8" [ style = bold] +"clone5group:10_start_0" -> "clone5rsc2:10_start_0 node8" [ style = bold] +"clone5group:10_start_0" -> "clone5rsc3:10_start_0 node8" [ style = bold] +"clone5group:10_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:2_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:2_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:2_start_0" -> "clone5group:2_running_0" [ style = bold] +"clone5group:2_start_0" -> "clone5rsc2_start_0 node3" [ style = bold] +"clone5group:2_start_0" -> "clone5rsc3_start_0 node3" [ style = bold] +"clone5group:2_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:3_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:3_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:3_start_0" -> "clone5group:3_running_0" [ style = bold] +"clone5group:3_start_0" -> "clone5rsc1_start_0 node9" [ style = bold] +"clone5group:3_start_0" -> "clone5rsc2_start_0 node9" [ style = bold] +"clone5group:3_start_0" -> "clone5rsc3_start_0 node9" [ style = bold] +"clone5group:3_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:4_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:4_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:4_start_0" -> "clone5group:4_running_0" [ style = bold] +"clone5group:4_start_0" -> "clone5rsc1:4_start_0 node10" [ style = bold] +"clone5group:4_start_0" -> "clone5rsc2:4_start_0 node10" [ style = bold] +"clone5group:4_start_0" -> "clone5rsc3:4_start_0 node10" [ style = bold] +"clone5group:4_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:5_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:5_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:5_start_0" -> "clone5group:5_running_0" [ style = bold] +"clone5group:5_start_0" -> "clone5rsc1:5_start_0 node11" [ style = bold] +"clone5group:5_start_0" -> "clone5rsc2:5_start_0 node11" [ style = bold] +"clone5group:5_start_0" -> "clone5rsc3:5_start_0 node11" [ style = bold] +"clone5group:5_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:6_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:6_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:6_start_0" -> "clone5group:6_running_0" [ style = bold] +"clone5group:6_start_0" -> "clone5rsc1:6_start_0 node4" [ style = bold] +"clone5group:6_start_0" -> "clone5rsc2:6_start_0 node4" [ style = bold] +"clone5group:6_start_0" -> "clone5rsc3:6_start_0 node4" [ style = bold] +"clone5group:6_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:7_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:7_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:7_start_0" -> "clone5group:7_running_0" [ style = bold] +"clone5group:7_start_0" -> "clone5rsc1:7_start_0 node5" [ style = bold] +"clone5group:7_start_0" -> "clone5rsc2:7_start_0 node5" [ style = bold] +"clone5group:7_start_0" -> "clone5rsc3:7_start_0 node5" [ style = bold] +"clone5group:7_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:8_running_0" -> "clone5_running_0" [ style = bold] 
+"clone5group:8_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:8_start_0" -> "clone5group:8_running_0" [ style = bold] +"clone5group:8_start_0" -> "clone5rsc1:8_start_0 node6" [ style = bold] +"clone5group:8_start_0" -> "clone5rsc2:8_start_0 node6" [ style = bold] +"clone5group:8_start_0" -> "clone5rsc3:8_start_0 node6" [ style = bold] +"clone5group:8_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:9_running_0" -> "clone5_running_0" [ style = bold] +"clone5group:9_running_0" [ style=bold color="green" fontcolor="orange"] +"clone5group:9_start_0" -> "clone5group:9_running_0" [ style = bold] +"clone5group:9_start_0" -> "clone5rsc1:9_start_0 node7" [ style = bold] +"clone5group:9_start_0" -> "clone5rsc2:9_start_0 node7" [ style = bold] +"clone5group:9_start_0" -> "clone5rsc3:9_start_0 node7" [ style = bold] +"clone5group:9_start_0" [ style=bold color="green" fontcolor="orange"] +"clone5rsc1:10_monitor_10000 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:10_start_0 node8" -> "clone5group:10_running_0" [ style = bold] +"clone5rsc1:10_start_0 node8" -> "clone5rsc1:10_monitor_10000 node8" [ style = bold] +"clone5rsc1:10_start_0 node8" -> "clone5rsc2:10_start_0 node8" [ style = bold] +"clone5rsc1:10_start_0 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:4_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:4_start_0 node10" -> "clone5group:4_running_0" [ style = bold] +"clone5rsc1:4_start_0 node10" -> "clone5rsc1:4_monitor_10000 node10" [ style = bold] +"clone5rsc1:4_start_0 node10" -> "clone5rsc2:4_start_0 node10" [ style = bold] +"clone5rsc1:4_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:5_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:5_start_0 node11" -> "clone5group:5_running_0" [ style = bold] +"clone5rsc1:5_start_0 node11" -> "clone5rsc1:5_monitor_10000 node11" [ style = bold] +"clone5rsc1:5_start_0 node11" -> "clone5rsc2:5_start_0 node11" [ style = bold] +"clone5rsc1:5_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:6_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:6_start_0 node4" -> "clone5group:6_running_0" [ style = bold] +"clone5rsc1:6_start_0 node4" -> "clone5rsc1:6_monitor_10000 node4" [ style = bold] +"clone5rsc1:6_start_0 node4" -> "clone5rsc2:6_start_0 node4" [ style = bold] +"clone5rsc1:6_start_0 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:7_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:7_start_0 node5" -> "clone5group:7_running_0" [ style = bold] +"clone5rsc1:7_start_0 node5" -> "clone5rsc1:7_monitor_10000 node5" [ style = bold] +"clone5rsc1:7_start_0 node5" -> "clone5rsc2:7_start_0 node5" [ style = bold] +"clone5rsc1:7_start_0 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:8_monitor_10000 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:8_start_0 node6" -> "clone5group:8_running_0" [ style = bold] +"clone5rsc1:8_start_0 node6" -> "clone5rsc1:8_monitor_10000 node6" [ style = bold] +"clone5rsc1:8_start_0 node6" -> "clone5rsc2:8_start_0 node6" [ style = bold] +"clone5rsc1:8_start_0 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:9_monitor_10000 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc1:9_start_0 node7" -> "clone5group:9_running_0" [ style = bold] +"clone5rsc1:9_start_0 node7" -> "clone5rsc1:9_monitor_10000 node7" [ 
style = bold] +"clone5rsc1:9_start_0 node7" -> "clone5rsc2:9_start_0 node7" [ style = bold] +"clone5rsc1:9_start_0 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc1_monitor_10000 node9" [ style=bold color="green" fontcolor="black"] +"clone5rsc1_start_0 node9" -> "clone5group:3_running_0" [ style = bold] +"clone5rsc1_start_0 node9" -> "clone5rsc1_monitor_10000 node9" [ style = bold] +"clone5rsc1_start_0 node9" -> "clone5rsc2_start_0 node9" [ style = bold] +"clone5rsc1_start_0 node9" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:10_monitor_10000 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:10_start_0 node8" -> "clone5group:10_running_0" [ style = bold] +"clone5rsc2:10_start_0 node8" -> "clone5rsc2:10_monitor_10000 node8" [ style = bold] +"clone5rsc2:10_start_0 node8" -> "clone5rsc3:10_start_0 node8" [ style = bold] +"clone5rsc2:10_start_0 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:4_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:4_start_0 node10" -> "clone5group:4_running_0" [ style = bold] +"clone5rsc2:4_start_0 node10" -> "clone5rsc2:4_monitor_10000 node10" [ style = bold] +"clone5rsc2:4_start_0 node10" -> "clone5rsc3:4_start_0 node10" [ style = bold] +"clone5rsc2:4_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:5_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:5_start_0 node11" -> "clone5group:5_running_0" [ style = bold] +"clone5rsc2:5_start_0 node11" -> "clone5rsc2:5_monitor_10000 node11" [ style = bold] +"clone5rsc2:5_start_0 node11" -> "clone5rsc3:5_start_0 node11" [ style = bold] +"clone5rsc2:5_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:6_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:6_start_0 node4" -> "clone5group:6_running_0" [ style = bold] +"clone5rsc2:6_start_0 node4" -> "clone5rsc2:6_monitor_10000 node4" [ style = bold] +"clone5rsc2:6_start_0 node4" -> "clone5rsc3:6_start_0 node4" [ style = bold] +"clone5rsc2:6_start_0 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:7_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:7_start_0 node5" -> "clone5group:7_running_0" [ style = bold] +"clone5rsc2:7_start_0 node5" -> "clone5rsc2:7_monitor_10000 node5" [ style = bold] +"clone5rsc2:7_start_0 node5" -> "clone5rsc3:7_start_0 node5" [ style = bold] +"clone5rsc2:7_start_0 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:8_monitor_10000 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:8_start_0 node6" -> "clone5group:8_running_0" [ style = bold] +"clone5rsc2:8_start_0 node6" -> "clone5rsc2:8_monitor_10000 node6" [ style = bold] +"clone5rsc2:8_start_0 node6" -> "clone5rsc3:8_start_0 node6" [ style = bold] +"clone5rsc2:8_start_0 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:9_monitor_10000 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc2:9_start_0 node7" -> "clone5group:9_running_0" [ style = bold] +"clone5rsc2:9_start_0 node7" -> "clone5rsc2:9_monitor_10000 node7" [ style = bold] +"clone5rsc2:9_start_0 node7" -> "clone5rsc3:9_start_0 node7" [ style = bold] +"clone5rsc2:9_start_0 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc2_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"clone5rsc2_monitor_10000 node9" [ style=bold color="green" fontcolor="black"] +"clone5rsc2_start_0 node3" -> "clone5group:2_running_0" [ 
style = bold] +"clone5rsc2_start_0 node3" -> "clone5rsc2_monitor_10000 node3" [ style = bold] +"clone5rsc2_start_0 node3" -> "clone5rsc3_start_0 node3" [ style = bold] +"clone5rsc2_start_0 node3" [ style=bold color="green" fontcolor="black"] +"clone5rsc2_start_0 node9" -> "clone5group:3_running_0" [ style = bold] +"clone5rsc2_start_0 node9" -> "clone5rsc2_monitor_10000 node9" [ style = bold] +"clone5rsc2_start_0 node9" -> "clone5rsc3_start_0 node9" [ style = bold] +"clone5rsc2_start_0 node9" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:10_monitor_10000 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:10_start_0 node8" -> "clone5group:10_running_0" [ style = bold] +"clone5rsc3:10_start_0 node8" -> "clone5rsc3:10_monitor_10000 node8" [ style = bold] +"clone5rsc3:10_start_0 node8" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:4_monitor_10000 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:4_start_0 node10" -> "clone5group:4_running_0" [ style = bold] +"clone5rsc3:4_start_0 node10" -> "clone5rsc3:4_monitor_10000 node10" [ style = bold] +"clone5rsc3:4_start_0 node10" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:5_monitor_10000 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:5_start_0 node11" -> "clone5group:5_running_0" [ style = bold] +"clone5rsc3:5_start_0 node11" -> "clone5rsc3:5_monitor_10000 node11" [ style = bold] +"clone5rsc3:5_start_0 node11" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:6_monitor_10000 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:6_start_0 node4" -> "clone5group:6_running_0" [ style = bold] +"clone5rsc3:6_start_0 node4" -> "clone5rsc3:6_monitor_10000 node4" [ style = bold] +"clone5rsc3:6_start_0 node4" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:7_monitor_10000 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:7_start_0 node5" -> "clone5group:7_running_0" [ style = bold] +"clone5rsc3:7_start_0 node5" -> "clone5rsc3:7_monitor_10000 node5" [ style = bold] +"clone5rsc3:7_start_0 node5" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:8_monitor_10000 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:8_start_0 node6" -> "clone5group:8_running_0" [ style = bold] +"clone5rsc3:8_start_0 node6" -> "clone5rsc3:8_monitor_10000 node6" [ style = bold] +"clone5rsc3:8_start_0 node6" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:9_monitor_10000 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc3:9_start_0 node7" -> "clone5group:9_running_0" [ style = bold] +"clone5rsc3:9_start_0 node7" -> "clone5rsc3:9_monitor_10000 node7" [ style = bold] +"clone5rsc3:9_start_0 node7" [ style=bold color="green" fontcolor="black"] +"clone5rsc3_monitor_10000 node3" [ style=bold color="green" fontcolor="black"] +"clone5rsc3_monitor_10000 node9" [ style=bold color="green" fontcolor="black"] +"clone5rsc3_start_0 node3" -> "clone5group:2_running_0" [ style = bold] +"clone5rsc3_start_0 node3" -> "clone5rsc3_monitor_10000 node3" [ style = bold] +"clone5rsc3_start_0 node3" [ style=bold color="green" fontcolor="black"] +"clone5rsc3_start_0 node9" -> "clone5group:3_running_0" [ style = bold] +"clone5rsc3_start_0 node9" -> "clone5rsc3_monitor_10000 node9" [ style = bold] +"clone5rsc3_start_0 node9" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/anon-instance-pending.exp b/cts/scheduler/anon-instance-pending.exp new file mode 100644 index 0000000000..f6cfa1af11 --- /dev/null 
+++ b/cts/scheduler/anon-instance-pending.exp @@ -0,0 +1,1829 @@ [1,829 added lines not preserved: the transition-graph XML markup was stripped during extraction, leaving only bare "+" markers] diff --git a/cts/scheduler/anon-instance-pending.scores new file mode 100644 index 0000000000..a7439f0e90 --- /dev/null +++ b/cts/scheduler/anon-instance-pending.scores @@ -0,0 +1,2113 @@ +Allocation scores: +clone1rsc:0 promotion score on node1: -1 +clone1rsc:1 promotion score on node2: -1 +clone1rsc:10 promotion score on node8: -1 +clone1rsc:2 promotion score on node3: 10 +clone1rsc:3 promotion score on node4: -1 +clone1rsc:4 promotion score on node9: -1 +clone1rsc:5 promotion score on node10: -1 +clone1rsc:6 promotion score on node11: -1 +clone1rsc:7 promotion score on node5: -1 +clone1rsc:8 promotion score on node6: -1 +clone1rsc:9 promotion score on node7: -1 +clone_color: clone1 allocation score on node10: 0 +clone_color: clone1 allocation score on node11: 0 +clone_color: clone1 allocation score on node1: 0 +clone_color: clone1 allocation score on node2: 0 +clone_color: clone1 allocation score on node3: 0 +clone_color: clone1 allocation score on node4: 0 +clone_color: clone1 allocation score on node5: 0 +clone_color: clone1 allocation score on node6: 0 +clone_color: clone1 allocation score on node7: 0 +clone_color: clone1 allocation score on node8: 0 +clone_color: clone1 allocation score on node9: 0 +clone_color: clone1rsc:0 allocation score on node10: 0 +clone_color: clone1rsc:0 allocation score on node11: 0 +clone_color: clone1rsc:0 allocation score on node1: 1 +clone_color: clone1rsc:0 allocation score on node2: 0 +clone_color: clone1rsc:0 allocation score on node3: 0 +clone_color: clone1rsc:0 allocation score on node4: 0 +clone_color: clone1rsc:0 allocation score on node5: 0 +clone_color: clone1rsc:0 allocation score on node6: 0 +clone_color: clone1rsc:0 allocation score on node7: 0 +clone_color: clone1rsc:0 allocation score on node8: 0 +clone_color: clone1rsc:0 allocation score on node9: 0 +clone_color: clone1rsc:1 allocation score on node10: 0 +clone_color: clone1rsc:1 allocation score on node11: 0 +clone_color: clone1rsc:1 allocation score on node1: 0 +clone_color: clone1rsc:1 allocation score on node2: 1 +clone_color: clone1rsc:1 allocation score on node3: 0 +clone_color: clone1rsc:1 allocation score on node4: 0 +clone_color: clone1rsc:1 allocation score on node5: 0 +clone_color: clone1rsc:1 allocation score on node6: 0 +clone_color: clone1rsc:1 allocation score on node7: 0 +clone_color: clone1rsc:1 allocation score on node8: 0 +clone_color: clone1rsc:1 allocation score on node9: 0 +clone_color: clone1rsc:10 allocation score on node10: 0 +clone_color: clone1rsc:10 allocation score on node11: 0 +clone_color: clone1rsc:10 allocation score on node1: 0 +clone_color: clone1rsc:10 allocation score on node2: 0 +clone_color: clone1rsc:10 allocation score on node3: 10 +clone_color: clone1rsc:10 allocation score on node4: 0 +clone_color: clone1rsc:10 allocation score on node5: 0 +clone_color: clone1rsc:10 allocation score on node6: 0 +clone_color: clone1rsc:10 allocation score on node7: 0 +clone_color: clone1rsc:10 allocation score on node8: 0 +clone_color: clone1rsc:10 allocation score on node9: 0 +clone_color: clone1rsc:2 allocation score on node10: 0 +clone_color: clone1rsc:2 allocation score on node11: 0 +clone_color: clone1rsc:2 allocation score on node1: 0 +clone_color: clone1rsc:2 allocation score on node2: 0 +clone_color: clone1rsc:2 allocation score on node3: 11
+clone_color: clone1rsc:2 allocation score on node4: 0 +clone_color: clone1rsc:2 allocation score on node5: 0 +clone_color: clone1rsc:2 allocation score on node6: 0 +clone_color: clone1rsc:2 allocation score on node7: 0 +clone_color: clone1rsc:2 allocation score on node8: 0 +clone_color: clone1rsc:2 allocation score on node9: 0 +clone_color: clone1rsc:3 allocation score on node10: 0 +clone_color: clone1rsc:3 allocation score on node11: 0 +clone_color: clone1rsc:3 allocation score on node1: 0 +clone_color: clone1rsc:3 allocation score on node2: 0 +clone_color: clone1rsc:3 allocation score on node3: 0 +clone_color: clone1rsc:3 allocation score on node4: 1 +clone_color: clone1rsc:3 allocation score on node5: 0 +clone_color: clone1rsc:3 allocation score on node6: 0 +clone_color: clone1rsc:3 allocation score on node7: 0 +clone_color: clone1rsc:3 allocation score on node8: 0 +clone_color: clone1rsc:3 allocation score on node9: 0 +clone_color: clone1rsc:4 allocation score on node10: 0 +clone_color: clone1rsc:4 allocation score on node11: 0 +clone_color: clone1rsc:4 allocation score on node1: 0 +clone_color: clone1rsc:4 allocation score on node2: 0 +clone_color: clone1rsc:4 allocation score on node3: 0 +clone_color: clone1rsc:4 allocation score on node4: 0 +clone_color: clone1rsc:4 allocation score on node5: 0 +clone_color: clone1rsc:4 allocation score on node6: 0 +clone_color: clone1rsc:4 allocation score on node7: 0 +clone_color: clone1rsc:4 allocation score on node8: 0 +clone_color: clone1rsc:4 allocation score on node9: 0 +clone_color: clone1rsc:5 allocation score on node10: 0 +clone_color: clone1rsc:5 allocation score on node11: 0 +clone_color: clone1rsc:5 allocation score on node1: 0 +clone_color: clone1rsc:5 allocation score on node2: 0 +clone_color: clone1rsc:5 allocation score on node3: 10 +clone_color: clone1rsc:5 allocation score on node4: 0 +clone_color: clone1rsc:5 allocation score on node5: 0 +clone_color: clone1rsc:5 allocation score on node6: 0 +clone_color: clone1rsc:5 allocation score on node7: 0 +clone_color: clone1rsc:5 allocation score on node8: 0 +clone_color: clone1rsc:5 allocation score on node9: 0 +clone_color: clone1rsc:6 allocation score on node10: 0 +clone_color: clone1rsc:6 allocation score on node11: 0 +clone_color: clone1rsc:6 allocation score on node1: 0 +clone_color: clone1rsc:6 allocation score on node2: 0 +clone_color: clone1rsc:6 allocation score on node3: 10 +clone_color: clone1rsc:6 allocation score on node4: 0 +clone_color: clone1rsc:6 allocation score on node5: 0 +clone_color: clone1rsc:6 allocation score on node6: 0 +clone_color: clone1rsc:6 allocation score on node7: 0 +clone_color: clone1rsc:6 allocation score on node8: 0 +clone_color: clone1rsc:6 allocation score on node9: 0 +clone_color: clone1rsc:7 allocation score on node10: 0 +clone_color: clone1rsc:7 allocation score on node11: 0 +clone_color: clone1rsc:7 allocation score on node1: 0 +clone_color: clone1rsc:7 allocation score on node2: 0 +clone_color: clone1rsc:7 allocation score on node3: 10 +clone_color: clone1rsc:7 allocation score on node4: 0 +clone_color: clone1rsc:7 allocation score on node5: 0 +clone_color: clone1rsc:7 allocation score on node6: 0 +clone_color: clone1rsc:7 allocation score on node7: 0 +clone_color: clone1rsc:7 allocation score on node8: 0 +clone_color: clone1rsc:7 allocation score on node9: 0 +clone_color: clone1rsc:8 allocation score on node10: 0 +clone_color: clone1rsc:8 allocation score on node11: 0 +clone_color: clone1rsc:8 allocation score on node1: 0 +clone_color: 
clone1rsc:8 allocation score on node2: 0 +clone_color: clone1rsc:8 allocation score on node3: 10 +clone_color: clone1rsc:8 allocation score on node4: 0 +clone_color: clone1rsc:8 allocation score on node5: 0 +clone_color: clone1rsc:8 allocation score on node6: 0 +clone_color: clone1rsc:8 allocation score on node7: 0 +clone_color: clone1rsc:8 allocation score on node8: 0 +clone_color: clone1rsc:8 allocation score on node9: 0 +clone_color: clone1rsc:9 allocation score on node10: 0 +clone_color: clone1rsc:9 allocation score on node11: 0 +clone_color: clone1rsc:9 allocation score on node1: 0 +clone_color: clone1rsc:9 allocation score on node2: 0 +clone_color: clone1rsc:9 allocation score on node3: 10 +clone_color: clone1rsc:9 allocation score on node4: 0 +clone_color: clone1rsc:9 allocation score on node5: 0 +clone_color: clone1rsc:9 allocation score on node6: 0 +clone_color: clone1rsc:9 allocation score on node7: 0 +clone_color: clone1rsc:9 allocation score on node8: 0 +clone_color: clone1rsc:9 allocation score on node9: 0 +clone_color: clone2 allocation score on node10: 0 +clone_color: clone2 allocation score on node11: 0 +clone_color: clone2 allocation score on node1: 0 +clone_color: clone2 allocation score on node2: 0 +clone_color: clone2 allocation score on node3: 0 +clone_color: clone2 allocation score on node4: 0 +clone_color: clone2 allocation score on node5: 0 +clone_color: clone2 allocation score on node6: 0 +clone_color: clone2 allocation score on node7: 0 +clone_color: clone2 allocation score on node8: 0 +clone_color: clone2 allocation score on node9: 0 +clone_color: clone2rsc:0 allocation score on node10: 0 +clone_color: clone2rsc:0 allocation score on node11: 0 +clone_color: clone2rsc:0 allocation score on node1: 0 +clone_color: clone2rsc:0 allocation score on node2: 1 +clone_color: clone2rsc:0 allocation score on node3: 0 +clone_color: clone2rsc:0 allocation score on node4: 0 +clone_color: clone2rsc:0 allocation score on node5: 0 +clone_color: clone2rsc:0 allocation score on node6: 0 +clone_color: clone2rsc:0 allocation score on node7: 0 +clone_color: clone2rsc:0 allocation score on node8: 0 +clone_color: clone2rsc:0 allocation score on node9: 0 +clone_color: clone2rsc:1 allocation score on node10: 0 +clone_color: clone2rsc:1 allocation score on node11: 0 +clone_color: clone2rsc:1 allocation score on node1: 0 +clone_color: clone2rsc:1 allocation score on node2: 0 +clone_color: clone2rsc:1 allocation score on node3: 0 +clone_color: clone2rsc:1 allocation score on node4: 1 +clone_color: clone2rsc:1 allocation score on node5: 0 +clone_color: clone2rsc:1 allocation score on node6: 0 +clone_color: clone2rsc:1 allocation score on node7: 0 +clone_color: clone2rsc:1 allocation score on node8: 0 +clone_color: clone2rsc:1 allocation score on node9: 0 +clone_color: clone2rsc:2 allocation score on node10: 0 +clone_color: clone2rsc:2 allocation score on node11: 0 +clone_color: clone2rsc:2 allocation score on node1: 0 +clone_color: clone2rsc:2 allocation score on node2: 0 +clone_color: clone2rsc:2 allocation score on node3: 0 +clone_color: clone2rsc:2 allocation score on node4: 0 +clone_color: clone2rsc:2 allocation score on node5: 0 +clone_color: clone2rsc:2 allocation score on node6: 0 +clone_color: clone2rsc:2 allocation score on node7: 0 +clone_color: clone2rsc:2 allocation score on node8: 0 +clone_color: clone2rsc:2 allocation score on node9: 0 +clone_color: clone2rsc:3 allocation score on node10: 0 +clone_color: clone2rsc:3 allocation score on node11: 0 +clone_color: clone2rsc:3 
allocation score on node1: 0 +clone_color: clone2rsc:3 allocation score on node2: 0 +clone_color: clone2rsc:3 allocation score on node3: 0 +clone_color: clone2rsc:3 allocation score on node4: 0 +clone_color: clone2rsc:3 allocation score on node5: 0 +clone_color: clone2rsc:3 allocation score on node6: 0 +clone_color: clone2rsc:3 allocation score on node7: 0 +clone_color: clone2rsc:3 allocation score on node8: 0 +clone_color: clone2rsc:3 allocation score on node9: 0 +clone_color: clone2rsc:4 allocation score on node10: 0 +clone_color: clone2rsc:4 allocation score on node11: 0 +clone_color: clone2rsc:4 allocation score on node1: 0 +clone_color: clone2rsc:4 allocation score on node2: 0 +clone_color: clone2rsc:4 allocation score on node3: 0 +clone_color: clone2rsc:4 allocation score on node4: 0 +clone_color: clone2rsc:4 allocation score on node5: 0 +clone_color: clone2rsc:4 allocation score on node6: 0 +clone_color: clone2rsc:4 allocation score on node7: 0 +clone_color: clone2rsc:4 allocation score on node8: 0 +clone_color: clone2rsc:4 allocation score on node9: 0 +clone_color: clone3 allocation score on node10: 0 +clone_color: clone3 allocation score on node11: 0 +clone_color: clone3 allocation score on node1: 0 +clone_color: clone3 allocation score on node2: 0 +clone_color: clone3 allocation score on node3: 0 +clone_color: clone3 allocation score on node4: 0 +clone_color: clone3 allocation score on node5: 0 +clone_color: clone3 allocation score on node6: 0 +clone_color: clone3 allocation score on node7: 0 +clone_color: clone3 allocation score on node8: 0 +clone_color: clone3 allocation score on node9: 0 +clone_color: clone3rsc:0 allocation score on node10: 0 +clone_color: clone3rsc:0 allocation score on node11: 0 +clone_color: clone3rsc:0 allocation score on node1: 0 +clone_color: clone3rsc:0 allocation score on node2: 0 +clone_color: clone3rsc:0 allocation score on node3: 1 +clone_color: clone3rsc:0 allocation score on node4: 0 +clone_color: clone3rsc:0 allocation score on node5: 0 +clone_color: clone3rsc:0 allocation score on node6: 0 +clone_color: clone3rsc:0 allocation score on node7: 0 +clone_color: clone3rsc:0 allocation score on node8: 0 +clone_color: clone3rsc:0 allocation score on node9: 0 +clone_color: clone3rsc:1 allocation score on node10: 0 +clone_color: clone3rsc:1 allocation score on node11: 0 +clone_color: clone3rsc:1 allocation score on node1: 0 +clone_color: clone3rsc:1 allocation score on node2: 0 +clone_color: clone3rsc:1 allocation score on node3: 0 +clone_color: clone3rsc:1 allocation score on node4: 0 +clone_color: clone3rsc:1 allocation score on node5: 0 +clone_color: clone3rsc:1 allocation score on node6: 0 +clone_color: clone3rsc:1 allocation score on node7: 0 +clone_color: clone3rsc:1 allocation score on node8: 0 +clone_color: clone3rsc:1 allocation score on node9: 0 +clone_color: clone3rsc:10 allocation score on node10: 0 +clone_color: clone3rsc:10 allocation score on node11: 0 +clone_color: clone3rsc:10 allocation score on node1: 0 +clone_color: clone3rsc:10 allocation score on node2: 0 +clone_color: clone3rsc:10 allocation score on node3: 0 +clone_color: clone3rsc:10 allocation score on node4: 0 +clone_color: clone3rsc:10 allocation score on node5: 0 +clone_color: clone3rsc:10 allocation score on node6: 0 +clone_color: clone3rsc:10 allocation score on node7: 0 +clone_color: clone3rsc:10 allocation score on node8: 0 +clone_color: clone3rsc:10 allocation score on node9: 0 +clone_color: clone3rsc:2 allocation score on node10: 0 +clone_color: clone3rsc:2 allocation 
score on node11: 0 +clone_color: clone3rsc:2 allocation score on node1: 0 +clone_color: clone3rsc:2 allocation score on node2: 0 +clone_color: clone3rsc:2 allocation score on node3: 0 +clone_color: clone3rsc:2 allocation score on node4: 0 +clone_color: clone3rsc:2 allocation score on node5: 0 +clone_color: clone3rsc:2 allocation score on node6: 0 +clone_color: clone3rsc:2 allocation score on node7: 0 +clone_color: clone3rsc:2 allocation score on node8: 0 +clone_color: clone3rsc:2 allocation score on node9: 0 +clone_color: clone3rsc:3 allocation score on node10: 0 +clone_color: clone3rsc:3 allocation score on node11: 0 +clone_color: clone3rsc:3 allocation score on node1: 0 +clone_color: clone3rsc:3 allocation score on node2: 0 +clone_color: clone3rsc:3 allocation score on node3: 0 +clone_color: clone3rsc:3 allocation score on node4: 0 +clone_color: clone3rsc:3 allocation score on node5: 0 +clone_color: clone3rsc:3 allocation score on node6: 0 +clone_color: clone3rsc:3 allocation score on node7: 0 +clone_color: clone3rsc:3 allocation score on node8: 0 +clone_color: clone3rsc:3 allocation score on node9: 0 +clone_color: clone3rsc:4 allocation score on node10: 0 +clone_color: clone3rsc:4 allocation score on node11: 0 +clone_color: clone3rsc:4 allocation score on node1: 0 +clone_color: clone3rsc:4 allocation score on node2: 0 +clone_color: clone3rsc:4 allocation score on node3: 0 +clone_color: clone3rsc:4 allocation score on node4: 0 +clone_color: clone3rsc:4 allocation score on node5: 0 +clone_color: clone3rsc:4 allocation score on node6: 0 +clone_color: clone3rsc:4 allocation score on node7: 0 +clone_color: clone3rsc:4 allocation score on node8: 0 +clone_color: clone3rsc:4 allocation score on node9: 0 +clone_color: clone3rsc:5 allocation score on node10: 0 +clone_color: clone3rsc:5 allocation score on node11: 0 +clone_color: clone3rsc:5 allocation score on node1: 0 +clone_color: clone3rsc:5 allocation score on node2: 0 +clone_color: clone3rsc:5 allocation score on node3: 0 +clone_color: clone3rsc:5 allocation score on node4: 0 +clone_color: clone3rsc:5 allocation score on node5: 0 +clone_color: clone3rsc:5 allocation score on node6: 0 +clone_color: clone3rsc:5 allocation score on node7: 0 +clone_color: clone3rsc:5 allocation score on node8: 0 +clone_color: clone3rsc:5 allocation score on node9: 0 +clone_color: clone3rsc:6 allocation score on node10: 0 +clone_color: clone3rsc:6 allocation score on node11: 0 +clone_color: clone3rsc:6 allocation score on node1: 0 +clone_color: clone3rsc:6 allocation score on node2: 0 +clone_color: clone3rsc:6 allocation score on node3: 0 +clone_color: clone3rsc:6 allocation score on node4: 0 +clone_color: clone3rsc:6 allocation score on node5: 0 +clone_color: clone3rsc:6 allocation score on node6: 0 +clone_color: clone3rsc:6 allocation score on node7: 0 +clone_color: clone3rsc:6 allocation score on node8: 0 +clone_color: clone3rsc:6 allocation score on node9: 0 +clone_color: clone3rsc:7 allocation score on node10: 0 +clone_color: clone3rsc:7 allocation score on node11: 0 +clone_color: clone3rsc:7 allocation score on node1: 0 +clone_color: clone3rsc:7 allocation score on node2: 0 +clone_color: clone3rsc:7 allocation score on node3: 0 +clone_color: clone3rsc:7 allocation score on node4: 0 +clone_color: clone3rsc:7 allocation score on node5: 0 +clone_color: clone3rsc:7 allocation score on node6: 0 +clone_color: clone3rsc:7 allocation score on node7: 0 +clone_color: clone3rsc:7 allocation score on node8: 0 +clone_color: clone3rsc:7 allocation score on node9: 0 
+clone_color: clone3rsc:8 allocation score on node10: 0 +clone_color: clone3rsc:8 allocation score on node11: 0 +clone_color: clone3rsc:8 allocation score on node1: 0 +clone_color: clone3rsc:8 allocation score on node2: 0 +clone_color: clone3rsc:8 allocation score on node3: 0 +clone_color: clone3rsc:8 allocation score on node4: 0 +clone_color: clone3rsc:8 allocation score on node5: 0 +clone_color: clone3rsc:8 allocation score on node6: 0 +clone_color: clone3rsc:8 allocation score on node7: 0 +clone_color: clone3rsc:8 allocation score on node8: 0 +clone_color: clone3rsc:8 allocation score on node9: 0 +clone_color: clone3rsc:9 allocation score on node10: 0 +clone_color: clone3rsc:9 allocation score on node11: 0 +clone_color: clone3rsc:9 allocation score on node1: 0 +clone_color: clone3rsc:9 allocation score on node2: 0 +clone_color: clone3rsc:9 allocation score on node3: 0 +clone_color: clone3rsc:9 allocation score on node4: 0 +clone_color: clone3rsc:9 allocation score on node5: 0 +clone_color: clone3rsc:9 allocation score on node6: 0 +clone_color: clone3rsc:9 allocation score on node7: 0 +clone_color: clone3rsc:9 allocation score on node8: 0 +clone_color: clone3rsc:9 allocation score on node9: 0 +clone_color: clone4 allocation score on node10: 0 +clone_color: clone4 allocation score on node11: 0 +clone_color: clone4 allocation score on node1: 0 +clone_color: clone4 allocation score on node2: 0 +clone_color: clone4 allocation score on node3: 0 +clone_color: clone4 allocation score on node4: 0 +clone_color: clone4 allocation score on node5: 0 +clone_color: clone4 allocation score on node6: 0 +clone_color: clone4 allocation score on node7: 0 +clone_color: clone4 allocation score on node8: 0 +clone_color: clone4 allocation score on node9: 0 +clone_color: clone4rsc:0 allocation score on node10: 0 +clone_color: clone4rsc:0 allocation score on node11: 0 +clone_color: clone4rsc:0 allocation score on node1: 1 +clone_color: clone4rsc:0 allocation score on node2: 0 +clone_color: clone4rsc:0 allocation score on node3: 0 +clone_color: clone4rsc:0 allocation score on node4: 0 +clone_color: clone4rsc:0 allocation score on node5: 0 +clone_color: clone4rsc:0 allocation score on node6: 0 +clone_color: clone4rsc:0 allocation score on node7: 0 +clone_color: clone4rsc:0 allocation score on node8: 0 +clone_color: clone4rsc:0 allocation score on node9: 0 +clone_color: clone4rsc:1 allocation score on node10: 0 +clone_color: clone4rsc:1 allocation score on node11: 0 +clone_color: clone4rsc:1 allocation score on node1: 0 +clone_color: clone4rsc:1 allocation score on node2: 0 +clone_color: clone4rsc:1 allocation score on node3: 0 +clone_color: clone4rsc:1 allocation score on node4: 0 +clone_color: clone4rsc:1 allocation score on node5: 1 +clone_color: clone4rsc:1 allocation score on node6: 0 +clone_color: clone4rsc:1 allocation score on node7: 0 +clone_color: clone4rsc:1 allocation score on node8: 0 +clone_color: clone4rsc:1 allocation score on node9: 0 +clone_color: clone4rsc:2 allocation score on node10: 0 +clone_color: clone4rsc:2 allocation score on node11: 0 +clone_color: clone4rsc:2 allocation score on node1: 0 +clone_color: clone4rsc:2 allocation score on node2: 0 +clone_color: clone4rsc:2 allocation score on node3: 0 +clone_color: clone4rsc:2 allocation score on node4: 0 +clone_color: clone4rsc:2 allocation score on node5: 0 +clone_color: clone4rsc:2 allocation score on node6: 1 +clone_color: clone4rsc:2 allocation score on node7: 0 +clone_color: clone4rsc:2 allocation score on node8: 0 +clone_color: 
clone4rsc:2 allocation score on node9: 0 +clone_color: clone4rsc:3 allocation score on node10: 0 +clone_color: clone4rsc:3 allocation score on node11: 0 +clone_color: clone4rsc:3 allocation score on node1: 0 +clone_color: clone4rsc:3 allocation score on node2: 0 +clone_color: clone4rsc:3 allocation score on node3: 0 +clone_color: clone4rsc:3 allocation score on node4: 0 +clone_color: clone4rsc:3 allocation score on node5: 0 +clone_color: clone4rsc:3 allocation score on node6: 0 +clone_color: clone4rsc:3 allocation score on node7: 1 +clone_color: clone4rsc:3 allocation score on node8: 0 +clone_color: clone4rsc:3 allocation score on node9: 0 +clone_color: clone4rsc:4 allocation score on node10: 0 +clone_color: clone4rsc:4 allocation score on node11: 0 +clone_color: clone4rsc:4 allocation score on node1: 0 +clone_color: clone4rsc:4 allocation score on node2: 0 +clone_color: clone4rsc:4 allocation score on node3: 0 +clone_color: clone4rsc:4 allocation score on node4: 0 +clone_color: clone4rsc:4 allocation score on node5: 0 +clone_color: clone4rsc:4 allocation score on node6: 0 +clone_color: clone4rsc:4 allocation score on node7: 0 +clone_color: clone4rsc:4 allocation score on node8: 1 +clone_color: clone4rsc:4 allocation score on node9: 0 +clone_color: clone5 allocation score on node10: 0 +clone_color: clone5 allocation score on node11: 0 +clone_color: clone5 allocation score on node1: 0 +clone_color: clone5 allocation score on node2: 0 +clone_color: clone5 allocation score on node3: 0 +clone_color: clone5 allocation score on node4: 0 +clone_color: clone5 allocation score on node5: 0 +clone_color: clone5 allocation score on node6: 0 +clone_color: clone5 allocation score on node7: 0 +clone_color: clone5 allocation score on node8: 0 +clone_color: clone5 allocation score on node9: 0 +clone_color: clone5group:0 allocation score on node10: 0 +clone_color: clone5group:0 allocation score on node11: 0 +clone_color: clone5group:0 allocation score on node1: 0 +clone_color: clone5group:0 allocation score on node2: 0 +clone_color: clone5group:0 allocation score on node3: 0 +clone_color: clone5group:0 allocation score on node4: 0 +clone_color: clone5group:0 allocation score on node5: 0 +clone_color: clone5group:0 allocation score on node6: 0 +clone_color: clone5group:0 allocation score on node7: 0 +clone_color: clone5group:0 allocation score on node8: 0 +clone_color: clone5group:0 allocation score on node9: 0 +clone_color: clone5group:1 allocation score on node10: 0 +clone_color: clone5group:1 allocation score on node11: 0 +clone_color: clone5group:1 allocation score on node1: 0 +clone_color: clone5group:1 allocation score on node2: 0 +clone_color: clone5group:1 allocation score on node3: 0 +clone_color: clone5group:1 allocation score on node4: 0 +clone_color: clone5group:1 allocation score on node5: 0 +clone_color: clone5group:1 allocation score on node6: 0 +clone_color: clone5group:1 allocation score on node7: 0 +clone_color: clone5group:1 allocation score on node8: 0 +clone_color: clone5group:1 allocation score on node9: 0 +clone_color: clone5group:10 allocation score on node10: 0 +clone_color: clone5group:10 allocation score on node11: 0 +clone_color: clone5group:10 allocation score on node1: 0 +clone_color: clone5group:10 allocation score on node2: 0 +clone_color: clone5group:10 allocation score on node3: 0 +clone_color: clone5group:10 allocation score on node4: 0 +clone_color: clone5group:10 allocation score on node5: 0 +clone_color: clone5group:10 allocation score on node6: 0 +clone_color: 
clone5group:10 allocation score on node7: 0 +clone_color: clone5group:10 allocation score on node8: 0 +clone_color: clone5group:10 allocation score on node9: 0 +clone_color: clone5group:2 allocation score on node10: 0 +clone_color: clone5group:2 allocation score on node11: 0 +clone_color: clone5group:2 allocation score on node1: 0 +clone_color: clone5group:2 allocation score on node2: 0 +clone_color: clone5group:2 allocation score on node3: 0 +clone_color: clone5group:2 allocation score on node4: 0 +clone_color: clone5group:2 allocation score on node5: 0 +clone_color: clone5group:2 allocation score on node6: 0 +clone_color: clone5group:2 allocation score on node7: 0 +clone_color: clone5group:2 allocation score on node8: 0 +clone_color: clone5group:2 allocation score on node9: 0 +clone_color: clone5group:3 allocation score on node10: 0 +clone_color: clone5group:3 allocation score on node11: 0 +clone_color: clone5group:3 allocation score on node1: 0 +clone_color: clone5group:3 allocation score on node2: 0 +clone_color: clone5group:3 allocation score on node3: 0 +clone_color: clone5group:3 allocation score on node4: 0 +clone_color: clone5group:3 allocation score on node5: 0 +clone_color: clone5group:3 allocation score on node6: 0 +clone_color: clone5group:3 allocation score on node7: 0 +clone_color: clone5group:3 allocation score on node8: 0 +clone_color: clone5group:3 allocation score on node9: 0 +clone_color: clone5group:4 allocation score on node10: 0 +clone_color: clone5group:4 allocation score on node11: 0 +clone_color: clone5group:4 allocation score on node1: 0 +clone_color: clone5group:4 allocation score on node2: 0 +clone_color: clone5group:4 allocation score on node3: 0 +clone_color: clone5group:4 allocation score on node4: 0 +clone_color: clone5group:4 allocation score on node5: 0 +clone_color: clone5group:4 allocation score on node6: 0 +clone_color: clone5group:4 allocation score on node7: 0 +clone_color: clone5group:4 allocation score on node8: 0 +clone_color: clone5group:4 allocation score on node9: 0 +clone_color: clone5group:5 allocation score on node10: 0 +clone_color: clone5group:5 allocation score on node11: 0 +clone_color: clone5group:5 allocation score on node1: 0 +clone_color: clone5group:5 allocation score on node2: 0 +clone_color: clone5group:5 allocation score on node3: 0 +clone_color: clone5group:5 allocation score on node4: 0 +clone_color: clone5group:5 allocation score on node5: 0 +clone_color: clone5group:5 allocation score on node6: 0 +clone_color: clone5group:5 allocation score on node7: 0 +clone_color: clone5group:5 allocation score on node8: 0 +clone_color: clone5group:5 allocation score on node9: 0 +clone_color: clone5group:6 allocation score on node10: 0 +clone_color: clone5group:6 allocation score on node11: 0 +clone_color: clone5group:6 allocation score on node1: 0 +clone_color: clone5group:6 allocation score on node2: 0 +clone_color: clone5group:6 allocation score on node3: 0 +clone_color: clone5group:6 allocation score on node4: 0 +clone_color: clone5group:6 allocation score on node5: 0 +clone_color: clone5group:6 allocation score on node6: 0 +clone_color: clone5group:6 allocation score on node7: 0 +clone_color: clone5group:6 allocation score on node8: 0 +clone_color: clone5group:6 allocation score on node9: 0 +clone_color: clone5group:7 allocation score on node10: 0 +clone_color: clone5group:7 allocation score on node11: 0 +clone_color: clone5group:7 allocation score on node1: 0 +clone_color: clone5group:7 allocation score on node2: 0 +clone_color: 
clone5group:7 allocation score on node3: 0 +clone_color: clone5group:7 allocation score on node4: 0 +clone_color: clone5group:7 allocation score on node5: 0 +clone_color: clone5group:7 allocation score on node6: 0 +clone_color: clone5group:7 allocation score on node7: 0 +clone_color: clone5group:7 allocation score on node8: 0 +clone_color: clone5group:7 allocation score on node9: 0 +clone_color: clone5group:8 allocation score on node10: 0 +clone_color: clone5group:8 allocation score on node11: 0 +clone_color: clone5group:8 allocation score on node1: 0 +clone_color: clone5group:8 allocation score on node2: 0 +clone_color: clone5group:8 allocation score on node3: 0 +clone_color: clone5group:8 allocation score on node4: 0 +clone_color: clone5group:8 allocation score on node5: 0 +clone_color: clone5group:8 allocation score on node6: 0 +clone_color: clone5group:8 allocation score on node7: 0 +clone_color: clone5group:8 allocation score on node8: 0 +clone_color: clone5group:8 allocation score on node9: 0 +clone_color: clone5group:9 allocation score on node10: 0 +clone_color: clone5group:9 allocation score on node11: 0 +clone_color: clone5group:9 allocation score on node1: 0 +clone_color: clone5group:9 allocation score on node2: 0 +clone_color: clone5group:9 allocation score on node3: 0 +clone_color: clone5group:9 allocation score on node4: 0 +clone_color: clone5group:9 allocation score on node5: 0 +clone_color: clone5group:9 allocation score on node6: 0 +clone_color: clone5group:9 allocation score on node7: 0 +clone_color: clone5group:9 allocation score on node8: 0 +clone_color: clone5group:9 allocation score on node9: 0 +clone_color: clone5rsc1:0 allocation score on node10: 0 +clone_color: clone5rsc1:0 allocation score on node11: 0 +clone_color: clone5rsc1:0 allocation score on node1: 1 +clone_color: clone5rsc1:0 allocation score on node2: 0 +clone_color: clone5rsc1:0 allocation score on node3: 0 +clone_color: clone5rsc1:0 allocation score on node4: 0 +clone_color: clone5rsc1:0 allocation score on node5: 0 +clone_color: clone5rsc1:0 allocation score on node6: 0 +clone_color: clone5rsc1:0 allocation score on node7: 0 +clone_color: clone5rsc1:0 allocation score on node8: 0 +clone_color: clone5rsc1:0 allocation score on node9: 0 +clone_color: clone5rsc1:1 allocation score on node10: 0 +clone_color: clone5rsc1:1 allocation score on node11: 0 +clone_color: clone5rsc1:1 allocation score on node1: 0 +clone_color: clone5rsc1:1 allocation score on node2: 1 +clone_color: clone5rsc1:1 allocation score on node3: 0 +clone_color: clone5rsc1:1 allocation score on node4: 0 +clone_color: clone5rsc1:1 allocation score on node5: 0 +clone_color: clone5rsc1:1 allocation score on node6: 0 +clone_color: clone5rsc1:1 allocation score on node7: 0 +clone_color: clone5rsc1:1 allocation score on node8: 0 +clone_color: clone5rsc1:1 allocation score on node9: 0 +clone_color: clone5rsc1:10 allocation score on node10: 0 +clone_color: clone5rsc1:10 allocation score on node11: 0 +clone_color: clone5rsc1:10 allocation score on node1: 0 +clone_color: clone5rsc1:10 allocation score on node2: 0 +clone_color: clone5rsc1:10 allocation score on node3: 0 +clone_color: clone5rsc1:10 allocation score on node4: 0 +clone_color: clone5rsc1:10 allocation score on node5: 0 +clone_color: clone5rsc1:10 allocation score on node6: 0 +clone_color: clone5rsc1:10 allocation score on node7: 0 +clone_color: clone5rsc1:10 allocation score on node8: 0 +clone_color: clone5rsc1:10 allocation score on node9: 0 +clone_color: clone5rsc1:2 allocation score on 
node10: 0 +clone_color: clone5rsc1:2 allocation score on node11: 0 +clone_color: clone5rsc1:2 allocation score on node1: 0 +clone_color: clone5rsc1:2 allocation score on node2: 0 +clone_color: clone5rsc1:2 allocation score on node3: 1 +clone_color: clone5rsc1:2 allocation score on node4: 0 +clone_color: clone5rsc1:2 allocation score on node5: 0 +clone_color: clone5rsc1:2 allocation score on node6: 0 +clone_color: clone5rsc1:2 allocation score on node7: 0 +clone_color: clone5rsc1:2 allocation score on node8: 0 +clone_color: clone5rsc1:2 allocation score on node9: 0 +clone_color: clone5rsc1:3 allocation score on node10: 0 +clone_color: clone5rsc1:3 allocation score on node11: 0 +clone_color: clone5rsc1:3 allocation score on node1: 0 +clone_color: clone5rsc1:3 allocation score on node2: 0 +clone_color: clone5rsc1:3 allocation score on node3: 0 +clone_color: clone5rsc1:3 allocation score on node4: 0 +clone_color: clone5rsc1:3 allocation score on node5: 0 +clone_color: clone5rsc1:3 allocation score on node6: 0 +clone_color: clone5rsc1:3 allocation score on node7: 0 +clone_color: clone5rsc1:3 allocation score on node8: 0 +clone_color: clone5rsc1:3 allocation score on node9: 0 +clone_color: clone5rsc1:4 allocation score on node10: 0 +clone_color: clone5rsc1:4 allocation score on node11: 0 +clone_color: clone5rsc1:4 allocation score on node1: 0 +clone_color: clone5rsc1:4 allocation score on node2: 0 +clone_color: clone5rsc1:4 allocation score on node3: 0 +clone_color: clone5rsc1:4 allocation score on node4: 0 +clone_color: clone5rsc1:4 allocation score on node5: 0 +clone_color: clone5rsc1:4 allocation score on node6: 0 +clone_color: clone5rsc1:4 allocation score on node7: 0 +clone_color: clone5rsc1:4 allocation score on node8: 0 +clone_color: clone5rsc1:4 allocation score on node9: 0 +clone_color: clone5rsc1:5 allocation score on node10: 0 +clone_color: clone5rsc1:5 allocation score on node11: 0 +clone_color: clone5rsc1:5 allocation score on node1: 0 +clone_color: clone5rsc1:5 allocation score on node2: 0 +clone_color: clone5rsc1:5 allocation score on node3: 0 +clone_color: clone5rsc1:5 allocation score on node4: 0 +clone_color: clone5rsc1:5 allocation score on node5: 0 +clone_color: clone5rsc1:5 allocation score on node6: 0 +clone_color: clone5rsc1:5 allocation score on node7: 0 +clone_color: clone5rsc1:5 allocation score on node8: 0 +clone_color: clone5rsc1:5 allocation score on node9: 0 +clone_color: clone5rsc1:6 allocation score on node10: 0 +clone_color: clone5rsc1:6 allocation score on node11: 0 +clone_color: clone5rsc1:6 allocation score on node1: 0 +clone_color: clone5rsc1:6 allocation score on node2: 0 +clone_color: clone5rsc1:6 allocation score on node3: 0 +clone_color: clone5rsc1:6 allocation score on node4: 0 +clone_color: clone5rsc1:6 allocation score on node5: 0 +clone_color: clone5rsc1:6 allocation score on node6: 0 +clone_color: clone5rsc1:6 allocation score on node7: 0 +clone_color: clone5rsc1:6 allocation score on node8: 0 +clone_color: clone5rsc1:6 allocation score on node9: 0 +clone_color: clone5rsc1:7 allocation score on node10: 0 +clone_color: clone5rsc1:7 allocation score on node11: 0 +clone_color: clone5rsc1:7 allocation score on node1: 0 +clone_color: clone5rsc1:7 allocation score on node2: 0 +clone_color: clone5rsc1:7 allocation score on node3: 0 +clone_color: clone5rsc1:7 allocation score on node4: 0 +clone_color: clone5rsc1:7 allocation score on node5: 0 +clone_color: clone5rsc1:7 allocation score on node6: 0 +clone_color: clone5rsc1:7 allocation score on node7: 0 
+clone_color: clone5rsc1:7 allocation score on node8: 0 +clone_color: clone5rsc1:7 allocation score on node9: 0 +clone_color: clone5rsc1:8 allocation score on node10: 0 +clone_color: clone5rsc1:8 allocation score on node11: 0 +clone_color: clone5rsc1:8 allocation score on node1: 0 +clone_color: clone5rsc1:8 allocation score on node2: 0 +clone_color: clone5rsc1:8 allocation score on node3: 0 +clone_color: clone5rsc1:8 allocation score on node4: 0 +clone_color: clone5rsc1:8 allocation score on node5: 0 +clone_color: clone5rsc1:8 allocation score on node6: 0 +clone_color: clone5rsc1:8 allocation score on node7: 0 +clone_color: clone5rsc1:8 allocation score on node8: 0 +clone_color: clone5rsc1:8 allocation score on node9: 0 +clone_color: clone5rsc1:9 allocation score on node10: 0 +clone_color: clone5rsc1:9 allocation score on node11: 0 +clone_color: clone5rsc1:9 allocation score on node1: 0 +clone_color: clone5rsc1:9 allocation score on node2: 0 +clone_color: clone5rsc1:9 allocation score on node3: 0 +clone_color: clone5rsc1:9 allocation score on node4: 0 +clone_color: clone5rsc1:9 allocation score on node5: 0 +clone_color: clone5rsc1:9 allocation score on node6: 0 +clone_color: clone5rsc1:9 allocation score on node7: 0 +clone_color: clone5rsc1:9 allocation score on node8: 0 +clone_color: clone5rsc1:9 allocation score on node9: 0 +clone_color: clone5rsc2:0 allocation score on node10: 0 +clone_color: clone5rsc2:0 allocation score on node11: 0 +clone_color: clone5rsc2:0 allocation score on node1: 1 +clone_color: clone5rsc2:0 allocation score on node2: 0 +clone_color: clone5rsc2:0 allocation score on node3: 0 +clone_color: clone5rsc2:0 allocation score on node4: 0 +clone_color: clone5rsc2:0 allocation score on node5: 0 +clone_color: clone5rsc2:0 allocation score on node6: 0 +clone_color: clone5rsc2:0 allocation score on node7: 0 +clone_color: clone5rsc2:0 allocation score on node8: 0 +clone_color: clone5rsc2:0 allocation score on node9: 0 +clone_color: clone5rsc2:1 allocation score on node10: 0 +clone_color: clone5rsc2:1 allocation score on node11: 0 +clone_color: clone5rsc2:1 allocation score on node1: 0 +clone_color: clone5rsc2:1 allocation score on node2: 1 +clone_color: clone5rsc2:1 allocation score on node3: 0 +clone_color: clone5rsc2:1 allocation score on node4: 0 +clone_color: clone5rsc2:1 allocation score on node5: 0 +clone_color: clone5rsc2:1 allocation score on node6: 0 +clone_color: clone5rsc2:1 allocation score on node7: 0 +clone_color: clone5rsc2:1 allocation score on node8: 0 +clone_color: clone5rsc2:1 allocation score on node9: 0 +clone_color: clone5rsc2:10 allocation score on node10: 0 +clone_color: clone5rsc2:10 allocation score on node11: 0 +clone_color: clone5rsc2:10 allocation score on node1: 0 +clone_color: clone5rsc2:10 allocation score on node2: 0 +clone_color: clone5rsc2:10 allocation score on node3: 0 +clone_color: clone5rsc2:10 allocation score on node4: 0 +clone_color: clone5rsc2:10 allocation score on node5: 0 +clone_color: clone5rsc2:10 allocation score on node6: 0 +clone_color: clone5rsc2:10 allocation score on node7: 0 +clone_color: clone5rsc2:10 allocation score on node8: 0 +clone_color: clone5rsc2:10 allocation score on node9: 0 +clone_color: clone5rsc2:2 allocation score on node10: 0 +clone_color: clone5rsc2:2 allocation score on node11: 0 +clone_color: clone5rsc2:2 allocation score on node1: 0 +clone_color: clone5rsc2:2 allocation score on node2: 0 +clone_color: clone5rsc2:2 allocation score on node3: 1 +clone_color: clone5rsc2:2 allocation score on node4: 0 
+clone_color: clone5rsc2:2 allocation score on node5: 0 +clone_color: clone5rsc2:2 allocation score on node6: 0 +clone_color: clone5rsc2:2 allocation score on node7: 0 +clone_color: clone5rsc2:2 allocation score on node8: 0 +clone_color: clone5rsc2:2 allocation score on node9: 0 +clone_color: clone5rsc2:3 allocation score on node10: 0 +clone_color: clone5rsc2:3 allocation score on node11: 0 +clone_color: clone5rsc2:3 allocation score on node1: 0 +clone_color: clone5rsc2:3 allocation score on node2: 0 +clone_color: clone5rsc2:3 allocation score on node3: 0 +clone_color: clone5rsc2:3 allocation score on node4: 0 +clone_color: clone5rsc2:3 allocation score on node5: 0 +clone_color: clone5rsc2:3 allocation score on node6: 0 +clone_color: clone5rsc2:3 allocation score on node7: 0 +clone_color: clone5rsc2:3 allocation score on node8: 0 +clone_color: clone5rsc2:3 allocation score on node9: 0 +clone_color: clone5rsc2:4 allocation score on node10: 0 +clone_color: clone5rsc2:4 allocation score on node11: 0 +clone_color: clone5rsc2:4 allocation score on node1: 0 +clone_color: clone5rsc2:4 allocation score on node2: 0 +clone_color: clone5rsc2:4 allocation score on node3: 0 +clone_color: clone5rsc2:4 allocation score on node4: 0 +clone_color: clone5rsc2:4 allocation score on node5: 0 +clone_color: clone5rsc2:4 allocation score on node6: 0 +clone_color: clone5rsc2:4 allocation score on node7: 0 +clone_color: clone5rsc2:4 allocation score on node8: 0 +clone_color: clone5rsc2:4 allocation score on node9: 0 +clone_color: clone5rsc2:5 allocation score on node10: 0 +clone_color: clone5rsc2:5 allocation score on node11: 0 +clone_color: clone5rsc2:5 allocation score on node1: 0 +clone_color: clone5rsc2:5 allocation score on node2: 0 +clone_color: clone5rsc2:5 allocation score on node3: 0 +clone_color: clone5rsc2:5 allocation score on node4: 0 +clone_color: clone5rsc2:5 allocation score on node5: 0 +clone_color: clone5rsc2:5 allocation score on node6: 0 +clone_color: clone5rsc2:5 allocation score on node7: 0 +clone_color: clone5rsc2:5 allocation score on node8: 0 +clone_color: clone5rsc2:5 allocation score on node9: 0 +clone_color: clone5rsc2:6 allocation score on node10: 0 +clone_color: clone5rsc2:6 allocation score on node11: 0 +clone_color: clone5rsc2:6 allocation score on node1: 0 +clone_color: clone5rsc2:6 allocation score on node2: 0 +clone_color: clone5rsc2:6 allocation score on node3: 0 +clone_color: clone5rsc2:6 allocation score on node4: 0 +clone_color: clone5rsc2:6 allocation score on node5: 0 +clone_color: clone5rsc2:6 allocation score on node6: 0 +clone_color: clone5rsc2:6 allocation score on node7: 0 +clone_color: clone5rsc2:6 allocation score on node8: 0 +clone_color: clone5rsc2:6 allocation score on node9: 0 +clone_color: clone5rsc2:7 allocation score on node10: 0 +clone_color: clone5rsc2:7 allocation score on node11: 0 +clone_color: clone5rsc2:7 allocation score on node1: 0 +clone_color: clone5rsc2:7 allocation score on node2: 0 +clone_color: clone5rsc2:7 allocation score on node3: 0 +clone_color: clone5rsc2:7 allocation score on node4: 0 +clone_color: clone5rsc2:7 allocation score on node5: 0 +clone_color: clone5rsc2:7 allocation score on node6: 0 +clone_color: clone5rsc2:7 allocation score on node7: 0 +clone_color: clone5rsc2:7 allocation score on node8: 0 +clone_color: clone5rsc2:7 allocation score on node9: 0 +clone_color: clone5rsc2:8 allocation score on node10: 0 +clone_color: clone5rsc2:8 allocation score on node11: 0 +clone_color: clone5rsc2:8 allocation score on node1: 0 +clone_color: 
clone5rsc2:8 allocation score on node2: 0 +clone_color: clone5rsc2:8 allocation score on node3: 0 +clone_color: clone5rsc2:8 allocation score on node4: 0 +clone_color: clone5rsc2:8 allocation score on node5: 0 +clone_color: clone5rsc2:8 allocation score on node6: 0 +clone_color: clone5rsc2:8 allocation score on node7: 0 +clone_color: clone5rsc2:8 allocation score on node8: 0 +clone_color: clone5rsc2:8 allocation score on node9: 0 +clone_color: clone5rsc2:9 allocation score on node10: 0 +clone_color: clone5rsc2:9 allocation score on node11: 0 +clone_color: clone5rsc2:9 allocation score on node1: 0 +clone_color: clone5rsc2:9 allocation score on node2: 0 +clone_color: clone5rsc2:9 allocation score on node3: 0 +clone_color: clone5rsc2:9 allocation score on node4: 0 +clone_color: clone5rsc2:9 allocation score on node5: 0 +clone_color: clone5rsc2:9 allocation score on node6: 0 +clone_color: clone5rsc2:9 allocation score on node7: 0 +clone_color: clone5rsc2:9 allocation score on node8: 0 +clone_color: clone5rsc2:9 allocation score on node9: 0 +clone_color: clone5rsc3:0 allocation score on node10: 0 +clone_color: clone5rsc3:0 allocation score on node11: 0 +clone_color: clone5rsc3:0 allocation score on node1: 1 +clone_color: clone5rsc3:0 allocation score on node2: 0 +clone_color: clone5rsc3:0 allocation score on node3: 0 +clone_color: clone5rsc3:0 allocation score on node4: 0 +clone_color: clone5rsc3:0 allocation score on node5: 0 +clone_color: clone5rsc3:0 allocation score on node6: 0 +clone_color: clone5rsc3:0 allocation score on node7: 0 +clone_color: clone5rsc3:0 allocation score on node8: 0 +clone_color: clone5rsc3:0 allocation score on node9: 0 +clone_color: clone5rsc3:1 allocation score on node10: 0 +clone_color: clone5rsc3:1 allocation score on node11: 0 +clone_color: clone5rsc3:1 allocation score on node1: 0 +clone_color: clone5rsc3:1 allocation score on node2: 1 +clone_color: clone5rsc3:1 allocation score on node3: 0 +clone_color: clone5rsc3:1 allocation score on node4: 0 +clone_color: clone5rsc3:1 allocation score on node5: 0 +clone_color: clone5rsc3:1 allocation score on node6: 0 +clone_color: clone5rsc3:1 allocation score on node7: 0 +clone_color: clone5rsc3:1 allocation score on node8: 0 +clone_color: clone5rsc3:1 allocation score on node9: 0 +clone_color: clone5rsc3:10 allocation score on node10: 0 +clone_color: clone5rsc3:10 allocation score on node11: 0 +clone_color: clone5rsc3:10 allocation score on node1: 0 +clone_color: clone5rsc3:10 allocation score on node2: 0 +clone_color: clone5rsc3:10 allocation score on node3: 0 +clone_color: clone5rsc3:10 allocation score on node4: 0 +clone_color: clone5rsc3:10 allocation score on node5: 0 +clone_color: clone5rsc3:10 allocation score on node6: 0 +clone_color: clone5rsc3:10 allocation score on node7: 0 +clone_color: clone5rsc3:10 allocation score on node8: 0 +clone_color: clone5rsc3:10 allocation score on node9: 0 +clone_color: clone5rsc3:2 allocation score on node10: 0 +clone_color: clone5rsc3:2 allocation score on node11: 0 +clone_color: clone5rsc3:2 allocation score on node1: 0 +clone_color: clone5rsc3:2 allocation score on node2: 0 +clone_color: clone5rsc3:2 allocation score on node3: 0 +clone_color: clone5rsc3:2 allocation score on node4: 0 +clone_color: clone5rsc3:2 allocation score on node5: 0 +clone_color: clone5rsc3:2 allocation score on node6: 0 +clone_color: clone5rsc3:2 allocation score on node7: 0 +clone_color: clone5rsc3:2 allocation score on node8: 0 +clone_color: clone5rsc3:2 allocation score on node9: 0 +clone_color: 
clone5rsc3:3 allocation score on node10: 0 +clone_color: clone5rsc3:3 allocation score on node11: 0 +clone_color: clone5rsc3:3 allocation score on node1: 0 +clone_color: clone5rsc3:3 allocation score on node2: 0 +clone_color: clone5rsc3:3 allocation score on node3: 0 +clone_color: clone5rsc3:3 allocation score on node4: 0 +clone_color: clone5rsc3:3 allocation score on node5: 0 +clone_color: clone5rsc3:3 allocation score on node6: 0 +clone_color: clone5rsc3:3 allocation score on node7: 0 +clone_color: clone5rsc3:3 allocation score on node8: 0 +clone_color: clone5rsc3:3 allocation score on node9: 0 +clone_color: clone5rsc3:4 allocation score on node10: 0 +clone_color: clone5rsc3:4 allocation score on node11: 0 +clone_color: clone5rsc3:4 allocation score on node1: 0 +clone_color: clone5rsc3:4 allocation score on node2: 0 +clone_color: clone5rsc3:4 allocation score on node3: 0 +clone_color: clone5rsc3:4 allocation score on node4: 0 +clone_color: clone5rsc3:4 allocation score on node5: 0 +clone_color: clone5rsc3:4 allocation score on node6: 0 +clone_color: clone5rsc3:4 allocation score on node7: 0 +clone_color: clone5rsc3:4 allocation score on node8: 0 +clone_color: clone5rsc3:4 allocation score on node9: 0 +clone_color: clone5rsc3:5 allocation score on node10: 0 +clone_color: clone5rsc3:5 allocation score on node11: 0 +clone_color: clone5rsc3:5 allocation score on node1: 0 +clone_color: clone5rsc3:5 allocation score on node2: 0 +clone_color: clone5rsc3:5 allocation score on node3: 0 +clone_color: clone5rsc3:5 allocation score on node4: 0 +clone_color: clone5rsc3:5 allocation score on node5: 0 +clone_color: clone5rsc3:5 allocation score on node6: 0 +clone_color: clone5rsc3:5 allocation score on node7: 0 +clone_color: clone5rsc3:5 allocation score on node8: 0 +clone_color: clone5rsc3:5 allocation score on node9: 0 +clone_color: clone5rsc3:6 allocation score on node10: 0 +clone_color: clone5rsc3:6 allocation score on node11: 0 +clone_color: clone5rsc3:6 allocation score on node1: 0 +clone_color: clone5rsc3:6 allocation score on node2: 0 +clone_color: clone5rsc3:6 allocation score on node3: 0 +clone_color: clone5rsc3:6 allocation score on node4: 0 +clone_color: clone5rsc3:6 allocation score on node5: 0 +clone_color: clone5rsc3:6 allocation score on node6: 0 +clone_color: clone5rsc3:6 allocation score on node7: 0 +clone_color: clone5rsc3:6 allocation score on node8: 0 +clone_color: clone5rsc3:6 allocation score on node9: 0 +clone_color: clone5rsc3:7 allocation score on node10: 0 +clone_color: clone5rsc3:7 allocation score on node11: 0 +clone_color: clone5rsc3:7 allocation score on node1: 0 +clone_color: clone5rsc3:7 allocation score on node2: 0 +clone_color: clone5rsc3:7 allocation score on node3: 0 +clone_color: clone5rsc3:7 allocation score on node4: 0 +clone_color: clone5rsc3:7 allocation score on node5: 0 +clone_color: clone5rsc3:7 allocation score on node6: 0 +clone_color: clone5rsc3:7 allocation score on node7: 0 +clone_color: clone5rsc3:7 allocation score on node8: 0 +clone_color: clone5rsc3:7 allocation score on node9: 0 +clone_color: clone5rsc3:8 allocation score on node10: 0 +clone_color: clone5rsc3:8 allocation score on node11: 0 +clone_color: clone5rsc3:8 allocation score on node1: 0 +clone_color: clone5rsc3:8 allocation score on node2: 0 +clone_color: clone5rsc3:8 allocation score on node3: 0 +clone_color: clone5rsc3:8 allocation score on node4: 0 +clone_color: clone5rsc3:8 allocation score on node5: 0 +clone_color: clone5rsc3:8 allocation score on node6: 0 +clone_color: clone5rsc3:8 
allocation score on node7: 0 +clone_color: clone5rsc3:8 allocation score on node8: 0 +clone_color: clone5rsc3:8 allocation score on node9: 0 +clone_color: clone5rsc3:9 allocation score on node10: 0 +clone_color: clone5rsc3:9 allocation score on node11: 0 +clone_color: clone5rsc3:9 allocation score on node1: 0 +clone_color: clone5rsc3:9 allocation score on node2: 0 +clone_color: clone5rsc3:9 allocation score on node3: 0 +clone_color: clone5rsc3:9 allocation score on node4: 0 +clone_color: clone5rsc3:9 allocation score on node5: 0 +clone_color: clone5rsc3:9 allocation score on node6: 0 +clone_color: clone5rsc3:9 allocation score on node7: 0 +clone_color: clone5rsc3:9 allocation score on node8: 0 +clone_color: clone5rsc3:9 allocation score on node9: 0 +group_color: clone5group:0 allocation score on node10: 0 +group_color: clone5group:0 allocation score on node11: 0 +group_color: clone5group:0 allocation score on node1: 0 +group_color: clone5group:0 allocation score on node2: 0 +group_color: clone5group:0 allocation score on node3: 0 +group_color: clone5group:0 allocation score on node4: 0 +group_color: clone5group:0 allocation score on node5: 0 +group_color: clone5group:0 allocation score on node6: 0 +group_color: clone5group:0 allocation score on node7: 0 +group_color: clone5group:0 allocation score on node8: 0 +group_color: clone5group:0 allocation score on node9: 0 +group_color: clone5group:1 allocation score on node10: 0 +group_color: clone5group:1 allocation score on node11: 0 +group_color: clone5group:1 allocation score on node1: -INFINITY +group_color: clone5group:1 allocation score on node2: 0 +group_color: clone5group:1 allocation score on node3: 0 +group_color: clone5group:1 allocation score on node4: 0 +group_color: clone5group:1 allocation score on node5: 0 +group_color: clone5group:1 allocation score on node6: 0 +group_color: clone5group:1 allocation score on node7: 0 +group_color: clone5group:1 allocation score on node8: 0 +group_color: clone5group:1 allocation score on node9: 0 +group_color: clone5group:10 allocation score on node10: -INFINITY +group_color: clone5group:10 allocation score on node11: -INFINITY +group_color: clone5group:10 allocation score on node1: -INFINITY +group_color: clone5group:10 allocation score on node2: -INFINITY +group_color: clone5group:10 allocation score on node3: -INFINITY +group_color: clone5group:10 allocation score on node4: -INFINITY +group_color: clone5group:10 allocation score on node5: -INFINITY +group_color: clone5group:10 allocation score on node6: -INFINITY +group_color: clone5group:10 allocation score on node7: -INFINITY +group_color: clone5group:10 allocation score on node8: 0 +group_color: clone5group:10 allocation score on node9: -INFINITY +group_color: clone5group:2 allocation score on node10: 0 +group_color: clone5group:2 allocation score on node11: 0 +group_color: clone5group:2 allocation score on node1: -INFINITY +group_color: clone5group:2 allocation score on node2: -INFINITY +group_color: clone5group:2 allocation score on node3: 0 +group_color: clone5group:2 allocation score on node4: 0 +group_color: clone5group:2 allocation score on node5: 0 +group_color: clone5group:2 allocation score on node6: 0 +group_color: clone5group:2 allocation score on node7: 0 +group_color: clone5group:2 allocation score on node8: 0 +group_color: clone5group:2 allocation score on node9: 0 +group_color: clone5group:3 allocation score on node10: 0 +group_color: clone5group:3 allocation score on node11: 0 +group_color: clone5group:3 allocation score on 
node1: -INFINITY +group_color: clone5group:3 allocation score on node2: -INFINITY +group_color: clone5group:3 allocation score on node3: -INFINITY +group_color: clone5group:3 allocation score on node4: 0 +group_color: clone5group:3 allocation score on node5: 0 +group_color: clone5group:3 allocation score on node6: 0 +group_color: clone5group:3 allocation score on node7: 0 +group_color: clone5group:3 allocation score on node8: 0 +group_color: clone5group:3 allocation score on node9: 0 +group_color: clone5group:4 allocation score on node10: 0 +group_color: clone5group:4 allocation score on node11: 0 +group_color: clone5group:4 allocation score on node1: -INFINITY +group_color: clone5group:4 allocation score on node2: -INFINITY +group_color: clone5group:4 allocation score on node3: -INFINITY +group_color: clone5group:4 allocation score on node4: 0 +group_color: clone5group:4 allocation score on node5: 0 +group_color: clone5group:4 allocation score on node6: 0 +group_color: clone5group:4 allocation score on node7: 0 +group_color: clone5group:4 allocation score on node8: 0 +group_color: clone5group:4 allocation score on node9: -INFINITY +group_color: clone5group:5 allocation score on node10: -INFINITY +group_color: clone5group:5 allocation score on node11: 0 +group_color: clone5group:5 allocation score on node1: -INFINITY +group_color: clone5group:5 allocation score on node2: -INFINITY +group_color: clone5group:5 allocation score on node3: -INFINITY +group_color: clone5group:5 allocation score on node4: 0 +group_color: clone5group:5 allocation score on node5: 0 +group_color: clone5group:5 allocation score on node6: 0 +group_color: clone5group:5 allocation score on node7: 0 +group_color: clone5group:5 allocation score on node8: 0 +group_color: clone5group:5 allocation score on node9: -INFINITY +group_color: clone5group:6 allocation score on node10: -INFINITY +group_color: clone5group:6 allocation score on node11: -INFINITY +group_color: clone5group:6 allocation score on node1: -INFINITY +group_color: clone5group:6 allocation score on node2: -INFINITY +group_color: clone5group:6 allocation score on node3: -INFINITY +group_color: clone5group:6 allocation score on node4: 0 +group_color: clone5group:6 allocation score on node5: 0 +group_color: clone5group:6 allocation score on node6: 0 +group_color: clone5group:6 allocation score on node7: 0 +group_color: clone5group:6 allocation score on node8: 0 +group_color: clone5group:6 allocation score on node9: -INFINITY +group_color: clone5group:7 allocation score on node10: -INFINITY +group_color: clone5group:7 allocation score on node11: -INFINITY +group_color: clone5group:7 allocation score on node1: -INFINITY +group_color: clone5group:7 allocation score on node2: -INFINITY +group_color: clone5group:7 allocation score on node3: -INFINITY +group_color: clone5group:7 allocation score on node4: -INFINITY +group_color: clone5group:7 allocation score on node5: 0 +group_color: clone5group:7 allocation score on node6: 0 +group_color: clone5group:7 allocation score on node7: 0 +group_color: clone5group:7 allocation score on node8: 0 +group_color: clone5group:7 allocation score on node9: -INFINITY +group_color: clone5group:8 allocation score on node10: -INFINITY +group_color: clone5group:8 allocation score on node11: -INFINITY +group_color: clone5group:8 allocation score on node1: -INFINITY +group_color: clone5group:8 allocation score on node2: -INFINITY +group_color: clone5group:8 allocation score on node3: -INFINITY +group_color: clone5group:8 allocation score 
on node4: -INFINITY +group_color: clone5group:8 allocation score on node5: -INFINITY +group_color: clone5group:8 allocation score on node6: 0 +group_color: clone5group:8 allocation score on node7: 0 +group_color: clone5group:8 allocation score on node8: 0 +group_color: clone5group:8 allocation score on node9: -INFINITY +group_color: clone5group:9 allocation score on node10: -INFINITY +group_color: clone5group:9 allocation score on node11: -INFINITY +group_color: clone5group:9 allocation score on node1: -INFINITY +group_color: clone5group:9 allocation score on node2: -INFINITY +group_color: clone5group:9 allocation score on node3: -INFINITY +group_color: clone5group:9 allocation score on node4: -INFINITY +group_color: clone5group:9 allocation score on node5: -INFINITY +group_color: clone5group:9 allocation score on node6: -INFINITY +group_color: clone5group:9 allocation score on node7: 0 +group_color: clone5group:9 allocation score on node8: 0 +group_color: clone5group:9 allocation score on node9: -INFINITY +group_color: clone5rsc1:0 allocation score on node10: 0 +group_color: clone5rsc1:0 allocation score on node11: 0 +group_color: clone5rsc1:0 allocation score on node1: 1 +group_color: clone5rsc1:0 allocation score on node2: 0 +group_color: clone5rsc1:0 allocation score on node3: 0 +group_color: clone5rsc1:0 allocation score on node4: 0 +group_color: clone5rsc1:0 allocation score on node5: 0 +group_color: clone5rsc1:0 allocation score on node6: 0 +group_color: clone5rsc1:0 allocation score on node7: 0 +group_color: clone5rsc1:0 allocation score on node8: 0 +group_color: clone5rsc1:0 allocation score on node9: 0 +group_color: clone5rsc1:1 allocation score on node10: 0 +group_color: clone5rsc1:1 allocation score on node11: 0 +group_color: clone5rsc1:1 allocation score on node1: -INFINITY +group_color: clone5rsc1:1 allocation score on node2: 1 +group_color: clone5rsc1:1 allocation score on node3: 0 +group_color: clone5rsc1:1 allocation score on node4: 0 +group_color: clone5rsc1:1 allocation score on node5: 0 +group_color: clone5rsc1:1 allocation score on node6: 0 +group_color: clone5rsc1:1 allocation score on node7: 0 +group_color: clone5rsc1:1 allocation score on node8: 0 +group_color: clone5rsc1:1 allocation score on node9: 0 +group_color: clone5rsc1:10 allocation score on node10: -INFINITY +group_color: clone5rsc1:10 allocation score on node11: -INFINITY +group_color: clone5rsc1:10 allocation score on node1: -INFINITY +group_color: clone5rsc1:10 allocation score on node2: -INFINITY +group_color: clone5rsc1:10 allocation score on node3: -INFINITY +group_color: clone5rsc1:10 allocation score on node4: -INFINITY +group_color: clone5rsc1:10 allocation score on node5: -INFINITY +group_color: clone5rsc1:10 allocation score on node6: -INFINITY +group_color: clone5rsc1:10 allocation score on node7: -INFINITY +group_color: clone5rsc1:10 allocation score on node8: 0 +group_color: clone5rsc1:10 allocation score on node9: -INFINITY +group_color: clone5rsc1:2 allocation score on node10: 0 +group_color: clone5rsc1:2 allocation score on node11: 0 +group_color: clone5rsc1:2 allocation score on node1: -INFINITY +group_color: clone5rsc1:2 allocation score on node2: -INFINITY +group_color: clone5rsc1:2 allocation score on node3: 1 +group_color: clone5rsc1:2 allocation score on node4: 0 +group_color: clone5rsc1:2 allocation score on node5: 0 +group_color: clone5rsc1:2 allocation score on node6: 0 +group_color: clone5rsc1:2 allocation score on node7: 0 +group_color: clone5rsc1:2 allocation score on node8: 0 
+group_color: clone5rsc1:2 allocation score on node9: 0 +group_color: clone5rsc1:3 allocation score on node10: 0 +group_color: clone5rsc1:3 allocation score on node11: 0 +group_color: clone5rsc1:3 allocation score on node1: -INFINITY +group_color: clone5rsc1:3 allocation score on node2: -INFINITY +group_color: clone5rsc1:3 allocation score on node3: -INFINITY +group_color: clone5rsc1:3 allocation score on node4: 0 +group_color: clone5rsc1:3 allocation score on node5: 0 +group_color: clone5rsc1:3 allocation score on node6: 0 +group_color: clone5rsc1:3 allocation score on node7: 0 +group_color: clone5rsc1:3 allocation score on node8: 0 +group_color: clone5rsc1:3 allocation score on node9: 0 +group_color: clone5rsc1:4 allocation score on node10: 0 +group_color: clone5rsc1:4 allocation score on node11: 0 +group_color: clone5rsc1:4 allocation score on node1: -INFINITY +group_color: clone5rsc1:4 allocation score on node2: -INFINITY +group_color: clone5rsc1:4 allocation score on node3: -INFINITY +group_color: clone5rsc1:4 allocation score on node4: 0 +group_color: clone5rsc1:4 allocation score on node5: 0 +group_color: clone5rsc1:4 allocation score on node6: 0 +group_color: clone5rsc1:4 allocation score on node7: 0 +group_color: clone5rsc1:4 allocation score on node8: 0 +group_color: clone5rsc1:4 allocation score on node9: -INFINITY +group_color: clone5rsc1:5 allocation score on node10: -INFINITY +group_color: clone5rsc1:5 allocation score on node11: 0 +group_color: clone5rsc1:5 allocation score on node1: -INFINITY +group_color: clone5rsc1:5 allocation score on node2: -INFINITY +group_color: clone5rsc1:5 allocation score on node3: -INFINITY +group_color: clone5rsc1:5 allocation score on node4: 0 +group_color: clone5rsc1:5 allocation score on node5: 0 +group_color: clone5rsc1:5 allocation score on node6: 0 +group_color: clone5rsc1:5 allocation score on node7: 0 +group_color: clone5rsc1:5 allocation score on node8: 0 +group_color: clone5rsc1:5 allocation score on node9: -INFINITY +group_color: clone5rsc1:6 allocation score on node10: -INFINITY +group_color: clone5rsc1:6 allocation score on node11: -INFINITY +group_color: clone5rsc1:6 allocation score on node1: -INFINITY +group_color: clone5rsc1:6 allocation score on node2: -INFINITY +group_color: clone5rsc1:6 allocation score on node3: -INFINITY +group_color: clone5rsc1:6 allocation score on node4: 0 +group_color: clone5rsc1:6 allocation score on node5: 0 +group_color: clone5rsc1:6 allocation score on node6: 0 +group_color: clone5rsc1:6 allocation score on node7: 0 +group_color: clone5rsc1:6 allocation score on node8: 0 +group_color: clone5rsc1:6 allocation score on node9: -INFINITY +group_color: clone5rsc1:7 allocation score on node10: -INFINITY +group_color: clone5rsc1:7 allocation score on node11: -INFINITY +group_color: clone5rsc1:7 allocation score on node1: -INFINITY +group_color: clone5rsc1:7 allocation score on node2: -INFINITY +group_color: clone5rsc1:7 allocation score on node3: -INFINITY +group_color: clone5rsc1:7 allocation score on node4: -INFINITY +group_color: clone5rsc1:7 allocation score on node5: 0 +group_color: clone5rsc1:7 allocation score on node6: 0 +group_color: clone5rsc1:7 allocation score on node7: 0 +group_color: clone5rsc1:7 allocation score on node8: 0 +group_color: clone5rsc1:7 allocation score on node9: -INFINITY +group_color: clone5rsc1:8 allocation score on node10: -INFINITY +group_color: clone5rsc1:8 allocation score on node11: -INFINITY +group_color: clone5rsc1:8 allocation score on node1: -INFINITY +group_color: 
clone5rsc1:8 allocation score on node2: -INFINITY +group_color: clone5rsc1:8 allocation score on node3: -INFINITY +group_color: clone5rsc1:8 allocation score on node4: -INFINITY +group_color: clone5rsc1:8 allocation score on node5: -INFINITY +group_color: clone5rsc1:8 allocation score on node6: 0 +group_color: clone5rsc1:8 allocation score on node7: 0 +group_color: clone5rsc1:8 allocation score on node8: 0 +group_color: clone5rsc1:8 allocation score on node9: -INFINITY +group_color: clone5rsc1:9 allocation score on node10: -INFINITY +group_color: clone5rsc1:9 allocation score on node11: -INFINITY +group_color: clone5rsc1:9 allocation score on node1: -INFINITY +group_color: clone5rsc1:9 allocation score on node2: -INFINITY +group_color: clone5rsc1:9 allocation score on node3: -INFINITY +group_color: clone5rsc1:9 allocation score on node4: -INFINITY +group_color: clone5rsc1:9 allocation score on node5: -INFINITY +group_color: clone5rsc1:9 allocation score on node6: -INFINITY +group_color: clone5rsc1:9 allocation score on node7: 0 +group_color: clone5rsc1:9 allocation score on node8: 0 +group_color: clone5rsc1:9 allocation score on node9: -INFINITY +group_color: clone5rsc2:0 allocation score on node10: 0 +group_color: clone5rsc2:0 allocation score on node11: 0 +group_color: clone5rsc2:0 allocation score on node1: 1 +group_color: clone5rsc2:0 allocation score on node2: 0 +group_color: clone5rsc2:0 allocation score on node3: 0 +group_color: clone5rsc2:0 allocation score on node4: 0 +group_color: clone5rsc2:0 allocation score on node5: 0 +group_color: clone5rsc2:0 allocation score on node6: 0 +group_color: clone5rsc2:0 allocation score on node7: 0 +group_color: clone5rsc2:0 allocation score on node8: 0 +group_color: clone5rsc2:0 allocation score on node9: 0 +group_color: clone5rsc2:1 allocation score on node10: 0 +group_color: clone5rsc2:1 allocation score on node11: 0 +group_color: clone5rsc2:1 allocation score on node1: -INFINITY +group_color: clone5rsc2:1 allocation score on node2: 1 +group_color: clone5rsc2:1 allocation score on node3: 0 +group_color: clone5rsc2:1 allocation score on node4: 0 +group_color: clone5rsc2:1 allocation score on node5: 0 +group_color: clone5rsc2:1 allocation score on node6: 0 +group_color: clone5rsc2:1 allocation score on node7: 0 +group_color: clone5rsc2:1 allocation score on node8: 0 +group_color: clone5rsc2:1 allocation score on node9: 0 +group_color: clone5rsc2:10 allocation score on node10: -INFINITY +group_color: clone5rsc2:10 allocation score on node11: -INFINITY +group_color: clone5rsc2:10 allocation score on node1: -INFINITY +group_color: clone5rsc2:10 allocation score on node2: -INFINITY +group_color: clone5rsc2:10 allocation score on node3: -INFINITY +group_color: clone5rsc2:10 allocation score on node4: -INFINITY +group_color: clone5rsc2:10 allocation score on node5: -INFINITY +group_color: clone5rsc2:10 allocation score on node6: -INFINITY +group_color: clone5rsc2:10 allocation score on node7: -INFINITY +group_color: clone5rsc2:10 allocation score on node8: 0 +group_color: clone5rsc2:10 allocation score on node9: -INFINITY +group_color: clone5rsc2:2 allocation score on node10: 0 +group_color: clone5rsc2:2 allocation score on node11: 0 +group_color: clone5rsc2:2 allocation score on node1: -INFINITY +group_color: clone5rsc2:2 allocation score on node2: -INFINITY +group_color: clone5rsc2:2 allocation score on node3: 1 +group_color: clone5rsc2:2 allocation score on node4: 0 +group_color: clone5rsc2:2 allocation score on node5: 0 +group_color: clone5rsc2:2 
allocation score on node6: 0 +group_color: clone5rsc2:2 allocation score on node7: 0 +group_color: clone5rsc2:2 allocation score on node8: 0 +group_color: clone5rsc2:2 allocation score on node9: 0 +group_color: clone5rsc2:3 allocation score on node10: 0 +group_color: clone5rsc2:3 allocation score on node11: 0 +group_color: clone5rsc2:3 allocation score on node1: -INFINITY +group_color: clone5rsc2:3 allocation score on node2: -INFINITY +group_color: clone5rsc2:3 allocation score on node3: -INFINITY +group_color: clone5rsc2:3 allocation score on node4: 0 +group_color: clone5rsc2:3 allocation score on node5: 0 +group_color: clone5rsc2:3 allocation score on node6: 0 +group_color: clone5rsc2:3 allocation score on node7: 0 +group_color: clone5rsc2:3 allocation score on node8: 0 +group_color: clone5rsc2:3 allocation score on node9: 0 +group_color: clone5rsc2:4 allocation score on node10: 0 +group_color: clone5rsc2:4 allocation score on node11: 0 +group_color: clone5rsc2:4 allocation score on node1: -INFINITY +group_color: clone5rsc2:4 allocation score on node2: -INFINITY +group_color: clone5rsc2:4 allocation score on node3: -INFINITY +group_color: clone5rsc2:4 allocation score on node4: 0 +group_color: clone5rsc2:4 allocation score on node5: 0 +group_color: clone5rsc2:4 allocation score on node6: 0 +group_color: clone5rsc2:4 allocation score on node7: 0 +group_color: clone5rsc2:4 allocation score on node8: 0 +group_color: clone5rsc2:4 allocation score on node9: -INFINITY +group_color: clone5rsc2:5 allocation score on node10: -INFINITY +group_color: clone5rsc2:5 allocation score on node11: 0 +group_color: clone5rsc2:5 allocation score on node1: -INFINITY +group_color: clone5rsc2:5 allocation score on node2: -INFINITY +group_color: clone5rsc2:5 allocation score on node3: -INFINITY +group_color: clone5rsc2:5 allocation score on node4: 0 +group_color: clone5rsc2:5 allocation score on node5: 0 +group_color: clone5rsc2:5 allocation score on node6: 0 +group_color: clone5rsc2:5 allocation score on node7: 0 +group_color: clone5rsc2:5 allocation score on node8: 0 +group_color: clone5rsc2:5 allocation score on node9: -INFINITY +group_color: clone5rsc2:6 allocation score on node10: -INFINITY +group_color: clone5rsc2:6 allocation score on node11: -INFINITY +group_color: clone5rsc2:6 allocation score on node1: -INFINITY +group_color: clone5rsc2:6 allocation score on node2: -INFINITY +group_color: clone5rsc2:6 allocation score on node3: -INFINITY +group_color: clone5rsc2:6 allocation score on node4: 0 +group_color: clone5rsc2:6 allocation score on node5: 0 +group_color: clone5rsc2:6 allocation score on node6: 0 +group_color: clone5rsc2:6 allocation score on node7: 0 +group_color: clone5rsc2:6 allocation score on node8: 0 +group_color: clone5rsc2:6 allocation score on node9: -INFINITY +group_color: clone5rsc2:7 allocation score on node10: -INFINITY +group_color: clone5rsc2:7 allocation score on node11: -INFINITY +group_color: clone5rsc2:7 allocation score on node1: -INFINITY +group_color: clone5rsc2:7 allocation score on node2: -INFINITY +group_color: clone5rsc2:7 allocation score on node3: -INFINITY +group_color: clone5rsc2:7 allocation score on node4: -INFINITY +group_color: clone5rsc2:7 allocation score on node5: 0 +group_color: clone5rsc2:7 allocation score on node6: 0 +group_color: clone5rsc2:7 allocation score on node7: 0 +group_color: clone5rsc2:7 allocation score on node8: 0 +group_color: clone5rsc2:7 allocation score on node9: -INFINITY +group_color: clone5rsc2:8 allocation score on node10: -INFINITY 
+group_color: clone5rsc2:8 allocation score on node11: -INFINITY +group_color: clone5rsc2:8 allocation score on node1: -INFINITY +group_color: clone5rsc2:8 allocation score on node2: -INFINITY +group_color: clone5rsc2:8 allocation score on node3: -INFINITY +group_color: clone5rsc2:8 allocation score on node4: -INFINITY +group_color: clone5rsc2:8 allocation score on node5: -INFINITY +group_color: clone5rsc2:8 allocation score on node6: 0 +group_color: clone5rsc2:8 allocation score on node7: 0 +group_color: clone5rsc2:8 allocation score on node8: 0 +group_color: clone5rsc2:8 allocation score on node9: -INFINITY +group_color: clone5rsc2:9 allocation score on node10: -INFINITY +group_color: clone5rsc2:9 allocation score on node11: -INFINITY +group_color: clone5rsc2:9 allocation score on node1: -INFINITY +group_color: clone5rsc2:9 allocation score on node2: -INFINITY +group_color: clone5rsc2:9 allocation score on node3: -INFINITY +group_color: clone5rsc2:9 allocation score on node4: -INFINITY +group_color: clone5rsc2:9 allocation score on node5: -INFINITY +group_color: clone5rsc2:9 allocation score on node6: -INFINITY +group_color: clone5rsc2:9 allocation score on node7: 0 +group_color: clone5rsc2:9 allocation score on node8: 0 +group_color: clone5rsc2:9 allocation score on node9: -INFINITY +group_color: clone5rsc3:0 allocation score on node10: 0 +group_color: clone5rsc3:0 allocation score on node11: 0 +group_color: clone5rsc3:0 allocation score on node1: 1 +group_color: clone5rsc3:0 allocation score on node2: 0 +group_color: clone5rsc3:0 allocation score on node3: 0 +group_color: clone5rsc3:0 allocation score on node4: 0 +group_color: clone5rsc3:0 allocation score on node5: 0 +group_color: clone5rsc3:0 allocation score on node6: 0 +group_color: clone5rsc3:0 allocation score on node7: 0 +group_color: clone5rsc3:0 allocation score on node8: 0 +group_color: clone5rsc3:0 allocation score on node9: 0 +group_color: clone5rsc3:1 allocation score on node10: 0 +group_color: clone5rsc3:1 allocation score on node11: 0 +group_color: clone5rsc3:1 allocation score on node1: -INFINITY +group_color: clone5rsc3:1 allocation score on node2: 1 +group_color: clone5rsc3:1 allocation score on node3: 0 +group_color: clone5rsc3:1 allocation score on node4: 0 +group_color: clone5rsc3:1 allocation score on node5: 0 +group_color: clone5rsc3:1 allocation score on node6: 0 +group_color: clone5rsc3:1 allocation score on node7: 0 +group_color: clone5rsc3:1 allocation score on node8: 0 +group_color: clone5rsc3:1 allocation score on node9: 0 +group_color: clone5rsc3:10 allocation score on node10: -INFINITY +group_color: clone5rsc3:10 allocation score on node11: -INFINITY +group_color: clone5rsc3:10 allocation score on node1: -INFINITY +group_color: clone5rsc3:10 allocation score on node2: -INFINITY +group_color: clone5rsc3:10 allocation score on node3: -INFINITY +group_color: clone5rsc3:10 allocation score on node4: -INFINITY +group_color: clone5rsc3:10 allocation score on node5: -INFINITY +group_color: clone5rsc3:10 allocation score on node6: -INFINITY +group_color: clone5rsc3:10 allocation score on node7: -INFINITY +group_color: clone5rsc3:10 allocation score on node8: 0 +group_color: clone5rsc3:10 allocation score on node9: -INFINITY +group_color: clone5rsc3:2 allocation score on node10: 0 +group_color: clone5rsc3:2 allocation score on node11: 0 +group_color: clone5rsc3:2 allocation score on node1: -INFINITY +group_color: clone5rsc3:2 allocation score on node2: -INFINITY +group_color: clone5rsc3:2 allocation score on 
node3: 0 +group_color: clone5rsc3:2 allocation score on node4: 0 +group_color: clone5rsc3:2 allocation score on node5: 0 +group_color: clone5rsc3:2 allocation score on node6: 0 +group_color: clone5rsc3:2 allocation score on node7: 0 +group_color: clone5rsc3:2 allocation score on node8: 0 +group_color: clone5rsc3:2 allocation score on node9: 0 +group_color: clone5rsc3:3 allocation score on node10: 0 +group_color: clone5rsc3:3 allocation score on node11: 0 +group_color: clone5rsc3:3 allocation score on node1: -INFINITY +group_color: clone5rsc3:3 allocation score on node2: -INFINITY +group_color: clone5rsc3:3 allocation score on node3: -INFINITY +group_color: clone5rsc3:3 allocation score on node4: 0 +group_color: clone5rsc3:3 allocation score on node5: 0 +group_color: clone5rsc3:3 allocation score on node6: 0 +group_color: clone5rsc3:3 allocation score on node7: 0 +group_color: clone5rsc3:3 allocation score on node8: 0 +group_color: clone5rsc3:3 allocation score on node9: 0 +group_color: clone5rsc3:4 allocation score on node10: 0 +group_color: clone5rsc3:4 allocation score on node11: 0 +group_color: clone5rsc3:4 allocation score on node1: -INFINITY +group_color: clone5rsc3:4 allocation score on node2: -INFINITY +group_color: clone5rsc3:4 allocation score on node3: -INFINITY +group_color: clone5rsc3:4 allocation score on node4: 0 +group_color: clone5rsc3:4 allocation score on node5: 0 +group_color: clone5rsc3:4 allocation score on node6: 0 +group_color: clone5rsc3:4 allocation score on node7: 0 +group_color: clone5rsc3:4 allocation score on node8: 0 +group_color: clone5rsc3:4 allocation score on node9: -INFINITY +group_color: clone5rsc3:5 allocation score on node10: -INFINITY +group_color: clone5rsc3:5 allocation score on node11: 0 +group_color: clone5rsc3:5 allocation score on node1: -INFINITY +group_color: clone5rsc3:5 allocation score on node2: -INFINITY +group_color: clone5rsc3:5 allocation score on node3: -INFINITY +group_color: clone5rsc3:5 allocation score on node4: 0 +group_color: clone5rsc3:5 allocation score on node5: 0 +group_color: clone5rsc3:5 allocation score on node6: 0 +group_color: clone5rsc3:5 allocation score on node7: 0 +group_color: clone5rsc3:5 allocation score on node8: 0 +group_color: clone5rsc3:5 allocation score on node9: -INFINITY +group_color: clone5rsc3:6 allocation score on node10: -INFINITY +group_color: clone5rsc3:6 allocation score on node11: -INFINITY +group_color: clone5rsc3:6 allocation score on node1: -INFINITY +group_color: clone5rsc3:6 allocation score on node2: -INFINITY +group_color: clone5rsc3:6 allocation score on node3: -INFINITY +group_color: clone5rsc3:6 allocation score on node4: 0 +group_color: clone5rsc3:6 allocation score on node5: 0 +group_color: clone5rsc3:6 allocation score on node6: 0 +group_color: clone5rsc3:6 allocation score on node7: 0 +group_color: clone5rsc3:6 allocation score on node8: 0 +group_color: clone5rsc3:6 allocation score on node9: -INFINITY +group_color: clone5rsc3:7 allocation score on node10: -INFINITY +group_color: clone5rsc3:7 allocation score on node11: -INFINITY +group_color: clone5rsc3:7 allocation score on node1: -INFINITY +group_color: clone5rsc3:7 allocation score on node2: -INFINITY +group_color: clone5rsc3:7 allocation score on node3: -INFINITY +group_color: clone5rsc3:7 allocation score on node4: -INFINITY +group_color: clone5rsc3:7 allocation score on node5: 0 +group_color: clone5rsc3:7 allocation score on node6: 0 +group_color: clone5rsc3:7 allocation score on node7: 0 +group_color: clone5rsc3:7 allocation 
score on node8: 0 +group_color: clone5rsc3:7 allocation score on node9: -INFINITY +group_color: clone5rsc3:8 allocation score on node10: -INFINITY +group_color: clone5rsc3:8 allocation score on node11: -INFINITY +group_color: clone5rsc3:8 allocation score on node1: -INFINITY +group_color: clone5rsc3:8 allocation score on node2: -INFINITY +group_color: clone5rsc3:8 allocation score on node3: -INFINITY +group_color: clone5rsc3:8 allocation score on node4: -INFINITY +group_color: clone5rsc3:8 allocation score on node5: -INFINITY +group_color: clone5rsc3:8 allocation score on node6: 0 +group_color: clone5rsc3:8 allocation score on node7: 0 +group_color: clone5rsc3:8 allocation score on node8: 0 +group_color: clone5rsc3:8 allocation score on node9: -INFINITY +group_color: clone5rsc3:9 allocation score on node10: -INFINITY +group_color: clone5rsc3:9 allocation score on node11: -INFINITY +group_color: clone5rsc3:9 allocation score on node1: -INFINITY +group_color: clone5rsc3:9 allocation score on node2: -INFINITY +group_color: clone5rsc3:9 allocation score on node3: -INFINITY +group_color: clone5rsc3:9 allocation score on node4: -INFINITY +group_color: clone5rsc3:9 allocation score on node5: -INFINITY +group_color: clone5rsc3:9 allocation score on node6: -INFINITY +group_color: clone5rsc3:9 allocation score on node7: 0 +group_color: clone5rsc3:9 allocation score on node8: 0 +group_color: clone5rsc3:9 allocation score on node9: -INFINITY +native_color: Fencing allocation score on node10: 0 +native_color: Fencing allocation score on node11: 0 +native_color: Fencing allocation score on node1: 0 +native_color: Fencing allocation score on node2: 0 +native_color: Fencing allocation score on node3: 0 +native_color: Fencing allocation score on node4: 0 +native_color: Fencing allocation score on node5: 0 +native_color: Fencing allocation score on node6: 0 +native_color: Fencing allocation score on node7: 0 +native_color: Fencing allocation score on node8: 0 +native_color: Fencing allocation score on node9: 0 +native_color: clone1rsc:0 allocation score on node10: 0 +native_color: clone1rsc:0 allocation score on node11: 0 +native_color: clone1rsc:0 allocation score on node1: 1 +native_color: clone1rsc:0 allocation score on node2: 0 +native_color: clone1rsc:0 allocation score on node3: -INFINITY +native_color: clone1rsc:0 allocation score on node4: 0 +native_color: clone1rsc:0 allocation score on node5: 0 +native_color: clone1rsc:0 allocation score on node6: 0 +native_color: clone1rsc:0 allocation score on node7: 0 +native_color: clone1rsc:0 allocation score on node8: 0 +native_color: clone1rsc:0 allocation score on node9: 0 +native_color: clone1rsc:1 allocation score on node10: 0 +native_color: clone1rsc:1 allocation score on node11: 0 +native_color: clone1rsc:1 allocation score on node1: -INFINITY +native_color: clone1rsc:1 allocation score on node2: 1 +native_color: clone1rsc:1 allocation score on node3: -INFINITY +native_color: clone1rsc:1 allocation score on node4: 0 +native_color: clone1rsc:1 allocation score on node5: 0 +native_color: clone1rsc:1 allocation score on node6: 0 +native_color: clone1rsc:1 allocation score on node7: 0 +native_color: clone1rsc:1 allocation score on node8: 0 +native_color: clone1rsc:1 allocation score on node9: 0 +native_color: clone1rsc:10 allocation score on node10: -INFINITY +native_color: clone1rsc:10 allocation score on node11: -INFINITY +native_color: clone1rsc:10 allocation score on node1: -INFINITY +native_color: clone1rsc:10 allocation score on node2: -INFINITY 
+native_color: clone1rsc:10 allocation score on node3: -INFINITY
+native_color: clone1rsc:10 allocation score on node4: -INFINITY
+native_color: clone1rsc:10 allocation score on node5: -INFINITY
+native_color: clone1rsc:10 allocation score on node6: -INFINITY
+native_color: clone1rsc:10 allocation score on node7: -INFINITY
+native_color: clone1rsc:10 allocation score on node8: 0
+native_color: clone1rsc:10 allocation score on node9: 0
+native_color: clone1rsc:2 allocation score on node10: 0
+native_color: clone1rsc:2 allocation score on node11: 0
+native_color: clone1rsc:2 allocation score on node1: 0
+native_color: clone1rsc:2 allocation score on node2: 0
+native_color: clone1rsc:2 allocation score on node3: 11
+native_color: clone1rsc:2 allocation score on node4: 0
+native_color: clone1rsc:2 allocation score on node5: 0
+native_color: clone1rsc:2 allocation score on node6: 0
+native_color: clone1rsc:2 allocation score on node7: 0
+native_color: clone1rsc:2 allocation score on node8: 0
+native_color: clone1rsc:2 allocation score on node9: 0
+native_color: clone1rsc:3 allocation score on node10: 0
+native_color: clone1rsc:3 allocation score on node11: 0
+native_color: clone1rsc:3 allocation score on node1: -INFINITY
+native_color: clone1rsc:3 allocation score on node2: -INFINITY
+native_color: clone1rsc:3 allocation score on node3: -INFINITY
+native_color: clone1rsc:3 allocation score on node4: 1
+native_color: clone1rsc:3 allocation score on node5: 0
+native_color: clone1rsc:3 allocation score on node6: 0
+native_color: clone1rsc:3 allocation score on node7: 0
+native_color: clone1rsc:3 allocation score on node8: 0
+native_color: clone1rsc:3 allocation score on node9: 0
+native_color: clone1rsc:4 allocation score on node10: -INFINITY
+native_color: clone1rsc:4 allocation score on node11: -INFINITY
+native_color: clone1rsc:4 allocation score on node1: -INFINITY
+native_color: clone1rsc:4 allocation score on node2: -INFINITY
+native_color: clone1rsc:4 allocation score on node3: -INFINITY
+native_color: clone1rsc:4 allocation score on node4: -INFINITY
+native_color: clone1rsc:4 allocation score on node5: -INFINITY
+native_color: clone1rsc:4 allocation score on node6: -INFINITY
+native_color: clone1rsc:4 allocation score on node7: -INFINITY
+native_color: clone1rsc:4 allocation score on node8: -INFINITY
+native_color: clone1rsc:4 allocation score on node9: 0
+native_color: clone1rsc:5 allocation score on node10: 0
+native_color: clone1rsc:5 allocation score on node11: 0
+native_color: clone1rsc:5 allocation score on node1: -INFINITY
+native_color: clone1rsc:5 allocation score on node2: -INFINITY
+native_color: clone1rsc:5 allocation score on node3: -INFINITY
+native_color: clone1rsc:5 allocation score on node4: -INFINITY
+native_color: clone1rsc:5 allocation score on node5: 0
+native_color: clone1rsc:5 allocation score on node6: 0
+native_color: clone1rsc:5 allocation score on node7: 0
+native_color: clone1rsc:5 allocation score on node8: 0
+native_color: clone1rsc:5 allocation score on node9: 0
+native_color: clone1rsc:6 allocation score on node10: -INFINITY
+native_color: clone1rsc:6 allocation score on node11: 0
+native_color: clone1rsc:6 allocation score on node1: -INFINITY
+native_color: clone1rsc:6 allocation score on node2: -INFINITY
+native_color: clone1rsc:6 allocation score on node3: -INFINITY
+native_color: clone1rsc:6 allocation score on node4: -INFINITY
+native_color: clone1rsc:6 allocation score on node5: 0
+native_color: clone1rsc:6 allocation score on node6: 0
+native_color: clone1rsc:6 allocation score on node7: 0
+native_color: clone1rsc:6 allocation score on node8: 0
+native_color: clone1rsc:6 allocation score on node9: 0
+native_color: clone1rsc:7 allocation score on node10: -INFINITY
+native_color: clone1rsc:7 allocation score on node11: -INFINITY
+native_color: clone1rsc:7 allocation score on node1: -INFINITY
+native_color: clone1rsc:7 allocation score on node2: -INFINITY
+native_color: clone1rsc:7 allocation score on node3: -INFINITY
+native_color: clone1rsc:7 allocation score on node4: -INFINITY
+native_color: clone1rsc:7 allocation score on node5: 0
+native_color: clone1rsc:7 allocation score on node6: 0
+native_color: clone1rsc:7 allocation score on node7: 0
+native_color: clone1rsc:7 allocation score on node8: 0
+native_color: clone1rsc:7 allocation score on node9: 0
+native_color: clone1rsc:8 allocation score on node10: -INFINITY
+native_color: clone1rsc:8 allocation score on node11: -INFINITY
+native_color: clone1rsc:8 allocation score on node1: -INFINITY
+native_color: clone1rsc:8 allocation score on node2: -INFINITY
+native_color: clone1rsc:8 allocation score on node3: -INFINITY
+native_color: clone1rsc:8 allocation score on node4: -INFINITY
+native_color: clone1rsc:8 allocation score on node5: -INFINITY
+native_color: clone1rsc:8 allocation score on node6: 0
+native_color: clone1rsc:8 allocation score on node7: 0
+native_color: clone1rsc:8 allocation score on node8: 0
+native_color: clone1rsc:8 allocation score on node9: 0
+native_color: clone1rsc:9 allocation score on node10: -INFINITY
+native_color: clone1rsc:9 allocation score on node11: -INFINITY
+native_color: clone1rsc:9 allocation score on node1: -INFINITY
+native_color: clone1rsc:9 allocation score on node2: -INFINITY
+native_color: clone1rsc:9 allocation score on node3: -INFINITY
+native_color: clone1rsc:9 allocation score on node4: -INFINITY
+native_color: clone1rsc:9 allocation score on node5: -INFINITY
+native_color: clone1rsc:9 allocation score on node6: -INFINITY
+native_color: clone1rsc:9 allocation score on node7: 0
+native_color: clone1rsc:9 allocation score on node8: 0
+native_color: clone1rsc:9 allocation score on node9: 0
+native_color: clone2rsc:0 allocation score on node10: 0
+native_color: clone2rsc:0 allocation score on node11: 0
+native_color: clone2rsc:0 allocation score on node1: 0
+native_color: clone2rsc:0 allocation score on node2: 1
+native_color: clone2rsc:0 allocation score on node3: 0
+native_color: clone2rsc:0 allocation score on node4: 0
+native_color: clone2rsc:0 allocation score on node5: 0
+native_color: clone2rsc:0 allocation score on node6: 0
+native_color: clone2rsc:0 allocation score on node7: 0
+native_color: clone2rsc:0 allocation score on node8: 0
+native_color: clone2rsc:0 allocation score on node9: 0
+native_color: clone2rsc:1 allocation score on node10: 0
+native_color: clone2rsc:1 allocation score on node11: 0
+native_color: clone2rsc:1 allocation score on node1: 0
+native_color: clone2rsc:1 allocation score on node2: -INFINITY
+native_color: clone2rsc:1 allocation score on node3: 0
+native_color: clone2rsc:1 allocation score on node4: 1
+native_color: clone2rsc:1 allocation score on node5: 0
+native_color: clone2rsc:1 allocation score on node6: 0
+native_color: clone2rsc:1 allocation score on node7: 0
+native_color: clone2rsc:1 allocation score on node8: 0
+native_color: clone2rsc:1 allocation score on node9: 0
+native_color: clone2rsc:2 allocation score on node10: 0
+native_color: clone2rsc:2 allocation score on node11: 0
+native_color: clone2rsc:2 allocation score on node1: 0
+native_color: clone2rsc:2 allocation score on node2: -INFINITY
+native_color: clone2rsc:2 allocation score on node3: 0
+native_color: clone2rsc:2 allocation score on node4: -INFINITY
+native_color: clone2rsc:2 allocation score on node5: 0
+native_color: clone2rsc:2 allocation score on node6: 0
+native_color: clone2rsc:2 allocation score on node7: 0
+native_color: clone2rsc:2 allocation score on node8: 0
+native_color: clone2rsc:2 allocation score on node9: 0
+native_color: clone2rsc:3 allocation score on node10: -INFINITY
+native_color: clone2rsc:3 allocation score on node11: 0
+native_color: clone2rsc:3 allocation score on node1: 0
+native_color: clone2rsc:3 allocation score on node2: -INFINITY
+native_color: clone2rsc:3 allocation score on node3: 0
+native_color: clone2rsc:3 allocation score on node4: -INFINITY
+native_color: clone2rsc:3 allocation score on node5: 0
+native_color: clone2rsc:3 allocation score on node6: 0
+native_color: clone2rsc:3 allocation score on node7: 0
+native_color: clone2rsc:3 allocation score on node8: 0
+native_color: clone2rsc:3 allocation score on node9: 0
+native_color: clone2rsc:4 allocation score on node10: -INFINITY
+native_color: clone2rsc:4 allocation score on node11: -INFINITY
+native_color: clone2rsc:4 allocation score on node1: 0
+native_color: clone2rsc:4 allocation score on node2: -INFINITY
+native_color: clone2rsc:4 allocation score on node3: 0
+native_color: clone2rsc:4 allocation score on node4: -INFINITY
+native_color: clone2rsc:4 allocation score on node5: 0
+native_color: clone2rsc:4 allocation score on node6: 0
+native_color: clone2rsc:4 allocation score on node7: 0
+native_color: clone2rsc:4 allocation score on node8: 0
+native_color: clone2rsc:4 allocation score on node9: 0
+native_color: clone3rsc:0 allocation score on node10: 0
+native_color: clone3rsc:0 allocation score on node11: 0
+native_color: clone3rsc:0 allocation score on node1: 0
+native_color: clone3rsc:0 allocation score on node2: 0
+native_color: clone3rsc:0 allocation score on node3: 1
+native_color: clone3rsc:0 allocation score on node4: 0
+native_color: clone3rsc:0 allocation score on node5: 0
+native_color: clone3rsc:0 allocation score on node6: 0
+native_color: clone3rsc:0 allocation score on node7: 0
+native_color: clone3rsc:0 allocation score on node8: 0
+native_color: clone3rsc:0 allocation score on node9: 0
+native_color: clone3rsc:1 allocation score on node10: 0
+native_color: clone3rsc:1 allocation score on node11: 0
+native_color: clone3rsc:1 allocation score on node1: 0
+native_color: clone3rsc:1 allocation score on node2: 0
+native_color: clone3rsc:1 allocation score on node3: -INFINITY
+native_color: clone3rsc:1 allocation score on node4: 0
+native_color: clone3rsc:1 allocation score on node5: 0
+native_color: clone3rsc:1 allocation score on node6: 0
+native_color: clone3rsc:1 allocation score on node7: 0
+native_color: clone3rsc:1 allocation score on node8: 0
+native_color: clone3rsc:1 allocation score on node9: 0
+native_color: clone3rsc:10 allocation score on node10: -INFINITY
+native_color: clone3rsc:10 allocation score on node11: -INFINITY
+native_color: clone3rsc:10 allocation score on node1: -INFINITY
+native_color: clone3rsc:10 allocation score on node2: -INFINITY
+native_color: clone3rsc:10 allocation score on node3: -INFINITY
+native_color: clone3rsc:10 allocation score on node4: 0
+native_color: clone3rsc:10 allocation score on node5: -INFINITY
+native_color: clone3rsc:10 allocation score on node6: -INFINITY
+native_color: clone3rsc:10 allocation score on node7: -INFINITY
+native_color: clone3rsc:10 allocation score on node8: -INFINITY
+native_color: clone3rsc:10 allocation score on node9: -INFINITY
+native_color: clone3rsc:2 allocation score on node10: 0
+native_color: clone3rsc:2 allocation score on node11: 0
+native_color: clone3rsc:2 allocation score on node1: 0
+native_color: clone3rsc:2 allocation score on node2: 0
+native_color: clone3rsc:2 allocation score on node3: -INFINITY
+native_color: clone3rsc:2 allocation score on node4: 0
+native_color: clone3rsc:2 allocation score on node5: -INFINITY
+native_color: clone3rsc:2 allocation score on node6: 0
+native_color: clone3rsc:2 allocation score on node7: 0
+native_color: clone3rsc:2 allocation score on node8: 0
+native_color: clone3rsc:2 allocation score on node9: 0
+native_color: clone3rsc:3 allocation score on node10: 0
+native_color: clone3rsc:3 allocation score on node11: 0
+native_color: clone3rsc:3 allocation score on node1: 0
+native_color: clone3rsc:3 allocation score on node2: 0
+native_color: clone3rsc:3 allocation score on node3: -INFINITY
+native_color: clone3rsc:3 allocation score on node4: 0
+native_color: clone3rsc:3 allocation score on node5: -INFINITY
+native_color: clone3rsc:3 allocation score on node6: -INFINITY
+native_color: clone3rsc:3 allocation score on node7: 0
+native_color: clone3rsc:3 allocation score on node8: 0
+native_color: clone3rsc:3 allocation score on node9: 0
+native_color: clone3rsc:4 allocation score on node10: 0
+native_color: clone3rsc:4 allocation score on node11: 0
+native_color: clone3rsc:4 allocation score on node1: 0
+native_color: clone3rsc:4 allocation score on node2: 0
+native_color: clone3rsc:4 allocation score on node3: -INFINITY
+native_color: clone3rsc:4 allocation score on node4: 0
+native_color: clone3rsc:4 allocation score on node5: -INFINITY
+native_color: clone3rsc:4 allocation score on node6: -INFINITY
+native_color: clone3rsc:4 allocation score on node7: -INFINITY
+native_color: clone3rsc:4 allocation score on node8: 0
+native_color: clone3rsc:4 allocation score on node9: 0
+native_color: clone3rsc:5 allocation score on node10: 0
+native_color: clone3rsc:5 allocation score on node11: 0
+native_color: clone3rsc:5 allocation score on node1: 0
+native_color: clone3rsc:5 allocation score on node2: 0
+native_color: clone3rsc:5 allocation score on node3: -INFINITY
+native_color: clone3rsc:5 allocation score on node4: 0
+native_color: clone3rsc:5 allocation score on node5: -INFINITY
+native_color: clone3rsc:5 allocation score on node6: -INFINITY
+native_color: clone3rsc:5 allocation score on node7: -INFINITY
+native_color: clone3rsc:5 allocation score on node8: -INFINITY
+native_color: clone3rsc:5 allocation score on node9: 0
+native_color: clone3rsc:6 allocation score on node10: 0
+native_color: clone3rsc:6 allocation score on node11: 0
+native_color: clone3rsc:6 allocation score on node1: 0
+native_color: clone3rsc:6 allocation score on node2: 0
+native_color: clone3rsc:6 allocation score on node3: -INFINITY
+native_color: clone3rsc:6 allocation score on node4: 0
+native_color: clone3rsc:6 allocation score on node5: -INFINITY
+native_color: clone3rsc:6 allocation score on node6: -INFINITY
+native_color: clone3rsc:6 allocation score on node7: -INFINITY
+native_color: clone3rsc:6 allocation score on node8: -INFINITY
+native_color: clone3rsc:6 allocation score on node9: -INFINITY
+native_color: clone3rsc:7 allocation score on node10: 0
+native_color: clone3rsc:7 allocation score on node11: 0
+native_color: clone3rsc:7 allocation score on node1: -INFINITY
+native_color: clone3rsc:7 allocation score on node2: 0
+native_color: clone3rsc:7 allocation score on node3: -INFINITY
+native_color: clone3rsc:7 allocation score on node4: 0
+native_color: clone3rsc:7 allocation score on node5: -INFINITY
+native_color: clone3rsc:7 allocation score on node6: -INFINITY
+native_color: clone3rsc:7 allocation score on node7: -INFINITY
+native_color: clone3rsc:7 allocation score on node8: -INFINITY
+native_color: clone3rsc:7 allocation score on node9: -INFINITY
+native_color: clone3rsc:8 allocation score on node10: -INFINITY
+native_color: clone3rsc:8 allocation score on node11: 0
+native_color: clone3rsc:8 allocation score on node1: -INFINITY
+native_color: clone3rsc:8 allocation score on node2: 0
+native_color: clone3rsc:8 allocation score on node3: -INFINITY
+native_color: clone3rsc:8 allocation score on node4: 0
+native_color: clone3rsc:8 allocation score on node5: -INFINITY
+native_color: clone3rsc:8 allocation score on node6: -INFINITY
+native_color: clone3rsc:8 allocation score on node7: -INFINITY
+native_color: clone3rsc:8 allocation score on node8: -INFINITY
+native_color: clone3rsc:8 allocation score on node9: -INFINITY
+native_color: clone3rsc:9 allocation score on node10: -INFINITY
+native_color: clone3rsc:9 allocation score on node11: -INFINITY
+native_color: clone3rsc:9 allocation score on node1: -INFINITY
+native_color: clone3rsc:9 allocation score on node2: 0
+native_color: clone3rsc:9 allocation score on node3: -INFINITY
+native_color: clone3rsc:9 allocation score on node4: 0
+native_color: clone3rsc:9 allocation score on node5: -INFINITY
+native_color: clone3rsc:9 allocation score on node6: -INFINITY
+native_color: clone3rsc:9 allocation score on node7: -INFINITY
+native_color: clone3rsc:9 allocation score on node8: -INFINITY
+native_color: clone3rsc:9 allocation score on node9: -INFINITY
+native_color: clone4rsc:0 allocation score on node10: 0
+native_color: clone4rsc:0 allocation score on node11: 0
+native_color: clone4rsc:0 allocation score on node1: 1
+native_color: clone4rsc:0 allocation score on node2: 0
+native_color: clone4rsc:0 allocation score on node3: 0
+native_color: clone4rsc:0 allocation score on node4: 0
+native_color: clone4rsc:0 allocation score on node5: 0
+native_color: clone4rsc:0 allocation score on node6: 0
+native_color: clone4rsc:0 allocation score on node7: 0
+native_color: clone4rsc:0 allocation score on node8: 0
+native_color: clone4rsc:0 allocation score on node9: 0
+native_color: clone4rsc:1 allocation score on node10: 0
+native_color: clone4rsc:1 allocation score on node11: 0
+native_color: clone4rsc:1 allocation score on node1: -INFINITY
+native_color: clone4rsc:1 allocation score on node2: 0
+native_color: clone4rsc:1 allocation score on node3: 0
+native_color: clone4rsc:1 allocation score on node4: 0
+native_color: clone4rsc:1 allocation score on node5: 1
+native_color: clone4rsc:1 allocation score on node6: 0
+native_color: clone4rsc:1 allocation score on node7: 0
+native_color: clone4rsc:1 allocation score on node8: 0
+native_color: clone4rsc:1 allocation score on node9: 0
+native_color: clone4rsc:2 allocation score on node10: 0
+native_color: clone4rsc:2 allocation score on node11: 0
+native_color: clone4rsc:2 allocation score on node1: -INFINITY
+native_color: clone4rsc:2 allocation score on node2: 0
+native_color: clone4rsc:2 allocation score on node3: 0
+native_color: clone4rsc:2 allocation score on node4: 0
+native_color: clone4rsc:2 allocation score on node5: -INFINITY
+native_color: clone4rsc:2 allocation score on node6: 1
+native_color: clone4rsc:2 allocation score on node7: 0
+native_color: clone4rsc:2 allocation score on node8: 0
+native_color: clone4rsc:2 allocation score on node9: 0
+native_color: clone4rsc:3 allocation score on node10: 0
+native_color: clone4rsc:3 allocation score on node11: 0
+native_color: clone4rsc:3 allocation score on node1: -INFINITY
+native_color: clone4rsc:3 allocation score on node2: 0
+native_color: clone4rsc:3 allocation score on node3: 0
+native_color: clone4rsc:3 allocation score on node4: 0
+native_color: clone4rsc:3 allocation score on node5: -INFINITY
+native_color: clone4rsc:3 allocation score on node6: -INFINITY
+native_color: clone4rsc:3 allocation score on node7: 1
+native_color: clone4rsc:3 allocation score on node8: 0
+native_color: clone4rsc:3 allocation score on node9: 0
+native_color: clone4rsc:4 allocation score on node10: 0
+native_color: clone4rsc:4 allocation score on node11: 0
+native_color: clone4rsc:4 allocation score on node1: -INFINITY
+native_color: clone4rsc:4 allocation score on node2: 0
+native_color: clone4rsc:4 allocation score on node3: 0
+native_color: clone4rsc:4 allocation score on node4: 0
+native_color: clone4rsc:4 allocation score on node5: -INFINITY
+native_color: clone4rsc:4 allocation score on node6: -INFINITY
+native_color: clone4rsc:4 allocation score on node7: -INFINITY
+native_color: clone4rsc:4 allocation score on node8: 1
+native_color: clone4rsc:4 allocation score on node9: 0
+native_color: clone5rsc1:0 allocation score on node10: 0
+native_color: clone5rsc1:0 allocation score on node11: 0
+native_color: clone5rsc1:0 allocation score on node1: 3
+native_color: clone5rsc1:0 allocation score on node2: 0
+native_color: clone5rsc1:0 allocation score on node3: 0
+native_color: clone5rsc1:0 allocation score on node4: 0
+native_color: clone5rsc1:0 allocation score on node5: 0
+native_color: clone5rsc1:0 allocation score on node6: 0
+native_color: clone5rsc1:0 allocation score on node7: 0
+native_color: clone5rsc1:0 allocation score on node8: 0
+native_color: clone5rsc1:0 allocation score on node9: 0
+native_color: clone5rsc1:1 allocation score on node10: 0
+native_color: clone5rsc1:1 allocation score on node11: 0
+native_color: clone5rsc1:1 allocation score on node1: -INFINITY
+native_color: clone5rsc1:1 allocation score on node2: 3
+native_color: clone5rsc1:1 allocation score on node3: 0
+native_color: clone5rsc1:1 allocation score on node4: 0
+native_color: clone5rsc1:1 allocation score on node5: 0
+native_color: clone5rsc1:1 allocation score on node6: 0
+native_color: clone5rsc1:1 allocation score on node7: 0
+native_color: clone5rsc1:1 allocation score on node8: 0
+native_color: clone5rsc1:1 allocation score on node9: 0
+native_color: clone5rsc1:10 allocation score on node10: -INFINITY
+native_color: clone5rsc1:10 allocation score on node11: -INFINITY
+native_color: clone5rsc1:10 allocation score on node1: -INFINITY
+native_color: clone5rsc1:10 allocation score on node2: -INFINITY
+native_color: clone5rsc1:10 allocation score on node3: -INFINITY
+native_color: clone5rsc1:10 allocation score on node4: -INFINITY
+native_color: clone5rsc1:10 allocation score on node5: -INFINITY
+native_color: clone5rsc1:10 allocation score on node6: -INFINITY
+native_color: clone5rsc1:10 allocation score on node7: -INFINITY
+native_color: clone5rsc1:10 allocation score on node8: 0
+native_color: clone5rsc1:10 allocation score on node9: -INFINITY
+native_color: clone5rsc1:2 allocation score on node10: 0
+native_color: clone5rsc1:2 allocation score on node11: 0
+native_color: clone5rsc1:2 allocation score on node1: -INFINITY
+native_color: clone5rsc1:2 allocation score on node2: -INFINITY
+native_color: clone5rsc1:2 allocation score on node3: 2
+native_color: clone5rsc1:2 allocation score on node4: 0
+native_color: clone5rsc1:2 allocation score on node5: 0
+native_color: clone5rsc1:2 allocation score on node6: 0
+native_color: clone5rsc1:2 allocation score on node7: 0
+native_color: clone5rsc1:2 allocation score on node8: 0
+native_color: clone5rsc1:2 allocation score on node9: 0
+native_color: clone5rsc1:3 allocation score on node10: 0
+native_color: clone5rsc1:3 allocation score on node11: 0
+native_color: clone5rsc1:3 allocation score on node1: -INFINITY
+native_color: clone5rsc1:3 allocation score on node2: -INFINITY
+native_color: clone5rsc1:3 allocation score on node3: -INFINITY
+native_color: clone5rsc1:3 allocation score on node4: 0
+native_color: clone5rsc1:3 allocation score on node5: 0
+native_color: clone5rsc1:3 allocation score on node6: 0
+native_color: clone5rsc1:3 allocation score on node7: 0
+native_color: clone5rsc1:3 allocation score on node8: 0
+native_color: clone5rsc1:3 allocation score on node9: 0
+native_color: clone5rsc1:4 allocation score on node10: 0
+native_color: clone5rsc1:4 allocation score on node11: 0
+native_color: clone5rsc1:4 allocation score on node1: -INFINITY
+native_color: clone5rsc1:4 allocation score on node2: -INFINITY
+native_color: clone5rsc1:4 allocation score on node3: -INFINITY
+native_color: clone5rsc1:4 allocation score on node4: 0
+native_color: clone5rsc1:4 allocation score on node5: 0
+native_color: clone5rsc1:4 allocation score on node6: 0
+native_color: clone5rsc1:4 allocation score on node7: 0
+native_color: clone5rsc1:4 allocation score on node8: 0
+native_color: clone5rsc1:4 allocation score on node9: -INFINITY
+native_color: clone5rsc1:5 allocation score on node10: -INFINITY
+native_color: clone5rsc1:5 allocation score on node11: 0
+native_color: clone5rsc1:5 allocation score on node1: -INFINITY
+native_color: clone5rsc1:5 allocation score on node2: -INFINITY
+native_color: clone5rsc1:5 allocation score on node3: -INFINITY
+native_color: clone5rsc1:5 allocation score on node4: 0
+native_color: clone5rsc1:5 allocation score on node5: 0
+native_color: clone5rsc1:5 allocation score on node6: 0
+native_color: clone5rsc1:5 allocation score on node7: 0
+native_color: clone5rsc1:5 allocation score on node8: 0
+native_color: clone5rsc1:5 allocation score on node9: -INFINITY
+native_color: clone5rsc1:6 allocation score on node10: -INFINITY
+native_color: clone5rsc1:6 allocation score on node11: -INFINITY
+native_color: clone5rsc1:6 allocation score on node1: -INFINITY
+native_color: clone5rsc1:6 allocation score on node2: -INFINITY
+native_color: clone5rsc1:6 allocation score on node3: -INFINITY
+native_color: clone5rsc1:6 allocation score on node4: 0
+native_color: clone5rsc1:6 allocation score on node5: 0
+native_color: clone5rsc1:6 allocation score on node6: 0
+native_color: clone5rsc1:6 allocation score on node7: 0
+native_color: clone5rsc1:6 allocation score on node8: 0
+native_color: clone5rsc1:6 allocation score on node9: -INFINITY
+native_color: clone5rsc1:7 allocation score on node10: -INFINITY
+native_color: clone5rsc1:7 allocation score on node11: -INFINITY
+native_color: clone5rsc1:7 allocation score on node1: -INFINITY
+native_color: clone5rsc1:7 allocation score on node2: -INFINITY
+native_color: clone5rsc1:7 allocation score on node3: -INFINITY
+native_color: clone5rsc1:7 allocation score on node4: -INFINITY
+native_color: clone5rsc1:7 allocation score on node5: 0
+native_color: clone5rsc1:7 allocation score on node6: 0
+native_color: clone5rsc1:7 allocation score on node7: 0
+native_color: clone5rsc1:7 allocation score on node8: 0
+native_color: clone5rsc1:7 allocation score on node9: -INFINITY
+native_color: clone5rsc1:8 allocation score on node10: -INFINITY
+native_color: clone5rsc1:8 allocation score on node11: -INFINITY
+native_color: clone5rsc1:8 allocation score on node1: -INFINITY
+native_color: clone5rsc1:8 allocation score on node2: -INFINITY
+native_color: clone5rsc1:8 allocation score on node3: -INFINITY
+native_color: clone5rsc1:8 allocation score on node4: -INFINITY
+native_color: clone5rsc1:8 allocation score on node5: -INFINITY
+native_color: clone5rsc1:8 allocation score on node6: 0
+native_color: clone5rsc1:8 allocation score on node7: 0
+native_color: clone5rsc1:8 allocation score on node8: 0
+native_color: clone5rsc1:8 allocation score on node9: -INFINITY
+native_color: clone5rsc1:9 allocation score on node10: -INFINITY
+native_color: clone5rsc1:9 allocation score on node11: -INFINITY
+native_color: clone5rsc1:9 allocation score on node1: -INFINITY
+native_color: clone5rsc1:9 allocation score on node2: -INFINITY
+native_color: clone5rsc1:9 allocation score on node3: -INFINITY
+native_color: clone5rsc1:9 allocation score on node4: -INFINITY
+native_color: clone5rsc1:9 allocation score on node5: -INFINITY
+native_color: clone5rsc1:9 allocation score on node6: -INFINITY
+native_color: clone5rsc1:9 allocation score on node7: 0
+native_color: clone5rsc1:9 allocation score on node8: 0
+native_color: clone5rsc1:9 allocation score on node9: -INFINITY
+native_color: clone5rsc2:0 allocation score on node10: -INFINITY
+native_color: clone5rsc2:0 allocation score on node11: -INFINITY
+native_color: clone5rsc2:0 allocation score on node1: 2
+native_color: clone5rsc2:0 allocation score on node2: -INFINITY
+native_color: clone5rsc2:0 allocation score on node3: -INFINITY
+native_color: clone5rsc2:0 allocation score on node4: -INFINITY
+native_color: clone5rsc2:0 allocation score on node5: -INFINITY
+native_color: clone5rsc2:0 allocation score on node6: -INFINITY
+native_color: clone5rsc2:0 allocation score on node7: -INFINITY
+native_color: clone5rsc2:0 allocation score on node8: -INFINITY
+native_color: clone5rsc2:0 allocation score on node9: -INFINITY
+native_color: clone5rsc2:1 allocation score on node10: -INFINITY
+native_color: clone5rsc2:1 allocation score on node11: -INFINITY
+native_color: clone5rsc2:1 allocation score on node1: -INFINITY
+native_color: clone5rsc2:1 allocation score on node2: 2
+native_color: clone5rsc2:1 allocation score on node3: -INFINITY
+native_color: clone5rsc2:1 allocation score on node4: -INFINITY
+native_color: clone5rsc2:1 allocation score on node5: -INFINITY
+native_color: clone5rsc2:1 allocation score on node6: -INFINITY
+native_color: clone5rsc2:1 allocation score on node7: -INFINITY
+native_color: clone5rsc2:1 allocation score on node8: -INFINITY
+native_color: clone5rsc2:1 allocation score on node9: -INFINITY
+native_color: clone5rsc2:10 allocation score on node10: -INFINITY
+native_color: clone5rsc2:10 allocation score on node11: -INFINITY
+native_color: clone5rsc2:10 allocation score on node1: -INFINITY
+native_color: clone5rsc2:10 allocation score on node2: -INFINITY
+native_color: clone5rsc2:10 allocation score on node3: -INFINITY
+native_color: clone5rsc2:10 allocation score on node4: -INFINITY
+native_color: clone5rsc2:10 allocation score on node5: -INFINITY
+native_color: clone5rsc2:10 allocation score on node6: -INFINITY
+native_color: clone5rsc2:10 allocation score on node7: -INFINITY
+native_color: clone5rsc2:10 allocation score on node8: 0
+native_color: clone5rsc2:10 allocation score on node9: -INFINITY
+native_color: clone5rsc2:2 allocation score on node10: -INFINITY
+native_color: clone5rsc2:2 allocation score on node11: -INFINITY
+native_color: clone5rsc2:2 allocation score on node1: -INFINITY
+native_color: clone5rsc2:2 allocation score on node2: -INFINITY
+native_color: clone5rsc2:2 allocation score on node3: 1
+native_color: clone5rsc2:2 allocation score on node4: -INFINITY
+native_color: clone5rsc2:2 allocation score on node5: -INFINITY
+native_color: clone5rsc2:2 allocation score on node6: -INFINITY
+native_color: clone5rsc2:2 allocation score on node7: -INFINITY
+native_color: clone5rsc2:2 allocation score on node8: -INFINITY
+native_color: clone5rsc2:2 allocation score on node9: -INFINITY
+native_color: clone5rsc2:3 allocation score on node10: -INFINITY
+native_color: clone5rsc2:3 allocation score on node11: -INFINITY
+native_color: clone5rsc2:3 allocation score on node1: -INFINITY
+native_color: clone5rsc2:3 allocation score on node2: -INFINITY
+native_color: clone5rsc2:3 allocation score on node3: -INFINITY
+native_color: clone5rsc2:3 allocation score on node4: -INFINITY
+native_color: clone5rsc2:3 allocation score on node5: -INFINITY
+native_color: clone5rsc2:3 allocation score on node6: -INFINITY
+native_color: clone5rsc2:3 allocation score on node7: -INFINITY
+native_color: clone5rsc2:3 allocation score on node8: -INFINITY
+native_color: clone5rsc2:3 allocation score on node9: 0
+native_color: clone5rsc2:4 allocation score on node10: 0
+native_color: clone5rsc2:4 allocation score on node11: -INFINITY
+native_color: clone5rsc2:4 allocation score on node1: -INFINITY
+native_color: clone5rsc2:4 allocation score on node2: -INFINITY
+native_color: clone5rsc2:4 allocation score on node3: -INFINITY
+native_color: clone5rsc2:4 allocation score on node4: -INFINITY
+native_color: clone5rsc2:4 allocation score on node5: -INFINITY
+native_color: clone5rsc2:4 allocation score on node6: -INFINITY
+native_color: clone5rsc2:4 allocation score on node7: -INFINITY
+native_color: clone5rsc2:4 allocation score on node8: -INFINITY
+native_color: clone5rsc2:4 allocation score on node9: -INFINITY
+native_color: clone5rsc2:5 allocation score on node10: -INFINITY
+native_color: clone5rsc2:5 allocation score on node11: 0
+native_color: clone5rsc2:5 allocation score on node1: -INFINITY
+native_color: clone5rsc2:5 allocation score on node2: -INFINITY
+native_color: clone5rsc2:5 allocation score on node3: -INFINITY
+native_color: clone5rsc2:5 allocation score on node4: -INFINITY
+native_color: clone5rsc2:5 allocation score on node5: -INFINITY
+native_color: clone5rsc2:5 allocation score on node6: -INFINITY
+native_color: clone5rsc2:5 allocation score on node7: -INFINITY
+native_color: clone5rsc2:5 allocation score on node8: -INFINITY
+native_color: clone5rsc2:5 allocation score on node9: -INFINITY
+native_color: clone5rsc2:6 allocation score on node10: -INFINITY
+native_color: clone5rsc2:6 allocation score on node11: -INFINITY
+native_color: clone5rsc2:6 allocation score on node1: -INFINITY
+native_color: clone5rsc2:6 allocation score on node2: -INFINITY
+native_color: clone5rsc2:6 allocation score on node3: -INFINITY
+native_color: clone5rsc2:6 allocation score on node4: 0
+native_color: clone5rsc2:6 allocation score on node5: -INFINITY
+native_color: clone5rsc2:6 allocation score on node6: -INFINITY
+native_color: clone5rsc2:6 allocation score on node7: -INFINITY
+native_color: clone5rsc2:6 allocation score on node8: -INFINITY
+native_color: clone5rsc2:6 allocation score on node9: -INFINITY
+native_color: clone5rsc2:7 allocation score on node10: -INFINITY
+native_color: clone5rsc2:7 allocation score on node11: -INFINITY
+native_color: clone5rsc2:7 allocation score on node1: -INFINITY
+native_color: clone5rsc2:7 allocation score on node2: -INFINITY
+native_color: clone5rsc2:7 allocation score on node3: -INFINITY
+native_color: clone5rsc2:7 allocation score on node4: -INFINITY
+native_color: clone5rsc2:7 allocation score on node5: 0
+native_color: clone5rsc2:7 allocation score on node6: -INFINITY
+native_color: clone5rsc2:7 allocation score on node7: -INFINITY
+native_color: clone5rsc2:7 allocation score on node8: -INFINITY
+native_color: clone5rsc2:7 allocation score on node9: -INFINITY
+native_color: clone5rsc2:8 allocation score on node10: -INFINITY
+native_color: clone5rsc2:8 allocation score on node11: -INFINITY
+native_color: clone5rsc2:8 allocation score on node1: -INFINITY
+native_color: clone5rsc2:8 allocation score on node2: -INFINITY
+native_color: clone5rsc2:8 allocation score on node3: -INFINITY
+native_color: clone5rsc2:8 allocation score on node4: -INFINITY
+native_color: clone5rsc2:8 allocation score on node5: -INFINITY
+native_color: clone5rsc2:8 allocation score on node6: 0
+native_color: clone5rsc2:8 allocation score on node7: -INFINITY
+native_color: clone5rsc2:8 allocation score on node8: -INFINITY
+native_color: clone5rsc2:8 allocation score on node9: -INFINITY
+native_color: clone5rsc2:9 allocation score on node10: -INFINITY
+native_color: clone5rsc2:9 allocation score on node11: -INFINITY
+native_color: clone5rsc2:9 allocation score on node1: -INFINITY
+native_color: clone5rsc2:9 allocation score on node2: -INFINITY
+native_color: clone5rsc2:9 allocation score on node3: -INFINITY
+native_color: clone5rsc2:9 allocation score on node4: -INFINITY
+native_color: clone5rsc2:9 allocation score on node5: -INFINITY
+native_color: clone5rsc2:9 allocation score on node6: -INFINITY
+native_color: clone5rsc2:9 allocation score on node7: 0
+native_color: clone5rsc2:9 allocation score on node8: -INFINITY
+native_color: clone5rsc2:9 allocation score on node9: -INFINITY
+native_color: clone5rsc3:0 allocation score on node10: -INFINITY
+native_color: clone5rsc3:0 allocation score on node11: -INFINITY
+native_color: clone5rsc3:0 allocation score on node1: 1
+native_color: clone5rsc3:0 allocation score on node2: -INFINITY
+native_color: clone5rsc3:0 allocation score on node3: -INFINITY
+native_color: clone5rsc3:0 allocation score on node4: -INFINITY
+native_color: clone5rsc3:0 allocation score on node5: -INFINITY
+native_color: clone5rsc3:0 allocation score on node6: -INFINITY
+native_color: clone5rsc3:0 allocation score on node7: -INFINITY
+native_color: clone5rsc3:0 allocation score on node8: -INFINITY
+native_color: clone5rsc3:0 allocation score on node9: -INFINITY
+native_color: clone5rsc3:1 allocation score on node10: -INFINITY
+native_color: clone5rsc3:1 allocation score on node11: -INFINITY
+native_color: clone5rsc3:1 allocation score on node1: -INFINITY
+native_color: clone5rsc3:1 allocation score on node2: 1
+native_color: clone5rsc3:1 allocation score on node3: -INFINITY
+native_color: clone5rsc3:1 allocation score on node4: -INFINITY
+native_color: clone5rsc3:1 allocation score on node5: -INFINITY
+native_color: clone5rsc3:1 allocation score on node6: -INFINITY
+native_color: clone5rsc3:1 allocation score on node7: -INFINITY
+native_color: clone5rsc3:1 allocation score on node8: -INFINITY
+native_color: clone5rsc3:1 allocation score on node9: -INFINITY
+native_color: clone5rsc3:10 allocation score on node10: -INFINITY
+native_color: clone5rsc3:10 allocation score on node11: -INFINITY
+native_color: clone5rsc3:10 allocation score on node1: -INFINITY
+native_color: clone5rsc3:10 allocation score on node2: -INFINITY
+native_color: clone5rsc3:10 allocation score on node3: -INFINITY
+native_color: clone5rsc3:10 allocation score on node4: -INFINITY
+native_color: clone5rsc3:10 allocation score on node5: -INFINITY
+native_color: clone5rsc3:10 allocation score on node6: -INFINITY
+native_color: clone5rsc3:10 allocation score on node7: -INFINITY
+native_color: clone5rsc3:10 allocation score on node8: 0
+native_color: clone5rsc3:10 allocation score on node9: -INFINITY
+native_color: clone5rsc3:2 allocation score on node10: -INFINITY
+native_color: clone5rsc3:2 allocation score on node11: -INFINITY
+native_color: clone5rsc3:2 allocation score on node1: -INFINITY
+native_color: clone5rsc3:2 allocation score on node2: -INFINITY
+native_color: clone5rsc3:2 allocation score on node3: 0
+native_color: clone5rsc3:2 allocation score on node4: -INFINITY
+native_color: clone5rsc3:2 allocation score on node5: -INFINITY
+native_color: clone5rsc3:2 allocation score on node6: -INFINITY
+native_color: clone5rsc3:2 allocation score on node7: -INFINITY
+native_color: clone5rsc3:2 allocation score on node8: -INFINITY
+native_color: clone5rsc3:2 allocation score on node9: -INFINITY
+native_color: clone5rsc3:3 allocation score on node10: -INFINITY
+native_color: clone5rsc3:3 allocation score on node11: -INFINITY
+native_color: clone5rsc3:3 allocation score on node1: -INFINITY
+native_color: clone5rsc3:3 allocation score on node2: -INFINITY
+native_color: clone5rsc3:3 allocation score on node3: -INFINITY
+native_color: clone5rsc3:3 allocation score on node4: -INFINITY
+native_color: clone5rsc3:3 allocation score on node5: -INFINITY
+native_color: clone5rsc3:3 allocation score on node6: -INFINITY
+native_color: clone5rsc3:3 allocation score on node7: -INFINITY
+native_color: clone5rsc3:3 allocation score on node8: -INFINITY
+native_color: clone5rsc3:3 allocation score on node9: 0
+native_color: clone5rsc3:4 allocation score on node10: 0
+native_color: clone5rsc3:4 allocation score on node11: -INFINITY
+native_color: clone5rsc3:4 allocation score on node1: -INFINITY
+native_color: clone5rsc3:4 allocation score on node2: -INFINITY
+native_color: clone5rsc3:4 allocation score on node3: -INFINITY
+native_color: clone5rsc3:4 allocation score on node4: -INFINITY
+native_color: clone5rsc3:4 allocation score on node5: -INFINITY
+native_color: clone5rsc3:4 allocation score on node6: -INFINITY
+native_color: clone5rsc3:4 allocation score on node7: -INFINITY
+native_color: clone5rsc3:4 allocation score on node8: -INFINITY
+native_color: clone5rsc3:4 allocation score on node9: -INFINITY
+native_color: clone5rsc3:5 allocation score on node10: -INFINITY
+native_color: clone5rsc3:5 allocation score on node11: 0
+native_color: clone5rsc3:5 allocation score on node1: -INFINITY
+native_color: clone5rsc3:5 allocation score on node2: -INFINITY
+native_color: clone5rsc3:5 allocation score on node3: -INFINITY
+native_color: clone5rsc3:5 allocation score on node4: -INFINITY
+native_color: clone5rsc3:5 allocation score on node5: -INFINITY
+native_color: clone5rsc3:5 allocation score on node6: -INFINITY
+native_color: clone5rsc3:5 allocation score on node7: -INFINITY
+native_color: clone5rsc3:5 allocation score on node8: -INFINITY
+native_color: clone5rsc3:5 allocation score on node9: -INFINITY
+native_color: clone5rsc3:6 allocation score on node10: -INFINITY
+native_color: clone5rsc3:6 allocation score on node11: -INFINITY
+native_color: clone5rsc3:6 allocation score on node1: -INFINITY
+native_color: clone5rsc3:6 allocation score on node2: -INFINITY
+native_color: clone5rsc3:6 allocation score on node3: -INFINITY
+native_color: clone5rsc3:6 allocation score on node4: 0
+native_color: clone5rsc3:6 allocation score on node5: -INFINITY
+native_color: clone5rsc3:6 allocation score on node6: -INFINITY
+native_color: clone5rsc3:6 allocation score on node7: -INFINITY
+native_color: clone5rsc3:6 allocation score on node8: -INFINITY
+native_color: clone5rsc3:6 allocation score on node9: -INFINITY
+native_color: clone5rsc3:7 allocation score on node10: -INFINITY
+native_color: clone5rsc3:7 allocation score on node11: -INFINITY
+native_color: clone5rsc3:7 allocation score on node1: -INFINITY
+native_color: clone5rsc3:7 allocation score on node2: -INFINITY
+native_color: clone5rsc3:7 allocation score on node3: -INFINITY
+native_color: clone5rsc3:7 allocation score on node4: -INFINITY
+native_color: clone5rsc3:7 allocation score on node5: 0
+native_color: clone5rsc3:7 allocation score on node6: -INFINITY
+native_color: clone5rsc3:7 allocation score on node7: -INFINITY
+native_color: clone5rsc3:7 allocation score on node8: -INFINITY
+native_color: clone5rsc3:7 allocation score on node9: -INFINITY
+native_color: clone5rsc3:8 allocation score on node10: -INFINITY
+native_color: clone5rsc3:8 allocation score on node11: -INFINITY
+native_color: clone5rsc3:8 allocation score on node1: -INFINITY
+native_color: clone5rsc3:8 allocation score on node2: -INFINITY
+native_color: clone5rsc3:8 allocation score on node3: -INFINITY
+native_color: clone5rsc3:8 allocation score on node4: -INFINITY
+native_color: clone5rsc3:8 allocation score on node5: -INFINITY
+native_color: clone5rsc3:8 allocation score on node6: 0
+native_color: clone5rsc3:8 allocation score on node7: -INFINITY
+native_color: clone5rsc3:8 allocation score on node8: -INFINITY
+native_color: clone5rsc3:8 allocation score on node9: -INFINITY
+native_color: clone5rsc3:9 allocation score on node10: -INFINITY
+native_color: clone5rsc3:9 allocation score on node11: -INFINITY
+native_color: clone5rsc3:9 allocation score on node1: -INFINITY
+native_color: clone5rsc3:9 allocation score on node2: -INFINITY
+native_color: clone5rsc3:9 allocation score on node3: -INFINITY
+native_color: clone5rsc3:9 allocation score on node4: -INFINITY
+native_color: clone5rsc3:9 allocation score on node5: -INFINITY
+native_color: clone5rsc3:9 allocation score on node6: -INFINITY
+native_color: clone5rsc3:9 allocation score on node7: 0
+native_color: clone5rsc3:9 allocation score on node8: -INFINITY
+native_color: clone5rsc3:9 allocation score on node9: -INFINITY
diff --git a/cts/scheduler/anon-instance-pending.summary b/cts/scheduler/anon-instance-pending.summary
new file mode 100644
index 0000000000..6ee4e7df69
--- /dev/null
+++ b/cts/scheduler/anon-instance-pending.summary
@@ -0,0 +1,223 @@
+
+Current cluster status:
+Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+
+ Fencing (stonith:fence_imaginary): Started node1
+ Master/Slave Set: clone1 [clone1rsc]
+     clone1rsc (ocf::pacemaker:Stateful): Starting node4
+     Masters: [ node3 ]
+     Slaves: [ node1 node2 ]
+     Stopped: [ node5 node6 node7 node8 node9 node10 node11 ]
+ Clone Set: clone2 [clone2rsc]
+     clone2rsc (ocf::pacemaker:Dummy): Starting node4
+     Started: [ node2 ]
+     Stopped: [ node1 node3 node5 node6 node7 node8 node9 node10 node11 ]
+ Clone Set: clone3 [clone3rsc]
+     Started: [ node3 ]
+     Stopped: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
+ Clone Set: clone4 [clone4rsc]
+     clone4rsc (ocf::pacemaker:Dummy): Stopping node8
+     clone4rsc (ocf::pacemaker:Dummy): ORPHANED Started node9
+     Started: [ node1 node5 node6 node7 ]
+     Stopped: [ node2 node3 node4 node10 node11 ]
+ Clone Set: clone5 [clone5group]
+     Resource Group: clone5group:2
+         clone5rsc1 (ocf::pacemaker:Dummy): Started node3
+         clone5rsc2 (ocf::pacemaker:Dummy): Starting node3
+         clone5rsc3 (ocf::pacemaker:Dummy): Stopped
+     Started: [ node1 node2 ]
+     Stopped: [ node4 node5 node6 node7 node8 node9 node10 node11 ]
+
+Transition Summary:
+ * Start clone1rsc:4 ( node9 )
+ * Start clone1rsc:5 ( node10 )
+ * Start clone1rsc:6 ( node11 )
+ * Start clone1rsc:7 ( node5 )
+ * Start clone1rsc:8 ( node6 )
+ * Start clone1rsc:9 ( node7 )
+ * Start clone1rsc:10 ( node8 )
+ * Start clone2rsc:2 ( node10 )
+ * Start clone2rsc:3 ( node11 )
+ * Start clone2rsc:4 ( node3 )
+ * Start clone3rsc:1 ( node5 )
+ * Start clone3rsc:2 ( node6 )
+ * Start clone3rsc:3 ( node7 )
+ * Start clone3rsc:4 ( node8 )
+ * Start clone3rsc:5 ( node9 )
+ * Start clone3rsc:6 ( node1 )
+ * Start clone3rsc:7 ( node10 )
+ * Start clone3rsc:8 ( node11 )
+ * Start clone3rsc:9 ( node2 )
+ * Start clone3rsc:10 ( node4 )
+ * Stop clone4rsc:5 ( node9 ) due to node availability
+ * Start clone5rsc3:2 ( node3 )
+ * Start clone5rsc1:3 ( node9 )
+ * Start clone5rsc2:3 ( node9 )
+ * Start clone5rsc3:3 ( node9 )
+ * Start clone5rsc1:4 ( node10 )
+ * Start clone5rsc2:4 ( node10 )
+ * Start clone5rsc3:4 ( node10 )
+ * Start clone5rsc1:5 ( node11 )
+ * Start clone5rsc2:5 ( node11 )
+ * Start clone5rsc3:5 ( node11 )
+ * Start clone5rsc1:6 ( node4 )
+ * Start clone5rsc2:6 ( node4 )
+ * Start clone5rsc3:6 ( node4 )
+ * Start clone5rsc1:7 ( node5 )
+ * Start clone5rsc2:7 ( node5 )
+ * Start clone5rsc3:7 ( node5 )
+ * Start clone5rsc1:8 ( node6 )
+ * Start clone5rsc2:8 ( node6 )
+ * Start clone5rsc3:8 ( node6 )
+ * Start clone5rsc1:9 ( node7 )
+ * Start clone5rsc2:9 ( node7 )
+ * Start clone5rsc3:9 ( node7 )
+ * Start clone5rsc1:10 ( node8 )
+ * Start clone5rsc2:10 ( node8 )
+ * Start clone5rsc3:10 ( node8 )
+
+Executing cluster transition:
+ * Pseudo action: clone1_start_0
+ * Pseudo action: clone2_start_0
+ * Resource action: clone3rsc monitor on node2
+ * Pseudo action: clone3_start_0
+ * Pseudo action: clone4_stop_0
+ * Pseudo action: clone5_start_0
+ * Resource action: clone1rsc start on node4
+ * Resource action: clone1rsc start on node9
+ * Resource action: clone1rsc start on node10
+ * Resource action: clone1rsc start on node11
+ * Resource action: clone1rsc start on node5
+ * Resource action: clone1rsc start on node6
+ * Resource action: clone1rsc start on node7
+ * Resource action: clone1rsc start on node8
+ * Pseudo action: clone1_running_0
+ * Resource action: clone2rsc start on node4
+ * Resource action: clone2rsc start on node10
+ * Resource action: clone2rsc start on node11
+ * Resource action: clone2rsc start on node3
+ * Pseudo action: clone2_running_0
+ * Resource action: clone3rsc start on node5
+ * Resource action: clone3rsc start on node6
+ * Resource action: clone3rsc start on node7
+ * Resource action: clone3rsc start on node8
+ * Resource action: clone3rsc start on node9
+ * Resource action: clone3rsc start on node1
+ * Resource action: clone3rsc start on node10
+ * Resource action: clone3rsc start on node11
+ * Resource action: clone3rsc start on node2
+ * Resource action: clone3rsc start on node4
+ * Pseudo action: clone3_running_0
+ * Resource action: clone4rsc stop on node9
+ * Pseudo action: clone4_stopped_0
+ * Pseudo action: clone5group:2_start_0
+ * Resource action: clone5rsc2 start on node3
+ * Resource action: clone5rsc3 start on node3
+ * Pseudo action: clone5group:3_start_0
+ * Resource action: clone5rsc1 start on node9
+ * Resource action: clone5rsc2 start on node9
+ * Resource action: clone5rsc3 start on node9
+ * Pseudo action: clone5group:4_start_0
+ * Resource action: clone5rsc1 start on node10
+ * Resource action: clone5rsc2 start on node10
+ * Resource action: clone5rsc3 start on node10
+ * Pseudo action: clone5group:5_start_0
+ * Resource action: clone5rsc1 start on node11
+ * Resource action: clone5rsc2 start on node11
+ * Resource action: clone5rsc3 start on node11
+ * Pseudo action: clone5group:6_start_0
+ * Resource action: clone5rsc1 start on node4
+ * Resource action: clone5rsc2 start on node4
+ * Resource action: clone5rsc3 start on node4
+ * Pseudo action: clone5group:7_start_0
+ * Resource action: clone5rsc1 start on node5
+ * Resource action: clone5rsc2 start on node5
+ * Resource action: clone5rsc3 start on node5
+ * Pseudo action: clone5group:8_start_0
+ * Resource action: clone5rsc1 start on node6
+ * Resource action: clone5rsc2 start on node6
+ * Resource action: clone5rsc3 start on node6
+ * Pseudo action: clone5group:9_start_0
+ * Resource action: clone5rsc1 start on node7
+ * Resource action: clone5rsc2 start on node7
+ * Resource action: clone5rsc3 start on node7
+ * Pseudo action: clone5group:10_start_0
+ * Resource action: clone5rsc1 start on node8
+ * Resource action: clone5rsc2 start on node8
+ * Resource action: clone5rsc3 start on node8
+ * Pseudo action: all_stopped
+ * Resource action: clone1rsc monitor=10000 on node4
+ * Resource action: clone1rsc monitor=10000 on node9
+ * Resource action: clone1rsc monitor=10000 on node10
+ * Resource action: clone1rsc monitor=10000 on node11
+ * Resource action: clone1rsc monitor=10000 on node5
+ * Resource action: clone1rsc monitor=10000 on node6
+ * Resource action: clone1rsc monitor=10000 on node7
+ * Resource action: clone1rsc monitor=10000 on node8
+ * Resource action: clone2rsc monitor=10000 on node4
+ * Resource action: clone2rsc monitor=10000 on node10
+ * Resource action: clone2rsc monitor=10000 on node11
+ * Resource action: clone2rsc monitor=10000 on node3
+ * Resource action: clone3rsc monitor=10000 on node5
+ * Resource action: clone3rsc monitor=10000 on node6
+ * Resource action: clone3rsc monitor=10000 on node7
+ * Resource action: clone3rsc monitor=10000 on node8
+ * Resource action: clone3rsc monitor=10000 on node9
+ * Resource action: clone3rsc monitor=10000 on node1
+ * Resource action: clone3rsc monitor=10000 on node10
+ * Resource action: clone3rsc monitor=10000 on node11
+ * Resource action: clone3rsc monitor=10000 on node2
+ * Resource action: clone3rsc monitor=10000 on node4
+ * Pseudo action: clone5group:2_running_0
+ * Resource action: clone5rsc2 monitor=10000 on node3
+ * Resource action: clone5rsc3 monitor=10000 on node3
+ * Pseudo action: clone5group:3_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node9
+ * Resource action: clone5rsc2 monitor=10000 on node9
+ * Resource action: clone5rsc3 monitor=10000 on node9
+ * Pseudo action: clone5group:4_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node10
+ * Resource action: clone5rsc2 monitor=10000 on node10
+ * Resource action: clone5rsc3 monitor=10000 on node10
+ * Pseudo action: clone5group:5_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node11
+ * Resource action: clone5rsc2 monitor=10000 on node11
+ * Resource action: clone5rsc3 monitor=10000 on node11
+ * Pseudo action: clone5group:6_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node4
+ * Resource action: clone5rsc2 monitor=10000 on node4
+ * Resource action: clone5rsc3 monitor=10000 on node4
+ * Pseudo action: clone5group:7_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node5
+ * Resource action: clone5rsc2 monitor=10000 on node5
+ * Resource action: clone5rsc3 monitor=10000 on node5
+ * Pseudo action: clone5group:8_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node6
+ * Resource action: clone5rsc2 monitor=10000 on node6
+ * Resource action: clone5rsc3 monitor=10000 on node6
+ * Pseudo action: clone5group:9_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node7
+ * Resource action: clone5rsc2 monitor=10000 on node7
+ * Resource action: clone5rsc3 monitor=10000 on node7
+ * Pseudo action: clone5group:10_running_0
+ * Resource action: clone5rsc1 monitor=10000 on node8
+ * Resource action: clone5rsc2 monitor=10000 on node8
+ * Resource action: clone5rsc3 monitor=10000 on node8
+ * Pseudo action: clone5_running_0
+
+Revised cluster status:
+Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+
+ Fencing (stonith:fence_imaginary): Started node1
+ Master/Slave Set: clone1 [clone1rsc]
+     Masters: [ node3 ]
+     Slaves: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
+ Clone Set: clone2 [clone2rsc]
+     Started: [ node2 node3 node4 node10 node11 ]
+ Clone Set: clone3 [clone3rsc]
+     Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+ Clone Set: clone4 [clone4rsc]
+     Started: [ node1 node5 node6 node7 node8 ]
+ Clone Set: clone5 [clone5group]
+     Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
+
diff --git a/cts/scheduler/anon-instance-pending.xml b/cts/scheduler/anon-instance-pending.xml
new file mode 100644
index 0000000000..2d8a93da08
--- /dev/null
+++ b/cts/scheduler/anon-instance-pending.xml
@@ -0,0 +1,439 @@
[439 added lines of XML CIB test input: the angle-bracketed markup was stripped from this copy of the patch and is not recoverable here]
diff --git a/cts/scheduler/clone-order-16instances.dot b/cts/scheduler/clone-order-16instances.dot
index 3ebda6079a..cf874682b6 100644
--- a/cts/scheduler/clone-order-16instances.dot
+++ b/cts/scheduler/clone-order-16instances.dot
@@ -1,155 +1,155 @@
 digraph "g" {
 "clvmd-clone_running_0" [ style=dashed color="red" fontcolor="orange"]
 "clvmd-clone_start_0" -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
 "clvmd:10_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:10_start_0 " -> "clvmd:11_start_0 " [ style = dashed]
 "clvmd:10_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:11_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:11_start_0 " -> "clvmd:12_start_0 " [ style = dashed]
 "clvmd:11_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:12_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:12_start_0 " -> "clvmd:13_start_0 " [ style = dashed]
 "clvmd:12_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:13_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:13_start_0 " -> "clvmd:14_start_0 " [ style = dashed]
 "clvmd:13_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:14_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:14_start_0 " -> "clvmd:15_start_0 " [ style = dashed]
 "clvmd:14_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:15_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:15_start_0 " -> "clvmd:2_start_0 " [ style = dashed]
 "clvmd:15_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:1_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:1_start_0 " -> "clvmd:10_start_0 " [ style = dashed]
+"clvmd:1_start_0 " -> "clvmd:2_start_0 " [ style = dashed]
 "clvmd:1_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:2_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:2_start_0 " -> "clvmd:3_start_0 " [ style = dashed]
 "clvmd:2_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:3_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:3_start_0 " -> "clvmd:4_start_0 " [ style = dashed]
 "clvmd:3_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:4_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:4_start_0 " -> "clvmd:5_start_0 " [ style = dashed]
 "clvmd:4_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:5_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:5_start_0 " -> "clvmd:6_start_0 " [ style = dashed]
 "clvmd:5_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:6_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:6_start_0 " -> "clvmd:7_start_0 " [ style = dashed]
 "clvmd:6_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:7_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:7_start_0 " -> "clvmd:8_start_0 " [ style = dashed]
 "clvmd:7_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:8_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd:8_start_0 " -> "clvmd:9_start_0 " [ style = dashed]
 "clvmd:8_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd:9_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
+"clvmd:9_start_0 " -> "clvmd:10_start_0 " [ style = dashed]
 "clvmd:9_start_0 " [ style=dashed color="red" fontcolor="black"]
 "clvmd_start_0 " -> "clvmd-clone_running_0" [ style = dashed]
 "clvmd_start_0 " -> "clvmd:1_start_0 " [ style = dashed]
 "clvmd_start_0 " [ style=dashed color="red" fontcolor="black"]
"dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = dashed] "dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"] "dlm-clone_start_0" -> "dlm-clone_running_0" [ style = bold] -"dlm-clone_start_0" -> "dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm-clone_start_0" -> "dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm-clone_start_0" -> "dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] "dlm-clone_start_0" [ style=bold color="green" fontcolor="orange"] -"dlm:10_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:10_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:11_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:12_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:13_start_0 " [ style = 
dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:14_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:15_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:1_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:2_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:3_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:4_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:5_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:6_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:7_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:8_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:9_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd_start_0 " [ style = dashed] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:10_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:11_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:11_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:12_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:12_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:13_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:13_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] 
-"dlm:14_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:14_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:15_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:15_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:3_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:4_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:4_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:5_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:5_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:6_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:6_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:6_start_0 
virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:7_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:7_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:8_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:8_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:9_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm:9_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] -"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] -"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] -"dlm_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:10_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" 
fontcolor="black"] +"dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:14_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:15_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_monitor_30000 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:5_monitor_30000 
virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:10_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:11_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:12_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:13_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> 
"clvmd:14_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:15_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:1_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:2_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:3_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:4_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:5_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:6_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:7_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:8_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:9_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd_start_0 " [ style = dashed] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold] +"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/clone-order-16instances.exp b/cts/scheduler/clone-order-16instances.exp index 96ec770da9..0b1931cfb3 100644 --- a/cts/scheduler/clone-order-16instances.exp +++ b/cts/scheduler/clone-order-16instances.exp @@ -1,467 +1,467 @@ - - - + + + - + - - - + + + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + diff --git a/cts/scheduler/clone-order-16instances.scores b/cts/scheduler/clone-order-16instances.scores index 410bcf7f45..0f6a207026 100644 --- a/cts/scheduler/clone-order-16instances.scores +++ b/cts/scheduler/clone-order-16instances.scores @@ -1,1073 +1,1073 @@ Allocation scores: clone_color: clvmd-clone allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: 
clvmd-clone allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd-clone allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on 
virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on 
virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on 
virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: 
clvmd:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 
clone_color: clvmd:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on 
virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: clvmd:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm-clone allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 1 clone_color: dlm:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 
clone_color: dlm:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 1 clone_color: dlm:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on 
virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on 
virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on 
virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 clone_color: dlm:4 
clone_color: dlm:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
clone_color: dlm:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: clvmd:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: clvmd:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:0 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 1
native_color: dlm:0 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:0 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:1 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 1
native_color: dlm:1 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:1 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:10 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:10 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:10 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:10 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:10 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:10 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:10 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:10 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:10 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:10 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:10 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:11 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:11 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:11 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:11 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:11 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:11 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:11 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:11 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:11 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:11 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:12 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:12 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:12 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:12 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:12 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:12 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:12 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:12 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:12 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:12 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:13 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:13 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:13 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:13 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:13 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:13 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:13 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:13 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:13 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:13 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:14 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:14 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:14 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:14 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:14 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:15 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:15 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:15 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:15 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
-native_color: dlm:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:2 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:2 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:2 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:2 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:2 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:2 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:2 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:2 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:3 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:3 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:3 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:3 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:3 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:3 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:3 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:3 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:3 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:4 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:4 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:4 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:4 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:4 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:4 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:4 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:4 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:4 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:4 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:5 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:5 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:5 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:5 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:5 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:5 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:5 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:5 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:5 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:5 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:5 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:6 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:6 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:6 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:6 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:6 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:6 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:6 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:6 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:6 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:6 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:6 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:6 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:6 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:6 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:6 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:6 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:7 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:7 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
-native_color: dlm:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
+native_color: dlm:7 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0
+native_color: dlm:7 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:7 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:7 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:7 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0
native_color: dlm:8 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:8 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
native_color: dlm:8 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY
virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:8 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:8 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:8 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:8 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:8 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:8 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:8 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:8 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:8 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:8 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: dlm:8 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: dlm:8 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: dlm:9 allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: -INFINITY native_color: dlm:9 allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: -INFINITY -native_color: dlm:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: -INFINITY +native_color: dlm:9 allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:9 allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:9 allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:9 allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 +native_color: dlm:9 allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 
+native_color: dlm:9 allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: dlm:9 allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-009.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-010.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-012.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-013.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-014.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-015.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-016.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-020.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-027.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-028.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-029.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-030.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-031.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-032.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-033.cluster-qe.lab.eng.brq.redhat.com: 0 native_color: virt-fencing allocation score on virt-034.cluster-qe.lab.eng.brq.redhat.com: 0 diff --git a/cts/scheduler/clone-order-16instances.summary b/cts/scheduler/clone-order-16instances.summary index 34eb735645..f07e72e9dd 100644 --- a/cts/scheduler/clone-order-16instances.summary +++ b/cts/scheduler/clone-order-16instances.summary @@ -1,69 +1,69 @@ 16 of 33 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] virt-fencing (stonith:fence_xvm): Started virt-010.cluster-qe.lab.eng.brq.redhat.com Clone Set: dlm-clone [dlm] Started: [ virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com ] Stopped: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] 
Clone Set: clvmd-clone [clvmd] Stopped (disabled): [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] Transition Summary: - * Start dlm:10 (virt-009.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:11 (virt-013.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:12 (virt-014.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:13 (virt-015.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:14 (virt-016.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:15 (virt-020.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:2 (virt-027.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:3 (virt-028.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:4 (virt-029.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:5 (virt-030.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:6 (virt-031.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:7 (virt-032.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:8 (virt-033.cluster-qe.lab.eng.brq.redhat.com) - * Start dlm:9 (virt-034.cluster-qe.lab.eng.brq.redhat.com) + * Start dlm:2 ( virt-009.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:3 ( virt-013.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:4 ( virt-014.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:5 ( virt-015.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:6 ( virt-016.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:7 ( virt-020.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:8 ( virt-027.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:9 ( virt-028.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:10 ( virt-029.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:11 ( virt-030.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:12 ( virt-031.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:13 ( virt-032.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:14 ( virt-033.cluster-qe.lab.eng.brq.redhat.com ) + * Start dlm:15 ( virt-034.cluster-qe.lab.eng.brq.redhat.com ) Executing cluster transition: * Pseudo action: dlm-clone_start_0 * Resource action: dlm start on virt-009.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-013.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-014.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-015.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-016.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-020.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-027.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-028.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-029.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-030.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-031.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-032.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm start on virt-033.cluster-qe.lab.eng.brq.redhat.com * 
Resource action: dlm start on virt-034.cluster-qe.lab.eng.brq.redhat.com * Pseudo action: dlm-clone_running_0 * Resource action: dlm monitor=30000 on virt-009.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-013.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-014.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-015.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-016.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-020.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-027.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-028.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-029.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-030.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-031.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-032.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-033.cluster-qe.lab.eng.brq.redhat.com * Resource action: dlm monitor=30000 on virt-034.cluster-qe.lab.eng.brq.redhat.com Revised cluster status: Online: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] virt-fencing (stonith:fence_xvm): Started virt-010.cluster-qe.lab.eng.brq.redhat.com Clone Set: dlm-clone [dlm] Started: [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] Clone Set: clvmd-clone [clvmd] Stopped (disabled): [ virt-009.cluster-qe.lab.eng.brq.redhat.com virt-010.cluster-qe.lab.eng.brq.redhat.com virt-012.cluster-qe.lab.eng.brq.redhat.com virt-013.cluster-qe.lab.eng.brq.redhat.com virt-014.cluster-qe.lab.eng.brq.redhat.com virt-015.cluster-qe.lab.eng.brq.redhat.com virt-016.cluster-qe.lab.eng.brq.redhat.com virt-020.cluster-qe.lab.eng.brq.redhat.com virt-027.cluster-qe.lab.eng.brq.redhat.com virt-028.cluster-qe.lab.eng.brq.redhat.com virt-029.cluster-qe.lab.eng.brq.redhat.com virt-030.cluster-qe.lab.eng.brq.redhat.com virt-031.cluster-qe.lab.eng.brq.redhat.com virt-032.cluster-qe.lab.eng.brq.redhat.com virt-033.cluster-qe.lab.eng.brq.redhat.com virt-034.cluster-qe.lab.eng.brq.redhat.com ] 
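Note: the churn in the expected test outputs above and below (dlm instances renumbered across hosts, and instances :10/:11 now listed after :9 instead of after :1) is the visible effect of the change to sort_rsc_id() in daemons/schedulerd/sched_clone.c at the end of this patch: clone children used to be ordered with plain strcmp(), so "dlm:10" sorted before "dlm:2". Below is a minimal standalone sketch of the numeric comparison, not code from the patch itself; the names demo.c and sort_by_instance are invented for illustration, and it assumes every ID ends in a ":N" instance suffix, as Pacemaker clone child IDs do.

/* demo.c - why strcmp() mis-orders clone instance IDs, and how comparing
 * the numeric ":N" suffix (as the patched sort_rsc_id() does) fixes it.
 * Build: cc -std=c99 -o demo demo.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Compare two "name:N" IDs by the number after the last ':' */
static int
sort_by_instance(const void *a, const void *b)
{
    const char *id1 = *(const char *const *) a;
    const char *id2 = *(const char *const *) b;
    long num1 = strtol(strrchr(id1, ':') + 1, NULL, 10);
    long num2 = strtol(strrchr(id2, ':') + 1, NULL, 10);

    return (num1 < num2) ? -1 : ((num1 > num2) ? 1 : 0);
}

int
main(void)
{
    const char *ids[] = { "dlm:10", "dlm:2", "dlm:11", "dlm:9" };
    size_t n = sizeof(ids) / sizeof(ids[0]);

    qsort(ids, n, sizeof(ids[0]), sort_by_instance);
    for (size_t i = 0; i < n; i++) {
        printf("%s\n", ids[i]);  /* dlm:2, dlm:9, dlm:10, dlm:11 */
    }
    return 0;
}

With strcmp() the same array would come back as dlm:10, dlm:11, dlm:2, dlm:9, which is exactly the stale ordering the old expected outputs encoded.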
diff --git a/cts/scheduler/container-is-remote-node.summary b/cts/scheduler/container-is-remote-node.summary index f5c78ce503..6fc5186695 100644 --- a/cts/scheduler/container-is-remote-node.summary +++ b/cts/scheduler/container-is-remote-node.summary @@ -1,56 +1,56 @@ 3 of 19 resources DISABLED and 0 BLOCKED from being started due to failures Current cluster status: Online: [ lama2 lama3 ] Containers: [ RNVM1:VM1 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 Clone Set: dlm-clone [dlm] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: clvmd-clone [clvmd] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] - Stopped (disabled): [ RNVM1 lama2 lama3 ] + Stopped (disabled): [ lama2 lama3 RNVM1 ] VM1 (ocf::heartbeat:VirtualDomain): Started lama2 Resource Group: RES1 FSdata1 (ocf::heartbeat:Filesystem): Started RNVM1 RES1-IP (ocf::heartbeat:IPaddr2): Started RNVM1 res-rsyslog (ocf::heartbeat:rsyslog.test): Started RNVM1 Transition Summary: Executing cluster transition: * Resource action: dlm monitor on RNVM1 * Resource action: clvmd monitor on RNVM1 * Resource action: gfs2-lv_1_1 monitor on RNVM1 * Resource action: gfs2-lv_1_2 monitor on RNVM1 Revised cluster status: Online: [ lama2 lama3 ] Containers: [ RNVM1:VM1 ] restofencelama2 (stonith:fence_ipmilan): Started lama3 restofencelama3 (stonith:fence_ipmilan): Started lama2 Clone Set: dlm-clone [dlm] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: clvmd-clone [clvmd] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1] Started: [ lama2 lama3 ] Stopped: [ RNVM1 ] Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] - Stopped (disabled): [ RNVM1 lama2 lama3 ] + Stopped (disabled): [ lama2 lama3 RNVM1 ] VM1 (ocf::heartbeat:VirtualDomain): Started lama2 Resource Group: RES1 FSdata1 (ocf::heartbeat:Filesystem): Started RNVM1 RES1-IP (ocf::heartbeat:IPaddr2): Started RNVM1 res-rsyslog (ocf::heartbeat:rsyslog.test): Started RNVM1
diff --git a/cts/scheduler/inc12.exp b/cts/scheduler/inc12.exp index 58701fb293..005b6f2623 100644 --- a/cts/scheduler/inc12.exp +++ b/cts/scheduler/inc12.exp @@ -1,668 +1,668 @@
[inc12.exp hunk body unrecoverable: the XML element content was stripped during extraction, leaving only bare -/+ markers]
diff --git a/cts/scheduler/inc12.summary b/cts/scheduler/inc12.summary index df5bc8fe50..2a6a088d57 100644 --- a/cts/scheduler/inc12.summary +++ b/cts/scheduler/inc12.summary @@ -1,137 +1,137 @@ Current cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Started c001n02 heartbeat_192.168.100.182 (ocf::heartbeat:IPaddr): Started c001n02 ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Started c001n02 lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Started c001n04 rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 rsc_c001n04 (ocf::heartbeat:IPaddr): Started c001n04 rsc_c001n05 (ocf::heartbeat:IPaddr): Started c001n05 rsc_c001n06 (ocf::heartbeat:IPaddr): Started c001n06 rsc_c001n07 (ocf::heartbeat:IPaddr): Started c001n07 Clone Set: DoFencing [child_DoFencing] Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ]
Stopped: [ c001n03 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:3 (ocf::heartbeat:Stateful): Slave c001n04 ocf_msdummy:4 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:5 (ocf::heartbeat:Stateful): Slave c001n05 ocf_msdummy:6 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:7 (ocf::heartbeat:Stateful): Slave c001n06 ocf_msdummy:8 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:9 (ocf::heartbeat:Stateful): Slave c001n07 ocf_msdummy:10 (ocf::heartbeat:Stateful): Slave c001n02 ocf_msdummy:11 (ocf::heartbeat:Stateful): Slave c001n02 Transition Summary: * Shutdown c001n07 * Shutdown c001n06 * Shutdown c001n05 * Shutdown c001n04 * Shutdown c001n03 * Shutdown c001n02 * Stop ocf_192.168.100.181 (c001n02) due to node availability * Stop heartbeat_192.168.100.182 (c001n02) due to node availability * Stop ocf_192.168.100.183 (c001n02) due to node availability * Stop lsb_dummy ( c001n04 ) due to node availability * Stop rsc_c001n03 ( c001n05 ) due to node availability * Stop rsc_c001n02 ( c001n02 ) due to node availability * Stop rsc_c001n04 ( c001n04 ) due to node availability * Stop rsc_c001n05 ( c001n05 ) due to node availability * Stop rsc_c001n06 ( c001n06 ) due to node availability * Stop rsc_c001n07 ( c001n07 ) due to node availability * Stop child_DoFencing:0 (c001n02) due to node availability * Stop child_DoFencing:1 (c001n04) due to node availability * Stop child_DoFencing:2 (c001n05) due to node availability * Stop child_DoFencing:3 (c001n06) due to node availability * Stop child_DoFencing:4 (c001n07) due to node availability - * Stop ocf_msdummy:10 ( Slave c001n02 ) due to node availability - * Stop ocf_msdummy:11 ( Slave c001n02 ) due to node availability * Stop ocf_msdummy:2 ( Slave c001n04 ) due to node availability * Stop ocf_msdummy:3 ( Slave c001n04 ) due to node availability * Stop ocf_msdummy:4 ( Slave c001n05 ) due to node availability * Stop ocf_msdummy:5 ( Slave c001n05 ) due to node availability * Stop ocf_msdummy:6 ( Slave c001n06 ) due to node availability * Stop ocf_msdummy:7 ( Slave c001n06 ) due to node availability * Stop ocf_msdummy:8 ( Slave c001n07 ) due to node availability * Stop ocf_msdummy:9 ( Slave c001n07 ) due to node availability + * Stop ocf_msdummy:10 ( Slave c001n02 ) due to node availability + * Stop ocf_msdummy:11 ( Slave c001n02 ) due to node availability Executing cluster transition: * Pseudo action: group-1_stop_0 * Resource action: ocf_192.168.100.183 stop on c001n02 * Resource action: lsb_dummy stop on c001n04 * Resource action: rsc_c001n03 stop on c001n05 * Resource action: rsc_c001n02 stop on c001n02 * Resource action: rsc_c001n04 stop on c001n04 * Resource action: rsc_c001n05 stop on c001n05 * Resource action: rsc_c001n06 stop on c001n06 * Resource action: rsc_c001n07 stop on c001n07 * Pseudo action: DoFencing_stop_0 * Pseudo action: master_rsc_1_stop_0 * Resource action: heartbeat_192.168.100.182 stop on c001n02 * Resource action: child_DoFencing:1 stop on c001n02 * Resource action: child_DoFencing:2 stop on c001n04 * Resource action: child_DoFencing:3 stop on c001n05 * Resource action: child_DoFencing:4 stop on c001n06 * Resource action: child_DoFencing:5 stop on c001n07 * Pseudo action: DoFencing_stopped_0 - * Resource action: ocf_msdummy:10 stop on c001n02 - * Resource action: ocf_msdummy:11 stop on c001n02 * Resource action: ocf_msdummy:2 stop on c001n04 * 
Resource action: ocf_msdummy:3 stop on c001n04 * Resource action: ocf_msdummy:4 stop on c001n05 * Resource action: ocf_msdummy:5 stop on c001n05 * Resource action: ocf_msdummy:6 stop on c001n06 * Resource action: ocf_msdummy:7 stop on c001n06 * Resource action: ocf_msdummy:8 stop on c001n07 * Resource action: ocf_msdummy:9 stop on c001n07 + * Resource action: ocf_msdummy:10 stop on c001n02 + * Resource action: ocf_msdummy:11 stop on c001n02 * Pseudo action: master_rsc_1_stopped_0 * Cluster action: do_shutdown on c001n07 * Cluster action: do_shutdown on c001n06 * Cluster action: do_shutdown on c001n05 * Cluster action: do_shutdown on c001n04 * Resource action: ocf_192.168.100.181 stop on c001n02 * Cluster action: do_shutdown on c001n02 * Pseudo action: all_stopped * Pseudo action: group-1_stopped_0 * Cluster action: do_shutdown on c001n03 Revised cluster status: Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] DcIPaddr (ocf::heartbeat:IPaddr): Stopped Resource Group: group-1 ocf_192.168.100.181 (ocf::heartbeat:IPaddr): Stopped heartbeat_192.168.100.182 (ocf::heartbeat:IPaddr): Stopped ocf_192.168.100.183 (ocf::heartbeat:IPaddr): Stopped lsb_dummy (lsb:/usr/lib/heartbeat/cts/LSBDummy): Stopped rsc_c001n03 (ocf::heartbeat:IPaddr): Stopped rsc_c001n02 (ocf::heartbeat:IPaddr): Stopped rsc_c001n04 (ocf::heartbeat:IPaddr): Stopped rsc_c001n05 (ocf::heartbeat:IPaddr): Stopped rsc_c001n06 (ocf::heartbeat:IPaddr): Stopped rsc_c001n07 (ocf::heartbeat:IPaddr): Stopped Clone Set: DoFencing [child_DoFencing] Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ] Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique) ocf_msdummy:0 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:1 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:2 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:3 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:4 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:5 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:6 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:7 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:8 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:9 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:10 (ocf::heartbeat:Stateful): Stopped ocf_msdummy:11 (ocf::heartbeat:Stateful): Stopped
diff --git a/cts/scheduler/multiple-monitor-one-failed.summary b/cts/scheduler/multiple-monitor-one-failed.summary index 0691ebf462..7f74d56498 100644 --- a/cts/scheduler/multiple-monitor-one-failed.summary +++ b/cts/scheduler/multiple-monitor-one-failed.summary @@ -1,21 +1,21 @@ Current cluster status: -Online: [ dhcp180 dhcp69 ] +Online: [ dhcp69 dhcp180 ] Dummy-test2 (ocf::test:Dummy): FAILED dhcp180 Transition Summary: * Recover Dummy-test2 ( dhcp180 ) Executing cluster transition: * Resource action: Dummy-test2 stop on dhcp180 * Pseudo action: all_stopped * Resource action: Dummy-test2 start on dhcp180 * Resource action: Dummy-test2 monitor=30000 on dhcp180 * Resource action: Dummy-test2 monitor=10000 on dhcp180 Revised cluster status: -Online: [ dhcp180 dhcp69 ] +Online: [ dhcp69 dhcp180 ] Dummy-test2 (ocf::test:Dummy): Started dhcp180
diff --git a/cts/scheduler/rec-node-13.exp b/cts/scheduler/rec-node-13.exp index 66cab5a57f..c5e45d9cac 100644 --- a/cts/scheduler/rec-node-13.exp +++ b/cts/scheduler/rec-node-13.exp @@ -1,82 +1,82 @@
[rec-node-13.exp hunk body unrecoverable: the XML element content was stripped during extraction, leaving only bare -/+ markers]
diff --git a/cts/scheduler/stonith-4.dot b/cts/scheduler/stonith-4.dot index ee659cbcaf..cdaa286ab9 100644 --- a/cts/scheduler/stonith-4.dot +++ b/cts/scheduler/stonith-4.dot @@ -1,21 +1,21 @@ digraph "g" { "Fencing_monitor_0 pcmk-11" -> "Fencing_start_0 pcmk-1" [ style = dashed] "Fencing_monitor_0 pcmk-11" [ style=dashed color="red" fontcolor="black"] "Fencing_monitor_0 pcmk-2" -> "Fencing_start_0 pcmk-1" [ style = dashed] "Fencing_monitor_0 pcmk-2" [ style=dashed color="red" fontcolor="black"] "Fencing_monitor_0 pcmk-3" -> "Fencing_start_0 pcmk-1" [ style = dashed] "Fencing_monitor_0 pcmk-3" [ style=dashed color="red" fontcolor="black"] "Fencing_start_0 pcmk-1" [ style=dashed color="red" fontcolor="black"] "all_stopped" -> "Fencing_start_0 pcmk-1" [ style = dashed] "all_stopped" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' pcmk-10" -> "stonith 'reboot' pcmk-5" [ style = bold] +"stonith 'reboot' pcmk-10" -> "stonith_complete" [ style = bold] "stonith 'reboot' pcmk-10" [ style=bold color="green" fontcolor="black"] "stonith 'reboot' pcmk-5" -> "stonith 'reboot' pcmk-7" [ style = bold] "stonith 'reboot' pcmk-5" [ style=bold color="green" fontcolor="black"] "stonith 'reboot' pcmk-7" -> "stonith 'reboot' pcmk-8" [ style = bold] "stonith 'reboot' pcmk-7" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' pcmk-8" -> "stonith_complete" [ style = bold] +"stonith 'reboot' pcmk-8" -> "stonith 'reboot' pcmk-10" [ style = bold] "stonith 'reboot' pcmk-8" [ style=bold color="green" fontcolor="black"] "stonith_complete" -> "all_stopped" [ style = bold] "stonith_complete" [ style=bold color="green" fontcolor="orange"] }
diff --git a/cts/scheduler/stonith-4.exp b/cts/scheduler/stonith-4.exp index d3e9e67cd5..fabf55dab8 100644 --- a/cts/scheduler/stonith-4.exp +++ b/cts/scheduler/stonith-4.exp @@ -1,83 +1,82 @@
[stonith-4.exp hunk body unrecoverable: the XML element content was stripped during extraction, leaving only bare -/+ markers]
diff --git a/cts/scheduler/stonith-4.summary b/cts/scheduler/stonith-4.summary index 7502dada0a..deaf1b8caf 100644 --- a/cts/scheduler/stonith-4.summary +++ b/cts/scheduler/stonith-4.summary @@ -1,40 +1,40 @@ Current cluster status: -Node pcmk-10 (110): UNCLEAN (online) -Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-5 (105): UNCLEAN (offline) Node pcmk-7 (107): UNCLEAN (online) Node pcmk-8 (108): UNCLEAN (offline) Node pcmk-9 (109): pending +Node pcmk-10 (110): UNCLEAN (online) +Node pcmk-11 (111): pending Online: [ pcmk-1 ] OFFLINE: [ pcmk-4 pcmk-6 ] Fencing (stonith:fence_xvm): Stopped Transition Summary: * Fence (reboot) pcmk-10 'peer process is no longer available' * Fence (reboot) pcmk-8 'peer has not been seen by the cluster' * Fence (reboot) pcmk-7 'peer failed the pacemaker membership criteria' * Fence (reboot) pcmk-5 'peer has not been seen by the cluster' * Start Fencing ( pcmk-1 ) blocked Executing cluster transition: - * Fencing pcmk-10 (reboot) * Fencing pcmk-5 (reboot) * Fencing pcmk-7 (reboot) * Fencing pcmk-8 (reboot) + * Fencing pcmk-10 (reboot) * Pseudo action: stonith_complete * Pseudo action: all_stopped Revised cluster status: -Node pcmk-11 (111): pending Node pcmk-2 (102): pending Node pcmk-3 (103): pending Node pcmk-9 (109): pending +Node pcmk-11 (111): pending Online: [ pcmk-1 ] -OFFLINE: [ pcmk-10 pcmk-4 pcmk-5 pcmk-6 pcmk-7 pcmk-8 ] +OFFLINE: [ pcmk-4 pcmk-5 pcmk-6 pcmk-7 pcmk-8 pcmk-10 ] Fencing (stonith:fence_xvm): Stopped
diff --git a/daemons/schedulerd/sched_clone.c b/daemons/schedulerd/sched_clone.c index 1900857dae..7e0a1acc20 100644 --- a/daemons/schedulerd/sched_clone.c +++ b/daemons/schedulerd/sched_clone.c @@ -1,1460 +1,1472 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #define VARIANT_CLONE 1 #include gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all); static gint sort_rsc_id(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; + long num1, num2; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); - return strcmp(resource1->id, resource2->id); + /* + * Sort clone instances numerically by instance number, so instance :10 + * comes after :9. + */ + num1 = strtol(strrchr(resource1->id, ':') + 1, NULL, 10); + num2 = strtol(strrchr(resource2->id, ':') + 1, NULL, 10); + if (num1 < num2) { + return -1; + } else if (num1 > num2) { + return 1; + } + return 0; } static node_t * parent_node_instance(const resource_t * rsc, node_t * node) { node_t *ret = NULL; if (node != NULL && rsc->parent) { ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id); } else if(node != NULL) { ret = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); } return ret; } static gboolean did_fail(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_failed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (did_fail(child_rsc)) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int rc = 0; node_t *node1 = NULL; node_t *node2 = NULL; node_t *current_node1 = NULL; node_t *current_node2 = NULL; unsigned int nnodes1 = 0; unsigned int nnodes2 = 0; gboolean can1 = TRUE; gboolean can2 = TRUE; const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* allocation order: * - active instances * - instances running on nodes with the least copies * - active instances on nodes that can't support them or are to be fenced * - failed instances * - inactive instances */ current_node1 = pe__find_active_on(resource1, &nnodes1, NULL); current_node2 = pe__find_active_on(resource2, &nnodes2, NULL); if (nnodes1 && nnodes2) { if (nnodes1 < nnodes2) { crm_trace("%s < %s: running_on", resource1->id, resource2->id); return -1; } else if (nnodes1 > nnodes2) { crm_trace("%s > %s: running_on", resource1->id, resource2->id); return 1; } } node1 = current_node1; node2 = current_node2; if (node1) { node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource1->id); node1 = NULL; can1 = FALSE; } } if (node2) { node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id); if (match == NULL || match->weight < 0) { crm_trace("%s: current location is unavailable", resource2->id); node2 = NULL; can2 = FALSE; } } if (can1 != can2) { if (can1) { crm_trace("%s < %s: availability of current location", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: availability of current location", resource1->id, resource2->id); return 1; } if (resource1->priority < resource2->priority) { crm_trace("%s < %s: priority", resource1->id, resource2->id); return 1; } else if (resource1->priority > resource2->priority) { crm_trace("%s > %s: priority", resource1->id, resource2->id); return -1; } if (node1 == NULL && node2 == NULL) { 
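/* Neither instance is active anywhere, so nothing distinguishes them at this level; report a tie */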
crm_trace("%s == %s: not active", resource1->id, resource2->id); return 0; } if (node1 != node2) { if (node1 == NULL) { crm_trace("%s > %s: active", resource1->id, resource2->id); return 1; } else if (node2 == NULL) { crm_trace("%s < %s: active", resource1->id, resource2->id); return -1; } } can1 = can_run_resources(node1); can2 = can_run_resources(node2); if (can1 != can2) { if (can1) { crm_trace("%s < %s: can", resource1->id, resource2->id); return -1; } crm_trace("%s > %s: can", resource1->id, resource2->id); return 1; } node1 = parent_node_instance(resource1, node1); node2 = parent_node_instance(resource2, node2); if (node1 != NULL && node2 == NULL) { crm_trace("%s < %s: not allowed", resource1->id, resource2->id); return -1; } else if (node1 == NULL && node2 != NULL) { crm_trace("%s > %s: not allowed", resource1->id, resource2->id); return 1; } if (node1 == NULL || node2 == NULL) { crm_trace("%s == %s: not allowed", resource1->id, resource2->id); return 0; } if (node1->count < node2->count) { crm_trace("%s < %s: count", resource1->id, resource2->id); return -1; } else if (node1->count > node2->count) { crm_trace("%s > %s: count", resource1->id, resource2->id); return 1; } can1 = did_fail(resource1); can2 = did_fail(resource2); if (can1 != can2) { if (can1) { crm_trace("%s > %s: failed", resource1->id, resource2->id); return 1; } crm_trace("%s < %s: failed", resource1->id, resource2->id); return -1; } if (node1 && node2) { int lpc = 0; int max = 0; node_t *n = NULL; GListPtr gIter = NULL; GListPtr list1 = NULL; GListPtr list2 = NULL; GHashTable *hash1 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); GHashTable *hash2 = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); n = node_copy(current_node1); g_hash_table_insert(hash1, (gpointer) n->details->id, n); n = node_copy(current_node2); g_hash_table_insert(hash2, (gpointer) n->details->id, n); if(resource1->parent) { for (gIter = resource1->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_rh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource1->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource1->id); hash1 = native_merge_weights(constraint->rsc_lh, resource1->id, hash1, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } } if(resource2->parent) { for (gIter = resource2->parent->rsc_cons; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_rh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, 0); } for (gIter = resource2->parent->rsc_cons_lhs; gIter; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; crm_trace("Applying %s to %s", constraint->id, resource2->id); hash2 = native_merge_weights(constraint->rsc_lh, resource2->id, hash2, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_positive); } } /* Current location score */ node1 = g_hash_table_lookup(hash1, current_node1->details->id); node2 = g_hash_table_lookup(hash2, current_node2->details->id); if (node1->weight < 
node2->weight) { if (node1->weight < 0) { crm_trace("%s > %s: current score: %d %d", resource1->id, resource2->id, node1->weight, node2->weight); rc = -1; goto out; } else { crm_trace("%s < %s: current score: %d %d", resource1->id, resource2->id, node1->weight, node2->weight); rc = 1; goto out; } } else if (node1->weight > node2->weight) { crm_trace("%s > %s: current score: %d %d", resource1->id, resource2->id, node1->weight, node2->weight); rc = -1; goto out; } /* All location scores */ list1 = g_hash_table_get_values(hash1); list2 = g_hash_table_get_values(hash2); list1 = g_list_sort_with_data(list1, sort_node_weight, current_node1); list2 = g_list_sort_with_data(list2, sort_node_weight, current_node2); max = g_list_length(list1); if (max < g_list_length(list2)) { max = g_list_length(list2); } for (; lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if (node1 == NULL) { crm_trace("%s < %s: colocated score NULL", resource1->id, resource2->id); rc = 1; break; } else if (node2 == NULL) { crm_trace("%s > %s: colocated score NULL", resource1->id, resource2->id); rc = -1; break; } if (node1->weight < node2->weight) { crm_trace("%s < %s: colocated score", resource1->id, resource2->id); rc = 1; break; } else if (node1->weight > node2->weight) { crm_trace("%s > %s: colocated score", resource1->id, resource2->id); rc = -1; break; } } /* Order by reverse uname - same as sort_node_weight() does? */ out: g_hash_table_destroy(hash1); /* Free mem */ g_hash_table_destroy(hash2); /* Free mem */ g_list_free(list1); g_list_free(list2); if (rc != 0) { return rc; } } rc = strcmp(resource1->id, resource2->id); crm_trace("%s %c %s: default", resource1->id, rc < 0 ? '<' : '>', resource2->id); return rc; } static node_t * can_run_instance(resource_t * rsc, node_t * node, int limit) { node_t *local_node = NULL; if (node == NULL && rsc->allowed_nodes) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) { can_run_instance(rsc, local_node, limit); } return NULL; } if (can_run_resources(node) == FALSE) { goto bail; } else if (is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = parent_node_instance(rsc, node); if (local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if (local_node->weight < 0) { common_update_score(rsc, node->details->id, local_node->weight); pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.", rsc->id, node->details->uname); } else if (local_node->count < limit) { pe_rsc_trace(rsc, "%s can run on %s (already running %d)", rsc->id, node->details->uname, local_node->count); return local_node; } else { pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)", rsc->id, node->details->uname, local_node->count, limit); } bail: if (node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static node_t * color_instance(resource_t * rsc, node_t * prefer, gboolean all_coloc, int limit, pe_working_set_t * data_set) { node_t *chosen = NULL; GHashTable *backup = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)", rsc->id, (prefer? prefer->details->uname: "none"), (all_coloc? 
"all" : "some")); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependent resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if (prefer) { node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (local_prefer == NULL || local_prefer->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id, prefer->details->uname); return NULL; } } can_run_instance(rsc, NULL, limit); backup = node_hash_dup(rsc->allowed_nodes); chosen = rsc->cmds->allocate(rsc, prefer, data_set); if (chosen) { node_t *local_node = parent_node_instance(rsc, chosen); if (prefer && (chosen->details != prefer->details)) { crm_notice("Pre-allocation failed: got %s instead of %s", chosen->details->uname, prefer->details->uname); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = backup; native_deallocate(rsc); chosen = NULL; backup = NULL; } else if (local_node) { local_node->count++; } else if (is_set(rsc->flags, pe_rsc_managed)) { /* what to do? we can't enforce per-node limits in this case */ crm_config_err("%s not found in %s (list=%d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } if(backup) { g_hash_table_destroy(backup); } return chosen; } static void append_parent_colocation(resource_t * rsc, resource_t * child, gboolean all) { GListPtr gIter = NULL; gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_prepend(child->rsc_cons, cons); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *cons = (rsc_colocation_t *) gIter->data; if (all || cons->score < 0) { child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); } } } void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set); void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set) { int loop_max = 0; int allocated = 0; int available_nodes = 0; /* count now tracks the number of clones currently allocated */ for(GListPtr nIter = nodes; nIter != NULL; nIter = nIter->next) { pe_node_t *node = nIter->data; node->count = 0; if (can_run_resources(node)) { available_nodes++; } } if(available_nodes) { loop_max = max / available_nodes; } if (loop_max < 1) { loop_max = 1; } pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)", max, rsc->id, available_nodes, per_host_max, loop_max); /* Pre-allocate as many instances as we can to their current location */ for (GListPtr gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (child->running_on && is_set(child->flags, pe_rsc_provisional) && is_not_set(child->flags, pe_rsc_failed)) { node_t *child_node = pe__current_node(child); node_t *local_node = parent_node_instance(child, child_node); pe_rsc_trace(rsc, "Checking pre-allocation of %s to %s (%d remaining of %d)", child->id, child_node->details->uname, max - allocated, max); if 
(can_run_resources(child_node) == FALSE || child_node->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s", child_node->details->uname, child->id); } else if(local_node && local_node->count >= loop_max) { pe_rsc_trace(rsc, "Not pre-allocating because %s already allocated optimal instances", child_node->details->uname); } else if (color_instance(child, child_node, max < available_nodes, per_host_max, data_set)) { pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id, child_node->details->uname); allocated++; } } } pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max); for (GListPtr gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (child->running_on != NULL) { node_t *child_node = pe__current_node(child); node_t *local_node = parent_node_instance(child, child_node); if (local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } if (is_not_set(child->flags, pe_rsc_provisional)) { } else if (allocated >= max) { pe_rsc_debug(rsc, "Child %s not allocated - limit reached %d %d", child->id, allocated, max); resource_location(child, NULL, -INFINITY, "clone_color:limit_reached", data_set); } else { if (color_instance(child, NULL, max < available_nodes, per_host_max, data_set)) { allocated++; } } } pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d", allocated, rsc->id, max); } node_t * clone_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set) { GListPtr nodes = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if (is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } if (is_set(rsc->flags, pe_rsc_promotable)) { apply_master_prefs(rsc); } set_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Processing %s", rsc->id); /* this information is used by sort_clone_instance() when deciding in which * order to allocate clone instances */ for (GListPtr gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; pe_rsc_trace(rsc, "%s: Coloring %s first", rsc->id, constraint->rsc_rh->id); constraint->rsc_rh->cmds->allocate(constraint->rsc_rh, prefer, data_set); } for (GListPtr gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data; rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, (pe_weights_rollback | pe_weights_positive)); } dump_node_scores(show_scores ? 
0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set); distribute_children(rsc, rsc->children, nodes, clone_data->clone_max, clone_data->clone_node_max, data_set); g_list_free(nodes); if (is_set(rsc->flags, pe_rsc_promotable)) { color_promotable(rsc, data_set); } clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); pe_rsc_trace(rsc, "Done allocating %s", rsc->id); return NULL; } static void clone_update_pseudo_status(resource_t * rsc, gboolean * stopping, gboolean * starting, gboolean * active) { GListPtr gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if (rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (*starting && *stopping) { return; } else if (is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid); continue; } else if (is_set(action->flags, pe_action_pseudo) == FALSE && is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid); continue; } else if (safe_str_eq(RSC_STOP, action->task)) { pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid); *stopping = TRUE; } else if (safe_str_eq(RSC_START, action->task)) { if (is_set(action->flags, pe_action_runnable) == FALSE) { pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); } else { pe_rsc_trace(rsc, "Starting due to: %s", action->uuid); pe_rsc_trace(rsc, "%s run=%d, pseudo=%d", action->uuid, is_set(action->flags, pe_action_runnable), is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static action_t * find_rsc_action(resource_t * rsc, const char *key, gboolean active_only, GListPtr * list) { action_t *match = NULL; GListPtr possible = NULL; GListPtr active = NULL; possible = find_actions(rsc->actions, key, NULL); if (active_only) { GListPtr gIter = possible; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; if (is_set(op->flags, pe_action_optional) == FALSE) { active = g_list_prepend(active, op); } } if (active && g_list_length(active) == 1) { match = g_list_nth_data(active, 0); } if (list) { *list = active; active = NULL; } } else if (possible && g_list_length(possible) == 1) { match = g_list_nth_data(possible, 0); } if (list) { *list = possible; possible = NULL; } if (possible) { g_list_free(possible); } if (active) { g_list_free(active); } return match; } static void child_ordering_constraints(resource_t * rsc, pe_working_set_t * data_set) { char *key = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *last_stop = NULL; action_t *last_start = NULL; GListPtr gIter = NULL; gboolean active_only = TRUE; /* change to false to get the old behavior */ clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->ordered == FALSE) { return; } /* we have to maintain a consistent sorted child list when building order constraints */ 
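/* With the patched sort_rsc_id() this sort is numeric on the ":N" suffix, so the inter-child orderings built below follow instance number (starts ascend :0 -> :1 -> ... -> :9 -> :10, stops run in the reverse order) rather than the old lexicographic order, where :10 and :11 fell between :1 and :2 */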
rsc->children = g_list_sort(rsc->children, sort_rsc_id); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; key = stop_key(child); stop = find_rsc_action(child, key, active_only, NULL); free(key); key = start_key(child); start = find_rsc_action(child, key, active_only, NULL); free(key); if (stop) { if (last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } if (start) { if (last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(resource_t *rsc, pe_working_set_t *data_set) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify, &clone_data->stop_notify,data_set); child_ordering_constraints(rsc, data_set); if (is_set(rsc->flags, pe_rsc_promotable)) { create_promotable_actions(rsc, data_set); } } void clone_create_pseudo_actions( resource_t * rsc, GListPtr children, notify_data_t **start_notify, notify_data_t **stop_notify, pe_working_set_t * data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; gboolean allow_dependent_migrations = TRUE; action_t *stop = NULL; action_t *stopped = NULL; action_t *start = NULL; action_t *started = NULL; pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (GListPtr gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; gboolean starting = FALSE; gboolean stopping = FALSE; child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active); if (stopping && starting) { allow_dependent_migrations = FALSE; } child_stopping |= stopping; child_starting |= starting; } /* start */ start = create_pseudo_resource_op(rsc, RSC_START, !child_starting, TRUE, data_set); started = create_pseudo_resource_op(rsc, RSC_STARTED, !child_starting, FALSE, data_set); started->priority = INFINITY; if (child_active || child_starting) { update_action_flags(started, pe_action_runnable, __FUNCTION__, __LINE__); } if (start_notify != NULL && *start_notify == NULL) { *start_notify = create_notification_boundaries(rsc, RSC_START, start, started, data_set); } /* stop */ stop = create_pseudo_resource_op(rsc, RSC_STOP, !child_stopping, TRUE, data_set); stopped = create_pseudo_resource_op(rsc, RSC_STOPPED, !child_stopping, TRUE, data_set); stopped->priority = INFINITY; if (allow_dependent_migrations) { update_action_flags(stop, pe_action_migrate_runnable, __FUNCTION__, __LINE__); } if (stop_notify != NULL && *stop_notify == NULL) { *stop_notify = create_notification_boundaries(rsc, RSC_STOP, stop, stopped, data_set); if (start_notify && *start_notify && *stop_notify) { order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional); } } } void clone_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { resource_t *last_rsc = NULL; GListPtr gIter; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); if (is_set(rsc->flags, pe_rsc_promotable)) { 
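/* Promotable clones additionally demote before stopping, and promote only after the whole clone has started */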
new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); } if (clone_data->ordered) { /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, sort_rsc_id); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->internal_constraints(child_rsc, data_set); order_start_start(rsc, child_rsc, pe_order_runnable_left | pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_start_start(last_rsc, child_rsc, pe_order_optional); } order_stop_stop(rsc, child_rsc, pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if (clone_data->ordered && last_rsc) { order_stop_stop(child_rsc, last_rsc, pe_order_optional); } last_rsc = child_rsc; } if (is_set(rsc->flags, pe_rsc_promotable)) { promotable_constraints(rsc, data_set); } } bool assign_node(resource_t * rsc, node_t * node, gboolean force) { bool changed = FALSE; if (rsc->children) { for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; changed |= assign_node(child_rsc, node, force); } return changed; } if (rsc->allocated_to != NULL) { changed = true; } native_assign_node(rsc, NULL, node, force); return changed; } gboolean is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e filter, gboolean current) { node_t *node = NULL; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); CRM_CHECK(child_rsc && local_node, return FALSE); if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { /* We only want instances that haven't failed */ node = child_rsc->fns->location(child_rsc, NULL, current); } if (filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); return FALSE; } if (node && (node->details == local_node->details)) { return TRUE; } else if (node) { crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname, local_node->details->uname); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } return FALSE; } resource_t * find_compatible_child(resource_t * local_child, resource_t * rsc, enum rsc_role_e filter, gboolean current) { resource_t *pair = NULL; GListPtr gIter = NULL; GListPtr scratch = NULL; node_t *local_node = NULL; local_node = local_child->fns->location(local_child, NULL, current); if (local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); gIter = scratch; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_child_by_node(local_child, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } void clone_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } void clone_rsc_colocation_rh(resource_t *rsc_lh, resource_t *rsc_rh, 
rsc_colocation_t *constraint) { GListPtr gIter = NULL; gboolean do_interleave = FALSE; const char *interleave_s = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_rh != NULL, pe_err("rsc_rh was NULL for %s", constraint->id); return); CRM_CHECK(rsc_lh->variant == pe_native, return); pe_rsc_trace(rsc_rh, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc_rh->id, constraint->score); if (is_set(rsc_rh->flags, pe_rsc_promotable)) { if (is_set(rsc_rh->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc_rh, "%s is still provisional", rsc_rh->id); return; } else if (constraint->role_rh == RSC_ROLE_UNKNOWN) { pe_rsc_trace(rsc_rh, "Handling %s as a clone colocation", constraint->id); } else { promotable_colocation_rh(rsc_lh, rsc_rh, constraint); return; } } /* only the LHS side needs to be labeled as interleave */ interleave_s = g_hash_table_lookup(constraint->rsc_lh->meta, XML_RSC_ATTR_INTERLEAVE); if(crm_is_true(interleave_s) && constraint->rsc_lh->variant > pe_group) { // TODO: Do we actually care about multiple RH copies sharing a LH copy anymore? if (copies_per_node(constraint->rsc_lh) != copies_per_node(constraint->rsc_rh)) { crm_config_err("Cannot interleave %s and %s because" " they do not support the same number of copies per node", constraint->rsc_lh->id, constraint->rsc_rh->id); } else { do_interleave = TRUE; } } if (is_set(rsc_rh->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc_rh, "%s is still provisional", rsc_rh->id); return; } else if (do_interleave) { resource_t *rh_child = NULL; rh_child = find_compatible_child(rsc_lh, rsc_rh, RSC_ROLE_UNKNOWN, FALSE); if (rh_child) { pe_rsc_debug(rsc_rh, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc_rh, "Cannot pair %s with instance of %s", rsc_lh->id, rsc_rh->id); } return; } else if (constraint->score >= INFINITY) { GListPtr rhs = NULL; gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(rsc_rh, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); rhs = g_list_prepend(rhs, chosen); } } node_list_exclude(rsc_lh->allowed_nodes, rhs, FALSE); g_list_free(rhs); return; } gIter = rsc_rh->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); } } enum action_tasks clone_child_action(action_t * action) { enum action_tasks result = no_action; resource_t *child = (resource_t *) action->rsc->children->data; if (safe_str_eq(action->task, "notify") || safe_str_eq(action->task, "notified")) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for (; lpc > 0; lpc--) { if (key[lpc] == '_' && stop == 0) { stop = lpc; } else if (key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = strdup(key + lpc); task_mutable[stop - lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); free(task_mutable); 
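/* task_mutable held the text between the last two '_' separators of the notify key (e.g. "start" from "..._notify_start_0"), i.e. the action actually being notified about */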
break; } } } else { result = get_complex_task(child, action->task, TRUE); } return result; } enum pe_action_flags summary_action_flags(action_t * action, GListPtr children, node_t * node) { GListPtr gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); for (gIter = children; gIter != NULL; gIter = gIter->next) { action_t *child_action = NULL; resource_t *child = (resource_t *) gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node); pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id, node ? node->details->uname : "none", child_action?child_action->uuid:"NA"); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (is_set(flags, pe_action_optional) && is_set(child_flags, pe_action_optional) == FALSE) { pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid, child_action->uuid); flags = crm_clear_bit(__FUNCTION__, __LINE__, action->rsc->id, flags, pe_action_optional); pe_clear_action_bit(action, pe_action_optional); } if (is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } } if (check_runnable && any_runnable == FALSE) { pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid); flags = crm_clear_bit(__FUNCTION__, __LINE__, action->rsc->id, flags, pe_action_runnable); if (node == NULL) { pe_clear_action_bit(action, pe_action_runnable); } } return flags; } enum pe_action_flags clone_action_flags(action_t * action, node_t * node) { return summary_action_flags(action, action->rsc->children, node); } void clone_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { GListPtr gIter = rsc->children; pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); } } void clone_expand(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; rsc->cmds->action_flags(op, NULL); } if (clone_data->start_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->start_notify); expand_notification_data(rsc, clone_data->start_notify, data_set); create_notifications(rsc, clone_data->start_notify, data_set); } if (clone_data->stop_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->stop_notify); expand_notification_data(rsc, clone_data->stop_notify, data_set); create_notifications(rsc, clone_data->stop_notify, data_set); } if (clone_data->promote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->promote_notify); expand_notification_data(rsc, clone_data->promote_notify, data_set); create_notifications(rsc, clone_data->promote_notify, data_set); } if (clone_data->demote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->demote_notify); expand_notification_data(rsc, clone_data->demote_notify, data_set); create_notifications(rsc, clone_data->demote_notify, data_set); } /* Now that the notifications have been created we can expand the children */ gIter = rsc->children;
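/* Aside (hedged, not introduced by this patch): the notification data collected and expanded above is ultimately what resource agents observe as OCF_RESKEY_CRM_meta_notify_* environment variables on their notify actions. */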
for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } // Check whether a resource or any of its children is known on node static bool rsc_known_on(pe_resource_t *rsc, pe_node_t *node) { if (rsc->children) { for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { resource_t *child = (resource_t *) child_iter->data; if (rsc_known_on(child, node)) { return TRUE; } } } else if (rsc->known_on) { GHashTableIter iter; node_t *known_node = NULL; g_hash_table_iter_init(&iter, rsc->known_on); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) { if (node->details == known_node->details) { return TRUE; } } } return FALSE; } // Look for an instance of clone that is known on node static pe_resource_t * find_instance_on(pe_resource_t *clone, pe_node_t *node) { for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; if (rsc_known_on(child, node)) { return child; } } return NULL; } // For unique clones, probe each instance separately static gboolean probe_unique_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { gboolean any_created = FALSE; for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { resource_t *child = (resource_t *) child_iter->data; any_created |= child->cmds->create_probe(child, node, complete, force, data_set); } return any_created; } // For anonymous clones, only a single instance needs to be probed static gboolean probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { // First, check if we probed an instance on this node last time pe_resource_t *child = find_instance_on(rsc, node); // Otherwise, check if we plan to start an instance on this node if (child == NULL) { for (GList *child_iter = rsc->children; child_iter && !child; child_iter = child_iter->next) { node_t *local_node = NULL; resource_t *child_rsc = (resource_t *) child_iter->data; local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if (local_node && (local_node->details == node->details)) { child = child_rsc; } } } // Otherwise, use the first clone instance if (child == NULL) { child = rsc->children->data; } return child->cmds->create_probe(child, node, complete, force, data_set); } gboolean clone_create_probe(resource_t * rsc, node_t * node, action_t * complete, gboolean force, pe_working_set_t * data_set) { gboolean any_created = FALSE; CRM_ASSERT(rsc); rsc->children = g_list_sort(rsc->children, sort_rsc_id); if (rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if (rsc->exclusive_discover) { node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on * * remove the 
node from allowed_nodes so that the * notification contains only nodes that we might ever run * on */ g_hash_table_remove(rsc->allowed_nodes, node->details->id); /* Bit of a shortcut - might as well take it */ return FALSE; } } if (is_set(rsc->flags, pe_rsc_unique)) { any_created = probe_unique_clone(rsc, node, complete, force, data_set); } else { any_created = probe_anonymous_clone(rsc, node, complete, force, data_set); } return any_created; } void clone_append_meta(resource_t * rsc, xmlNode * xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, is_set(rsc->flags, pe_rsc_notify) ? "true" : "false"); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); free(name); if (is_set(rsc->flags, pe_rsc_promotable)) { name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX); crm_xml_add_int(xml, name, clone_data->promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX); crm_xml_add_int(xml, name, clone_data->promoted_node_max); free(name); /* @COMPAT Maintain backward compatibility with resource agents that * expect the old names (deprecated since 2.0.0). */ name = crm_meta_name(XML_RSC_ATTR_MASTER_MAX); crm_xml_add_int(xml, name, clone_data->promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_MASTER_NODEMAX); crm_xml_add_int(xml, name, clone_data->promoted_node_max); free(name); } } GHashTable * clone_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr, float factor, enum pe_weights flags) { return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags); } diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h index 175b55c6f2..02bca1eed2 100644 --- a/include/crm/pengine/complex.h +++ b/include/crm/pengine/complex.h @@ -1,67 +1,57 @@ -/* - * Copyright (C) 2004 Andrew Beekhof - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This software is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +/* + * Copyright 2004-2018 Andrew Beekhof + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ + #ifndef PENGINE_COMPLEX__H # define PENGINE_COMPLEX__H #ifdef __cplusplus extern "C" { #endif enum pe_obj_types { pe_unknown = -1, pe_native = 0, pe_group = 1, pe_clone = 2, pe_container = 3, }; typedef struct resource_object_functions_s { gboolean (*unpack) (pe_resource_t*, pe_working_set_t*); pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search, const pe_node_t *node, int flags); /* parameter result must be free'd */ char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*, pe_working_set_t*); void (*print) (pe_resource_t*, const char*, long, void*); gboolean (*active) (pe_resource_t*, gboolean); enum rsc_role_e (*state) (const pe_resource_t*, gboolean); - pe_node_t *(*location) (pe_resource_t*, GListPtr*, gboolean); + pe_node_t *(*location) (const pe_resource_t*, GList**, int); void (*free) (pe_resource_t*); } resource_object_functions_t; extern resource_object_functions_t resource_class_functions[]; void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); void get_rsc_attributes(GHashTable *meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); #ifdef ENABLE_VERSIONED_ATTRS void pe_get_versioned_attributes(xmlNode *meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); #endif typedef struct resource_alloc_functions_s resource_alloc_functions_t; gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc); pe_resource_t *uber_parent(pe_resource_t *rsc); #ifdef __cplusplus } #endif #endif diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index c13fc6f3d9..49c292b41a 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -1,323 +1,323 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PE_INTERNAL__H # define PE_INTERNAL__H # include # include # include # define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args) # define pe_err(fmt...) { was_processing_error = TRUE; crm_config_error = TRUE; crm_err(fmt); } # define pe_warn(fmt...) { was_processing_warning = TRUE; crm_config_warning = TRUE; crm_warn(fmt); } # define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); } # define pe_proc_warn(fmt...) 
{ was_processing_warning = TRUE; crm_warn(fmt); } # define pe_set_action_bit(action, bit) action->flags = crm_set_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit) # define pe_clear_action_bit(action, bit) action->flags = crm_clear_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit) typedef struct notify_data_s { GHashTable *keys; const char *action; action_t *pre; action_t *post; action_t *pre_done; action_t *post_done; GListPtr active; /* notify_entry_t* */ GListPtr inactive; /* notify_entry_t* */ GListPtr start; /* notify_entry_t* */ GListPtr stop; /* notify_entry_t* */ GListPtr demote; /* notify_entry_t* */ GListPtr promote; /* notify_entry_t* */ GListPtr master; /* notify_entry_t* */ GListPtr slave; /* notify_entry_t* */ GHashTable *allowed_nodes; } notify_data_t; bool pe_can_fence(pe_working_set_t *data_set, node_t *node); int merge_weights(int w1, int w2); void add_hash_param(GHashTable * hash, const char *name, const char *value); char *native_parameter(resource_t * rsc, node_t * node, gboolean create, const char *name, pe_working_set_t * data_set); -node_t *native_location(resource_t * rsc, GListPtr * list, gboolean current); +pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current); void pe_metadata(void); void verify_pe_options(GHashTable * options); void common_update_score(resource_t * rsc, const char *id, int score); void native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set); gboolean native_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean group_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean clone_unpack(resource_t * rsc, pe_working_set_t * data_set); gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set); resource_t *native_find_rsc(resource_t *rsc, const char *id, const node_t *node, int flags); gboolean native_active(resource_t * rsc, gboolean all); gboolean group_active(resource_t * rsc, gboolean all); gboolean clone_active(resource_t * rsc, gboolean all); gboolean container_active(resource_t * rsc, gboolean all); void native_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void group_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void container_print(resource_t * rsc, const char *pre_text, long options, void *print_data); void native_free(resource_t * rsc); void group_free(resource_t * rsc); void clone_free(resource_t * rsc); void container_free(resource_t * rsc); enum rsc_role_e native_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e group_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e clone_resource_state(const resource_t * rsc, gboolean current); enum rsc_role_e container_resource_state(const resource_t * rsc, gboolean current); gboolean common_unpack(xmlNode * xml_obj, resource_t ** rsc, resource_t * parent, pe_working_set_t * data_set); void common_free(resource_t * rsc); extern pe_working_set_t *pe_dataset; extern node_t *node_copy(const node_t *this_node); extern time_t get_effective_time(pe_working_set_t * data_set); /* Failure handling utilities (from failcounts.c) */ // bit flags for fail count handling options enum pe_fc_flags_e { pe_fc_default = 0x00, pe_fc_effective = 0x01, // don't count expired failures pe_fc_fillers = 0x02, // if container, include filler failures in count }; int pe_get_failcount(node_t *node, resource_t *rsc, time_t 
*last_failure, uint32_t flags, xmlNode *xml_op, pe_working_set_t *data_set); /* Functions for finding/counting a resource's active nodes */ pe_node_t *pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); pe_node_t *pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count); static inline pe_node_t * pe__current_node(const pe_resource_t *rsc) { return pe__find_active_on(rsc, NULL, NULL); } /* Binary like operators for lists of nodes */ extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores); extern GListPtr node_list_dup(GListPtr list, gboolean reset, gboolean filter); extern GHashTable *node_hash_from_list(GListPtr list); static inline gpointer pe_hash_table_lookup(GHashTable * hash, gconstpointer key) { if (hash) { return g_hash_table_lookup(hash, key); } return NULL; } extern action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set); extern gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order); GHashTable *node_hash_dup(GHashTable * hash); /* Printing functions for debug */ extern void print_node(const char *pre_text, node_t * node, gboolean details); extern void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details); extern void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * nodes); extern void dump_node_capacity(int level, const char *comment, node_t * node); extern void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node); # define dump_node_scores(level, rsc, text, nodes) do { \ dump_node_scores_worker(level, __FILE__, __FUNCTION__, __LINE__, rsc, text, nodes); \ } while(0) /* Sorting functions */ extern gint sort_rsc_priority(gconstpointer a, gconstpointer b); extern gint sort_rsc_index(gconstpointer a, gconstpointer b); extern xmlNode *find_rsc_op_entry(resource_t * rsc, const char *key); extern action_t *custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean foo, pe_working_set_t * data_set); # define delete_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DELETE, 0) # define delete_action(rsc, node, optional) custom_action( \ rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \ optional, TRUE, data_set); # define stopped_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STOPPED, 0) # define stopped_action(rsc, node, optional) custom_action( \ rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \ optional, TRUE, data_set); # define stop_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STOP, 0) # define stop_action(rsc, node, optional) custom_action( \ rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \ optional, TRUE, data_set); # define reload_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_RELOAD, 0) # define start_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_START, 0) # define start_action(rsc, node, optional) custom_action( \ rsc, start_key(rsc), CRMD_ACTION_START, node, \ optional, TRUE, data_set) # define started_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_STARTED, 0) # define started_action(rsc, node, optional) custom_action( \ rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \ optional, TRUE, data_set) # define promote_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_PROMOTE, 0) # define promote_action(rsc, node, optional) custom_action( \ rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \ optional, TRUE, data_set) # define promoted_key(rsc) 
generate_op_key(rsc->id, CRMD_ACTION_PROMOTED, 0) # define promoted_action(rsc, node, optional) custom_action( \ rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \ optional, TRUE, data_set) # define demote_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DEMOTE, 0) # define demote_action(rsc, node, optional) custom_action( \ rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \ optional, TRUE, data_set) # define demoted_key(rsc) generate_op_key(rsc->id, CRMD_ACTION_DEMOTED, 0) # define demoted_action(rsc, node, optional) custom_action( \ rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \ optional, TRUE, data_set) extern int pe_get_configured_timeout(resource_t *rsc, const char *action, pe_working_set_t *data_set); extern action_t *find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node); extern enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic); extern GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node); extern GListPtr find_actions_exact(GListPtr input, const char *key, node_t * on_node); extern GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node); extern void pe_free_action(action_t * action); extern void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); extern gboolean get_target_role(resource_t * rsc, enum rsc_role_e *role); extern resource_t *find_clone_instance(resource_t * rsc, const char *sub_id, pe_working_set_t * data_set); extern void destroy_ticket(gpointer data); extern ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set); // Resources for manipulating resource names const char *pe_base_name_end(const char *id); char *clone_strip(const char *last_rsc_id); char *clone_zero(const char *last_rsc_id); static inline bool pe_base_name_eq(resource_t *rsc, const char *id) { if (id && rsc && rsc->id) { // Number of characters in rsc->id before any clone suffix size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1; return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len); } return FALSE; } int get_target_rc(xmlNode * xml_op); gint sort_node_uname(gconstpointer a, gconstpointer b); bool is_set_recursive(resource_t * rsc, long long flag, bool any); enum rsc_digest_cmp_val { /*! Digests are the same */ RSC_DIGEST_MATCH = 0, /*! Params that require a restart changed */ RSC_DIGEST_RESTART, /*! Some parameter changed. */ RSC_DIGEST_ALL, /*! rsc op didn't have a digest associated with it, so * it is unknown if parameters changed or not. 
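* (Hedged example: a recorded operation whose CIB history entry carries no digest to compare against would map to this value.)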
*/ RSC_DIGEST_UNKNOWN, }; typedef struct op_digest_cache_s { enum rsc_digest_cmp_val rc; xmlNode *params_all; xmlNode *params_secure; xmlNode *params_restart; char *digest_all_calc; char *digest_secure_calc; char *digest_restart_calc; } op_digest_cache_t; op_digest_cache_t *rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node, pe_working_set_t * data_set); action_t *pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set); void trigger_unfencing( resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set); void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite); void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite); #define pe_action_required(action, reason, text) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, text, pe_action_optional, FALSE) #define pe_action_implies(action, reason, flag) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, NULL, flag, FALSE) void set_bit_recursive(resource_t * rsc, unsigned long long flag); void clear_bit_recursive(resource_t * rsc, unsigned long long flag); gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options, void * print_data, gboolean print_all); void pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason); node_t *pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set); bool remote_id_conflict(const char *remote_name, pe_working_set_t *data); void common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data); resource_t *find_container_child(const resource_t *bundle, const node_t *node); bool container_fix_remote_addr(resource_t *rsc); const char *container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field); const char *pe_node_attribute_calculated(const pe_node_t *node, const char *name, const resource_t *rsc); const char *pe_node_attribute_raw(pe_node_t *node, const char *name); bool pe__is_universal_clone(pe_resource_t *rsc, pe_working_set_t *data_set); #endif diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h index 92a868600a..a10bed6669 100644 --- a/include/crm/pengine/status.h +++ b/include/crm/pengine/status.h @@ -1,505 +1,507 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #ifndef PENGINE_STATUS__H # define PENGINE_STATUS__H #ifdef __cplusplus extern "C" { #endif # include # include # include # include typedef struct pe_node_s pe_node_t; typedef struct pe_action_s pe_action_t; typedef struct pe_resource_s pe_resource_t; typedef struct pe_working_set_s pe_working_set_t; # include enum pe_quorum_policy { no_quorum_freeze, no_quorum_stop, no_quorum_ignore, no_quorum_suicide }; enum node_type { node_ping, node_member, node_remote }; enum pe_restart { pe_restart_restart, pe_restart_ignore }; enum pe_find { pe_find_renamed = 0x001, // match resource ID or LRM history ID pe_find_anon = 0x002, // match base name of anonymous clone instances pe_find_clone = 0x004, // match only clone instances pe_find_current = 0x008, // match resource active on specified node pe_find_inactive = 0x010, // match resource not running anywhere pe_find_any = 0x020, // match base name of any clone instance }; # define pe_flag_have_quorum 0x00000001ULL # define pe_flag_symmetric_cluster 0x00000002ULL # define pe_flag_maintenance_mode 0x00000008ULL # define pe_flag_stonith_enabled 0x00000010ULL # define pe_flag_have_stonith_resource 0x00000020ULL # define pe_flag_enable_unfencing 0x00000040ULL # define pe_flag_concurrent_fencing 0x00000080ULL # define pe_flag_stop_rsc_orphans 0x00000100ULL # define pe_flag_stop_action_orphans 0x00000200ULL # define pe_flag_stop_everything 0x00000400ULL # define pe_flag_start_failure_fatal 0x00001000ULL # define pe_flag_remove_after_stop 0x00002000ULL # define pe_flag_startup_fencing 0x00004000ULL # define pe_flag_startup_probes 0x00010000ULL # define pe_flag_have_status 0x00020000ULL # define pe_flag_have_remote_nodes 0x00040000ULL # define pe_flag_quick_location 0x00100000ULL # define pe_flag_sanitized 0x00200000ULL # define pe_flag_stdout 0x00400000ULL struct pe_working_set_s { xmlNode *input; crm_time_t *now; /* options extracted from the input */ char *dc_uuid; pe_node_t *dc_node; const char *stonith_action; const char *placement_strategy; unsigned long long flags; int stonith_timeout; enum pe_quorum_policy no_quorum_policy; GHashTable *config_hash; GHashTable *tickets; // Actions for which there can be only one (e.g. fence nodeX) GHashTable *singletons; GListPtr nodes; GListPtr resources; GListPtr placement_constraints; GListPtr ordering_constraints; GListPtr colocation_constraints; GListPtr ticket_constraints; GListPtr actions; xmlNode *failed; xmlNode *op_defaults; xmlNode *rsc_defaults; /* stats */ int num_synapse; int max_valid_nodes; int order_id; int action_id; /* final output */ xmlNode *graph; GHashTable *template_rsc_sets; const char *localhost; GHashTable *tags; int blocked_resources; int disabled_resources; }; struct pe_node_shared_s { const char *id; const char *uname; enum node_type type; /* @TODO convert these flags into a bitfield */ gboolean online; gboolean standby; gboolean standby_onfail; gboolean pending; gboolean unclean; gboolean unseen; gboolean shutdown; gboolean expected_up; gboolean is_dc; gboolean maintenance; gboolean rsc_discovery_enabled; gboolean remote_requires_reset; gboolean remote_was_fenced; gboolean remote_maintenance; /* what the remote-rsc is thinking */ gboolean unpacked; int num_resources; pe_resource_t *remote_rsc; GListPtr running_rsc; /* pe_resource_t* */ GListPtr allocated_rsc; /* pe_resource_t* */ GHashTable *attrs; /* char* => char* */ GHashTable *utilization; GHashTable *digest_cache; /*! 
cache of calculated resource digests */ }; struct pe_node_s { int weight; gboolean fixed; int count; struct pe_node_shared_s *details; int rsc_discover_mode; }; # define pe_rsc_orphan 0x00000001ULL # define pe_rsc_managed 0x00000002ULL # define pe_rsc_block 0x00000004ULL # define pe_rsc_orphan_container_filler 0x00000008ULL # define pe_rsc_notify 0x00000010ULL # define pe_rsc_unique 0x00000020ULL # define pe_rsc_fence_device 0x00000040ULL # define pe_rsc_promotable 0x00000080ULL # define pe_rsc_provisional 0x00000100ULL # define pe_rsc_allocating 0x00000200ULL # define pe_rsc_merging 0x00000400ULL # define pe_rsc_reload 0x00002000ULL # define pe_rsc_allow_remote_remotes 0x00004000ULL # define pe_rsc_failed 0x00010000ULL # define pe_rsc_runnable 0x00040000ULL # define pe_rsc_start_pending 0x00080000ULL # define pe_rsc_starting 0x00100000ULL # define pe_rsc_stopping 0x00200000ULL # define pe_rsc_allow_migrate 0x00800000ULL # define pe_rsc_failure_ignored 0x01000000ULL # define pe_rsc_maintenance 0x04000000ULL # define pe_rsc_is_container 0x08000000ULL # define pe_rsc_needs_quorum 0x10000000ULL # define pe_rsc_needs_fencing 0x20000000ULL # define pe_rsc_needs_unfencing 0x40000000ULL enum pe_graph_flags { pe_graph_none = 0x00000, pe_graph_updated_first = 0x00001, pe_graph_updated_then = 0x00002, pe_graph_disable = 0x00004, }; /* *INDENT-OFF* */ enum pe_action_flags { pe_action_pseudo = 0x00001, pe_action_runnable = 0x00002, pe_action_optional = 0x00004, pe_action_print_always = 0x00008, pe_action_have_node_attrs = 0x00010, pe_action_implied_by_stonith = 0x00040, pe_action_migrate_runnable = 0x00080, pe_action_dumped = 0x00100, pe_action_processed = 0x00200, pe_action_clear = 0x00400, pe_action_dangle = 0x00800, /* This action requires one or more of its dependencies to be runnable. * We use this to clear the runnable flag before checking dependencies. 
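* An action flagged this way starts with pe_action_runnable cleared and regains it only once enough of its "before" actions are runnable; see runnable_before and required_runnable_before in struct pe_action_s. (Illustrative reading of the surrounding code, not new behavior.)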
*/ pe_action_requires_any = 0x01000, pe_action_reschedule = 0x02000, pe_action_tracking = 0x04000, }; /* *INDENT-ON* */ struct pe_resource_s { char *id; char *clone_name; xmlNode *xml; xmlNode *orig_xml; xmlNode *ops_xml; pe_working_set_t *cluster; pe_resource_t *parent; enum pe_obj_types variant; void *variant_opaque; resource_object_functions_t *fns; resource_alloc_functions_t *cmds; enum rsc_recovery_type recovery_type; enum pe_restart restart_type; int priority; int stickiness; int sort_index; int failure_timeout; int migration_threshold; guint remote_reconnect_ms; char *pending_task; unsigned long long flags; // @TODO merge these into flags gboolean is_remote_node; gboolean exclusive_discover; GListPtr rsc_cons_lhs; /* rsc_colocation_t* */ GListPtr rsc_cons; /* rsc_colocation_t* */ GListPtr rsc_location; /* rsc_to_node_t* */ GListPtr actions; /* pe_action_t* */ GListPtr rsc_tickets; /* rsc_ticket* */ pe_node_t *allocated_to; pe_node_t *partial_migration_target; pe_node_t *partial_migration_source; GListPtr running_on; /* pe_node_t* */ GHashTable *known_on; /* pe_node_t* */ GHashTable *allowed_nodes; /* pe_node_t* */ enum rsc_role_e role; enum rsc_role_e next_role; GHashTable *meta; GHashTable *parameters; GHashTable *utilization; GListPtr children; /* pe_resource_t* */ GListPtr dangling_migrations; /* pe_node_t* */ pe_resource_t *container; GListPtr fillers; + pe_node_t *pending_node; // Node on which pending_task is happening + #if ENABLE_VERSIONED_ATTRS xmlNode *versioned_parameters; #endif }; #if ENABLE_VERSIONED_ATTRS // Used as action->action_details if action->rsc is not NULL typedef struct pe_rsc_action_details_s { xmlNode *versioned_parameters; xmlNode *versioned_meta; } pe_rsc_action_details_t; #endif struct pe_action_s { int id; int priority; pe_resource_t *rsc; pe_node_t *node; xmlNode *op_entry; char *task; char *uuid; char *cancel_task; char *reason; enum pe_action_flags flags; enum rsc_start_requirement needs; enum action_fail_response on_fail; enum rsc_role_e fail_role; GHashTable *meta; GHashTable *extra; /* * These two variables are associated with the constraint logic * that requires one or more actions to be runnable before * this action is allowed to execute. * * These variables are used with features such as 'clone-min' which * requires at minimum X number of cloned instances to be running * before an order dependency can run. Another option that uses * this is 'require-all=false' in ordering constraints. This option * says "only require one instance of a resource to start before * allowing dependencies to start" -- basically, require-all=false is * the same as clone-min=1. */ /* current number of known runnable actions in the before list. */ int runnable_before; /* the number of "before" runnable actions required for this action * to be considered runnable */ int required_runnable_before; GListPtr actions_before; /* pe_action_wrapper_t* */ GListPtr actions_after; /* pe_action_wrapper_t* */ /* Some of the above fields could be moved to the details, * except for API backward compatibility.
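* (Observation, not a change made here: runnable_before and required_runnable_before above are only meaningful for actions carrying pe_action_requires_any, yet they live in the public struct for the same compatibility reason.)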
*/ void *action_details; // varies by type of action }; typedef struct pe_ticket_s { char *id; gboolean granted; time_t last_granted; gboolean standby; GHashTable *state; } pe_ticket_t; typedef struct pe_tag_s { char *id; GListPtr refs; } pe_tag_t; enum pe_link_state { pe_link_not_dumped, pe_link_dumped, pe_link_dup, }; enum pe_discover_e { pe_discover_always = 0, pe_discover_never, pe_discover_exclusive, }; /* *INDENT-OFF* */ enum pe_ordering { pe_order_none = 0x0, /* deleted */ pe_order_optional = 0x1, /* pure ordering, nothing implied */ pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */ pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */ pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */ pe_order_implies_first_master = 0x40, /* Imply 'first' is required when 'then' is required and then's rsc holds Master role. */ /* first requires then to be both runnable and migrate runnable. */ pe_order_implies_first_migratable = 0x80, pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */ pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */ pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX', * ensure instances of 'then' on 'nodeX' are too. * Only really useful if 'then' is a clone and 'first' is not */ pe_order_probe = 0x800, /* If 'first->rsc' is * - running but about to stop, ignore the constraint * - otherwise, behave as runnable_left */ pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */ pe_order_serialize_only = 0x4000, /* serialize */ pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */ pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */ pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */ pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */ pe_order_load = 0x200000, /* Only relevant if... 
*/ pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */ pe_order_anti_colocation = 0x800000, pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */ pe_order_then_cancels_first = 0x2000000, // if 'then' becomes required, 'first' becomes optional pe_order_trace = 0x4000000, /* test marker */ }; /* *INDENT-ON* */ typedef struct pe_action_wrapper_s { enum pe_ordering type; enum pe_link_state state; pe_action_t *action; } pe_action_wrapper_t; const char *rsc_printable_id(pe_resource_t *rsc); gboolean cluster_status(pe_working_set_t * data_set); void set_working_set_defaults(pe_working_set_t * data_set); void cleanup_calculations(pe_working_set_t * data_set); pe_resource_t *pe_find_resource(GListPtr rsc_list, const char *id_rh); pe_resource_t *pe_find_resource_with_flags(GListPtr rsc_list, const char *id, enum pe_find flags); pe_node_t *pe_find_node(GListPtr node_list, const char *uname); pe_node_t *pe_find_node_id(GListPtr node_list, const char *id); pe_node_t *pe_find_node_any(GListPtr node_list, const char *id, const char *uname); GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set); int pe_bundle_replicas(const pe_resource_t *rsc); #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *pe_rsc_action_details(pe_action_t *action); #endif /*! * \brief Check whether a resource is any clone type * * \param[in] rsc Resource to check * * \return TRUE if resource is clone, FALSE otherwise */ static inline bool pe_rsc_is_clone(pe_resource_t *rsc) { return rsc && (rsc->variant == pe_clone); } /*! * \brief Check whether a resource is a globally unique clone * * \param[in] rsc Resource to check * * \return TRUE if resource is unique clone, FALSE otherwise */ static inline bool pe_rsc_is_unique_clone(pe_resource_t *rsc) { return pe_rsc_is_clone(rsc) && is_set(rsc->flags, pe_rsc_unique); } /*! * \brief Check whether a resource is an anonymous clone * * \param[in] rsc Resource to check * * \return TRUE if resource is anonymous clone, FALSE otherwise */ static inline bool pe_rsc_is_anon_clone(pe_resource_t *rsc) { return pe_rsc_is_clone(rsc) && is_not_set(rsc->flags, pe_rsc_unique); } static inline bool pe_rsc_is_bundled(pe_resource_t *rsc) { return uber_parent(rsc)->parent != NULL; } // Deprecated type aliases typedef struct pe_action_s action_t; typedef struct pe_action_wrapper_s action_wrapper_t; typedef struct pe_node_s node_t; typedef struct pe_resource_s resource_t; typedef struct pe_tag_s tag_t; typedef struct pe_ticket_s ticket_t; typedef enum pe_quorum_policy no_quorum_policy_t; #ifdef __cplusplus } #endif #endif diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c index 18fc513aca..9f3e667d9d 100644 --- a/lib/pengine/clone.c +++ b/lib/pengine/clone.c @@ -1,643 +1,649 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #define VARIANT_CLONE 1 #include "./variant.h" void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources " "such as %s can be used only as anonymous clones", rsc->id, standard, rid); clone_data->clone_node_max = 1; clone_data->clone_max = QB_MIN(clone_data->clone_max, g_list_length(data_set->nodes)); } } resource_t * find_clone_instance(resource_t * rsc, const char *sub_id, pe_working_set_t * data_set) { char *child_id = NULL; resource_t *child = NULL; const char *child_base = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); child_base = ID(clone_data->xml_obj_child); child_id = crm_concat(child_base, sub_id, ':'); child = pe_find_resource(rsc->children, child_id); free(child_id); return child; } pe_resource_t * pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set) { gboolean as_orphan = FALSE; char *inc_num = NULL; char *inc_max = NULL; resource_t *child_rsc = NULL; xmlNode *child_copy = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE); if (clone_data->total_clones >= clone_data->clone_max) { // If we've already used all available instances, this is an orphan as_orphan = TRUE; } // Allocate instance numbers in numerical order (starting at 0) inc_num = crm_itoa(clone_data->total_clones); inc_max = crm_itoa(clone_data->clone_max); child_copy = copy_xml(clone_data->xml_obj_child); crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num); if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID)); child_rsc = NULL; goto bail; } /* child_rsc->globally_unique = rsc->globally_unique; */ CRM_ASSERT(child_rsc); clone_data->total_clones += 1; pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id); rsc->children = g_list_append(rsc->children, child_rsc); if (as_orphan) { set_bit_recursive(child_rsc, pe_rsc_orphan); } add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max); print_resource(LOG_TRACE, "Added ", child_rsc, FALSE); bail: free(inc_num); free(inc_max); return child_rsc; } gboolean clone_unpack(resource_t * rsc, pe_working_set_t * data_set) { int lpc = 0; xmlNode *a_child = NULL; xmlNode *xml_obj = rsc->xml; clone_variant_data_t *clone_data = NULL; const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED); const char *interleave = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX); const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); clone_data = calloc(1, sizeof(clone_variant_data_t)); rsc->variant_opaque = clone_data; if (is_set(rsc->flags, pe_rsc_promotable)) { const char *promoted_max = NULL; const char *promoted_node_max = NULL; promoted_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTED_MAX); if (promoted_max == NULL) { // @COMPAT deprecated since 2.0.0 promoted_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_MASTER_MAX); } promoted_node_max = g_hash_table_lookup(rsc->meta, 
XML_RSC_ATTR_PROMOTED_NODEMAX); if (promoted_node_max == NULL) { // @COMPAT deprecated since 2.0.0 promoted_node_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_MASTER_NODEMAX); } clone_data->promoted_max = crm_parse_int(promoted_max, "1"); clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1"); } // Implied by calloc() /* clone_data->xml_obj_child = NULL; */ clone_data->clone_node_max = crm_parse_int(max_clones_node, "1"); if (max_clones) { clone_data->clone_max = crm_parse_int(max_clones, "1"); } else if (g_list_length(data_set->nodes) > 0) { clone_data->clone_max = g_list_length(data_set->nodes); } else { clone_data->clone_max = 1; /* Handy during crm_verify */ } clone_data->interleave = crm_is_true(interleave); clone_data->ordered = crm_is_true(ordered); if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) { crm_config_err("Anonymous clones (%s) may only support one copy per node", rsc->id); clone_data->clone_node_max = 1; } pe_rsc_trace(rsc, "Options for %s", rsc->id); pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max); pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max); pe_rsc_trace(rsc, "\tClone is unique: %s", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); pe_rsc_trace(rsc, "\tClone is promotable: %s", is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false"); // Clones may contain a single group or primitive for (a_child = __xml_first_child(xml_obj); a_child != NULL; a_child = __xml_next_element(a_child)) { if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE) || crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) { clone_data->xml_obj_child = a_child; break; } } if (clone_data->xml_obj_child == NULL) { crm_config_err("%s has nothing to clone", rsc->id); return FALSE; } /* * Make clones ever so slightly sticky by default * * This helps ensure clone instances are not shuffled around the cluster * for no benefit in situations when pre-allocation is not appropriate */ if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) { add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1"); } /* This ensures that the globally-unique value always exists for children to * inherit when being unpacked, as well as in resource agents' environment. */ add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE); if (clone_data->clone_max <= 0) { /* Create one child instance so that unpack_find_resource() will hook * any orphans up to the parent correctly.
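* (Hedged illustration: node history may still reference an instance such as a hypothetical myclone:0 even when clone-max is 0, and unpack_find_resource() needs an existing child to attach it to.)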
*/ if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } else { // Create a child instance for each available instance number for (lpc = 0; lpc < clone_data->clone_max; lpc++) { if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } } pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id); return TRUE; } gboolean clone_active(resource_t * rsc, gboolean all) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; gboolean child_active = child_rsc->fns->active(child_rsc, all); if (all == FALSE && child_active) { return TRUE; } else if (all && child_active == FALSE) { return FALSE; } } if (all) { return TRUE; } else { return FALSE; } } static void short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data) { if(suffix == NULL) { suffix = ""; } if (list) { if (options & pe_print_html) { status_print("
  • "); } status_print("%s%s: [%s ]%s", prefix, type, list, suffix); if (options & pe_print_html) { status_print("
  • \n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } } } static const char * configured_role_str(resource_t * rsc) { const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if ((target_role == NULL) && rsc->children && rsc->children->data) { target_role = g_hash_table_lookup(((resource_t*)rsc->children->data)->meta, XML_RSC_ATTR_TARGET_ROLE); } return target_role; } static enum rsc_role_e configured_role(resource_t * rsc) { const char *target_role = configured_role_str(rsc); if (target_role) { return text2role(target_role); } return RSC_ROLE_UNKNOWN; } static void clone_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { char *child_text = crm_concat(pre_text, " ", ' '); const char *target_role = configured_role_str(rsc); GListPtr gIter = rsc->children; status_print("%sid); status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false"); status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print("failure_ignored=\"%s\" ", is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false"); if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; child_rsc->fns->print(child_rsc, child_text, options, print_data); } status_print("%s\n", pre_text); free(child_text); } bool is_set_recursive(resource_t * rsc, long long flag, bool any) { GListPtr gIter; bool all = !any; if(is_set(rsc->flags, flag)) { if(any) { return TRUE; } } else if(all) { return FALSE; } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { if(is_set_recursive(gIter->data, flag, any)) { if(any) { return TRUE; } } else if(all) { return FALSE; } } if(all) { return TRUE; } return FALSE; } void clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { char *list_text = NULL; char *child_text = NULL; char *stopped_list = NULL; const char *type = "Clone"; GListPtr master_list = NULL; GListPtr started_list = NULL; GListPtr gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; if (pre_text == NULL) { pre_text = " "; } if (options & pe_print_xml) { clone_print_xml(rsc, pre_text, options, print_data); return; } get_clone_variant_data(clone_data, rsc); child_text = crm_concat(pre_text, " ", ' '); if (is_set(rsc->flags, pe_rsc_promotable)) { type = "Master/Slave"; } status_print("%s%s Set: %s [%s]%s%s", pre_text ? pre_text : "", type, rsc->id, ID(clone_data->xml_obj_child), is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("\n
      \n"); } else if ((options & pe_print_log) == 0) { status_print("\n"); } for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; resource_t *child_rsc = (resource_t *) gIter->data; + gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (options & pe_print_clone_details) { print_full = TRUE; } - if (child_rsc->fns->active(child_rsc, FALSE) == FALSE) { - /* Inactive clone */ - if (is_set(child_rsc->flags, pe_rsc_orphan)) { - continue; - - } else if (is_set(rsc->flags, pe_rsc_unique)) { + if (is_set(rsc->flags, pe_rsc_unique)) { + // Print individual instance when unique (except stopped orphans) + if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; + } + + // Everything else in this block is for anonymous clones + + } else if (is_set(options, pe_print_pending) + && (child_rsc->pending_task != NULL) + && strcmp(child_rsc->pending_task, "probe")) { + // Print individual instance when non-probe action is pending + print_full = TRUE; - } else if (is_not_set(options, pe_print_clone_active)) { + } else if (partially_active == FALSE) { + // List stopped instances when requested (except orphans) + if (is_not_set(child_rsc->flags, pe_rsc_orphan) + && is_not_set(options, pe_print_clone_active)) { stopped_list = add_list_element(stopped_list, child_rsc->id); } - } else if (is_set_recursive(child_rsc, pe_rsc_unique, TRUE) - || is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) + } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { - /* Unique, unmanaged or failed clone */ - print_full = TRUE; - - } else if (is_set(options, pe_print_pending) && child_rsc->pending_task != NULL) { - /* In a pending state */ + // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { - /* Fully active anonymous clone */ + // Instance of fully active anonymous clone + node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { + // Instance is active on a single node + enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_SLAVE) { - /* And active on a single node as master */ master_list = g_list_append(master_list, location); } else { - /* And active on a single node as started/slave */ started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { - /* Partially active anonymous clone */ + // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { if (options & pe_print_html) { status_print("
    • \n"); } child_rsc->fns->print(child_rsc, child_text, options, print_data); if (options & pe_print_html) { status_print("
    • \n"); } } } /* Masters */ master_list = g_list_sort(master_list, sort_node_uname); for (gIter = master_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } short_print(list_text, child_text, "Masters", NULL, options, print_data); g_list_free(master_list); free(list_text); list_text = NULL; /* Started/Slaves */ started_list = g_list_sort(started_list, sort_node_uname); for (gIter = started_list; gIter; gIter = gIter->next) { node_t *host = gIter->data; list_text = add_list_element(list_text, host->details->uname); active_instances++; } if (is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if(role == RSC_ROLE_SLAVE) { short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data); } else { short_print(list_text, child_text, "Slaves", NULL, options, print_data); } } else { short_print(list_text, child_text, "Started", NULL, options, print_data); } g_list_free(started_list); free(list_text); list_text = NULL; if (is_not_set(options, pe_print_clone_active)) { const char *state = "Stopped"; enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (is_not_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GListPtr nIter; GListPtr list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped list for non-unique clones */ free(stopped_list); stopped_list = NULL; if (g_list_length(list) == 0) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, sort_node_uname); for (nIter = list; nIter != NULL; nIter = nIter->next) { node_t *node = (node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL) { stopped_list = add_list_element(stopped_list, node->details->uname); } } g_list_free(list); } short_print(stopped_list, child_text, state, NULL, options, print_data); free(stopped_list); } if (options & pe_print_html) { status_print("
    \n"); } free(child_text); } void clone_free(resource_t * rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; CRM_ASSERT(child_rsc); pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); free_xml(child_rsc->xml); child_rsc->xml = NULL; /* There could be a saved unexpanded xml */ free_xml(child_rsc->orig_xml); child_rsc->orig_xml = NULL; child_rsc->fns->free(child_rsc); } g_list_free(rsc->children); if (clone_data) { CRM_ASSERT(clone_data->demote_notify == NULL); CRM_ASSERT(clone_data->stop_notify == NULL); CRM_ASSERT(clone_data->start_notify == NULL); CRM_ASSERT(clone_data->promote_notify == NULL); } common_free(rsc); } enum rsc_role_e clone_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN; GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current); if (a_role > clone_role) { clone_role = a_role; } } pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role)); return clone_role; } /*! * \internal * \brief Check whether a clone has an instance for every node * * \param[in] rsc Clone to check * \param[in] data_set Cluster state */ bool pe__is_universal_clone(pe_resource_t *rsc, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->clone_max == g_list_length(data_set->nodes)) { return TRUE; } } return FALSE; } diff --git a/lib/pengine/native.c b/lib/pengine/native.c index 7ce2c364ee..41ceddeded 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -1,981 +1,1004 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #define VARIANT_NATIVE 1 #include "./variant.h" /*! 
* \internal * \brief Check whether a resource is active on multiple nodes */ static bool is_multiply_active(pe_resource_t *rsc) { unsigned int count = 0; if (rsc->variant == pe_native) { pe__find_active_requires(rsc, &count); } return count > 1; } void native_add_running(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { GListPtr gIter = rsc->running_on; CRM_CHECK(node != NULL, return); for (; gIter != NULL; gIter = gIter->next) { node_t *a_node = (node_t *) gIter->data; CRM_CHECK(a_node != NULL, return); if (safe_str_eq(a_node->details->id, node->details->id)) { return; } } pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname, is_set(rsc->flags, pe_rsc_managed)?"":"(unmanaged)"); rsc->running_on = g_list_append(rsc->running_on, node); if (rsc->variant == pe_native) { node->details->running_rsc = g_list_append(node->details->running_rsc, rsc); } if (rsc->variant == pe_native && node->details->maintenance) { clear_bit(rsc->flags, pe_rsc_managed); } if (is_not_set(rsc->flags, pe_rsc_managed)) { resource_t *p = rsc->parent; pe_rsc_info(rsc, "resource %s isn't managed", rsc->id); resource_location(rsc, node, INFINITY, "not_managed_default", data_set); while(p && node->details->online) { /* add without the additional location constraint */ p->running_on = g_list_append(p->running_on, node); p = p->parent; } return; } if (is_multiply_active(rsc)) { switch (rsc->recovery_type) { case recovery_stop_only: { GHashTableIter gIter; node_t *local_node = NULL; /* make sure it doesn't come up again */ if (rsc->allowed_nodes != NULL) { g_hash_table_destroy(rsc->allowed_nodes); } rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_iter_init(&gIter, rsc->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) { local_node->weight = -INFINITY; } } break; case recovery_stop_start: break; case recovery_block: clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); /* If the resource belongs to a group or bundle configured with * multiple-active=block, block the entire entity. 
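* (Illustrative sketch, not from this patch: a group carrying the meta attribute multiple-active="block" whose member is found active on two nodes ends up with every member set to pe_rsc_block and cleared of pe_rsc_managed, exactly as the loop below does.)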
*/ if (rsc->parent && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container) && rsc->parent->recovery_type == recovery_block) { GListPtr gIter = rsc->parent->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; clear_bit(child->flags, pe_rsc_managed); set_bit(child->flags, pe_rsc_block); } } break; } crm_debug("%s is active on multiple nodes including %s: %s", rsc->id, node->details->uname, recovery2text(rsc->recovery_type)); } else { pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname); } if (rsc->parent != NULL) { native_add_running(rsc->parent, node, data_set); } } static void recursive_clear_unique(pe_resource_t *rsc) { clear_bit(rsc->flags, pe_rsc_unique); add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE); for (GList *child = rsc->children; child != NULL; child = child->next) { recursive_clear_unique((pe_resource_t *) child->data); } } gboolean native_unpack(resource_t * rsc, pe_working_set_t * data_set) { resource_t *parent = uber_parent(rsc); native_variant_data_t *native_data = NULL; const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); uint32_t ra_caps = pcmk_get_ra_caps(standard); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); native_data = calloc(1, sizeof(native_variant_data_t)); rsc->variant_opaque = native_data; // Only some agent standards support unique and promotable clones if (is_not_set(ra_caps, pcmk_ra_cap_unique) && is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) { /* @COMPAT We should probably reject this situation as an error (as we * do for promotable below) rather than warn and convert, but that would * be a backward-incompatible change that we should probably do with a * transform at a schema major version bump. */ pe__force_anon(standard, parent, rsc->id, data_set); /* Clear globally-unique on the parent and all its descendents unpacked * so far (clearing the parent should make any future children unpacking * correct). We have to clear this resource explicitly because it isn't * hooked into the parent's children yet. 
*/ recursive_clear_unique(parent); recursive_clear_unique(rsc); } if (is_not_set(ra_caps, pcmk_ra_cap_promotable) && is_set(parent->flags, pe_rsc_promotable)) { pe_err("Resource %s is of type %s and therefore " "cannot be used as a promotable clone resource", rsc->id, standard); return FALSE; } return TRUE; } static bool rsc_is_on_node(resource_t *rsc, const node_t *node, int flags) { pe_rsc_trace(rsc, "Checking whether %s is on %s", rsc->id, node->details->uname); if (is_set(flags, pe_find_current) && rsc->running_on) { for (GListPtr iter = rsc->running_on; iter; iter = iter->next) { node_t *loc = (node_t *) iter->data; if (loc->details == node->details) { return TRUE; } } } else if (is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) { return TRUE; } else if (is_not_set(flags, pe_find_current) && rsc->allocated_to && (rsc->allocated_to->details == node->details)) { return TRUE; } return FALSE; } resource_t * native_find_rsc(resource_t * rsc, const char *id, const node_t *on_node, int flags) { bool match = FALSE; resource_t *result = NULL; CRM_CHECK(id && rsc && rsc->id, return NULL); if (flags & pe_find_clone) { const char *rid = ID(rsc->xml); if (!pe_rsc_is_clone(uber_parent(rsc))) { match = FALSE; } else if (!strcmp(id, rsc->id) || safe_str_eq(id, rid)) { match = TRUE; } } else if (!strcmp(id, rsc->id)) { match = TRUE; } else if (is_set(flags, pe_find_renamed) && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } else if (is_set(flags, pe_find_any) || (is_set(flags, pe_find_anon) && is_not_set(rsc->flags, pe_rsc_unique))) { match = pe_base_name_eq(rsc, id); } if (match && on_node) { bool match_node = rsc_is_on_node(rsc, on_node, flags); if (match_node == FALSE) { match = FALSE; } } if (match) { return rsc; } for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = rsc->fns->find_rsc(child, id, on_node, flags); if (result) { return result; } } return NULL; } char * native_parameter(resource_t * rsc, node_t * node, gboolean create, const char *name, pe_working_set_t * data_set) { char *value_copy = NULL; const char *value = NULL; GHashTable *hash = NULL; GHashTable *local_hash = NULL; CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); if (create || g_hash_table_size(rsc->parameters) == 0) { if (node != NULL) { pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname); } else { pe_rsc_trace(rsc, "Creating default hash"); } local_hash = crm_str_table_new(); get_rsc_attributes(local_hash, rsc, node, data_set); hash = local_hash; } else { hash = rsc->parameters; } value = g_hash_table_lookup(hash, name); if (value == NULL) { /* try meta attributes instead */ value = g_hash_table_lookup(rsc->meta, name); } if (value != NULL) { value_copy = strdup(value); } if (local_hash != NULL) { g_hash_table_destroy(local_hash); } return value_copy; } gboolean native_active(resource_t * rsc, gboolean all) { GListPtr gIter = rsc->running_on; for (; gIter != NULL; gIter = gIter->next) { node_t *a_node = (node_t *) gIter->data; if (a_node->details->unclean) { crm_debug("Resource %s: node %s is unclean", rsc->id, a_node->details->uname); return TRUE; } else if (a_node->details->online == FALSE) { crm_debug("Resource %s: node %s is offline", rsc->id, a_node->details->uname); } else { crm_debug("Resource %s active on %s", rsc->id, a_node->details->uname); return TRUE; } } return FALSE; } struct 
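/* [Editor's note] The flag dispatch in rsc_is_on_node() above is easy to misread in this flattened form, so here is a minimal stand-alone sketch of the same contract. Every name below (struct rsc, FIND_CURRENT, ...) is an invented stand-in, not the Pacemaker API, and running_on is simplified to a single node instead of a list. */
#include <stdbool.h>
#include <stddef.h>

enum { FIND_CURRENT = 0x1, FIND_INACTIVE = 0x2 };

struct node { int id; };
struct rsc { struct node *running_on; struct node *allocated_to; };

/* FIND_CURRENT matches where the resource actually runs; FIND_INACTIVE
 * matches only resources running nowhere; the default compares against
 * the scheduler's placement decision. */
static bool rsc_on_node(const struct rsc *r, const struct node *n, int flags)
{
    if ((flags & FIND_CURRENT) && (r->running_on != NULL)) {
        return r->running_on->id == n->id;
    } else if ((flags & FIND_INACTIVE) && (r->running_on == NULL)) {
        return true;
    } else if (!(flags & FIND_CURRENT) && (r->allocated_to != NULL)) {
        return r->allocated_to->id == n->id;
    }
    return false;
}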
print_data_s { long options; void *print_data; }; static void native_print_attr(gpointer key, gpointer value, gpointer user_data) { long options = ((struct print_data_s *)user_data)->options; void *print_data = ((struct print_data_s *)user_data)->print_data; status_print("Option: %s = %s\n", (char *)key, (char *)value); } static const char * native_pending_state(resource_t * rsc) { const char *pending_state = NULL; if (safe_str_eq(rsc->pending_task, CRMD_ACTION_START)) { pending_state = "Starting"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STOP)) { pending_state = "Stopping"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE)) { pending_state = "Migrating"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED)) { /* Work might be done in here. */ pending_state = "Migrating"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE)) { pending_state = "Promoting"; } else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE)) { pending_state = "Demoting"; } return pending_state; } static const char * native_pending_task(resource_t * rsc) { const char *pending_task = NULL; if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STATUS)) { pending_task = "Monitoring"; /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * unpack.c:unpack_rsc_op(). */ /* } else if (safe_str_eq(rsc->pending_task, "probe")) { pending_task = "Checking"; */ } return pending_task; } static enum rsc_role_e native_displayable_role(resource_t *rsc) { enum rsc_role_e role = rsc->role; if ((role == RSC_ROLE_STARTED) && is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { role = RSC_ROLE_SLAVE; } return role; } static const char * native_displayable_state(resource_t *rsc, long options) { const char *rsc_state = NULL; if (options & pe_print_pending) { rsc_state = native_pending_state(rsc); } if (rsc_state == NULL) { rsc_state = role2text(native_displayable_role(rsc)); } return rsc_state; } static void native_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, options); const char *target_role = NULL; /* resource information. */ status_print("%s<resource ", pre_text); status_print("id=\"%s\" ", rsc_printable_id(rsc)); status_print("resource_agent=\"%s%s%s:%s\" ", class, prov ? "::" : "", prov ? prov : "", crm_element_value(rsc->xml, XML_ATTR_TYPE)); status_print("role=\"%s\" ", rsc_state); if (rsc->meta) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print("active=\"%s\" ", rsc->fns->active(rsc, TRUE) ? "true" : "false"); status_print("orphaned=\"%s\" ", is_set(rsc->flags, pe_rsc_orphan) ? "true" : "false"); status_print("blocked=\"%s\" ", is_set(rsc->flags, pe_rsc_block) ? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print("failure_ignored=\"%s\" ", is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false"); status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on)); if (options & pe_print_pending) { const char *pending_task = native_pending_task(rsc); if (pending_task) { status_print("pending=\"%s\" ", pending_task); } } if (options & pe_print_dev) { status_print("provisional=\"%s\" ", is_set(rsc->flags, pe_rsc_provisional) ? "true" : "false"); status_print("runnable=\"%s\" ", is_set(rsc->flags, pe_rsc_runnable) ? "true" : "false"); status_print("priority=\"%f\" ", (double)rsc->priority); status_print("variant=\"%s\" ", crm_element_name(rsc->xml)); } /* print out the nodes this resource is running on */ if (options & pe_print_rsconly) { status_print("/>\n"); /* do nothing */ } else if (rsc->running_on != NULL) { GListPtr gIter = rsc->running_on; status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; status_print("%s    <node name=\"%s\" id=\"%s\" cached=\"%s\"/>\n", pre_text, node->details->uname, node->details->id, node->details->online ? "false" : "true"); } status_print("%s</resource>\n", pre_text); } else { status_print("/>\n"); } } /* making this inline rather than a macro prevents a coverity "unreachable" * warning on the first usage */ static inline const char * comma_if(int i) { return i ? ", " : ""; } void common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data) { const char *desc = NULL; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *target_role = NULL; enum rsc_role_e role = native_displayable_role(rsc); int offset = 0; int flagOffset = 0; char buffer[LINE_MAX]; char flagBuffer[LINE_MAX]; CRM_ASSERT(rsc->variant == pe_native); CRM_ASSERT(kind != NULL); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (pre_text == NULL && (options & pe_print_printf)) { pre_text = " "; } if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { node = NULL; } if (options & pe_print_html) { if (is_not_set(rsc->flags, pe_rsc_managed)) { status_print("<font face=\"Courier\">"); } else if (is_set(rsc->flags, pe_rsc_failed)) { status_print("<font color=\"red\">"); } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) { status_print("<font color=\"red\">"); } else if (g_list_length(rsc->running_on) > 1) { status_print("<font color=\"orange\">"); } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) { status_print("<font color=\"yellow\">"); } else { status_print("<font color=\"green\">"); } } if(pre_text) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", pre_text); } offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", name); offset += snprintf(buffer + offset, LINE_MAX - offset, "\t(%s", class); if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov); } offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s):\t", kind); if(is_set(rsc->flags, pe_rsc_orphan)) { offset += snprintf(buffer + offset, LINE_MAX - offset, " ORPHANED "); } if(role > RSC_ROLE_SLAVE && is_set(rsc->flags, pe_rsc_failed)) { offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED %s", role2text(role)); } else if(is_set(rsc->flags, pe_rsc_failed)) { offset += snprintf(buffer + offset, LINE_MAX - offset, "FAILED"); } else { const char *rsc_state = native_displayable_state(rsc, options); offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_state); } if(node) { offset += snprintf(buffer + offset, LINE_MAX - offset, " %s", node->details->uname);
if (node->details->online == FALSE && node->details->unclean) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%sUNCLEAN", comma_if(flagOffset)); } } if (options & pe_print_pending) { const char *pending_task = native_pending_task(rsc); if (pending_task) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%s%s", comma_if(flagOffset), pending_task); } } if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); /* Ignore target role Started, as it is the default anyway * (and would also allow a Master to be Master). * Show if target role limits our abilities. */ if (target_role_e == RSC_ROLE_STOPPED) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%sdisabled", comma_if(flagOffset)); rsc->cluster->disabled_resources++; } else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable) && target_role_e == RSC_ROLE_SLAVE) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%starget-role:%s", comma_if(flagOffset), target_role); rsc->cluster->disabled_resources++; } } if (is_set(rsc->flags, pe_rsc_block)) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%sblocked", comma_if(flagOffset)); rsc->cluster->blocked_resources++; } else if (is_not_set(rsc->flags, pe_rsc_managed)) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%sunmanaged", comma_if(flagOffset)); } if(is_set(rsc->flags, pe_rsc_failure_ignored)) { flagOffset += snprintf(flagBuffer + flagOffset, LINE_MAX - flagOffset, "%sfailure ignored", comma_if(flagOffset)); } if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { desc = crm_element_value(rsc->xml, XML_ATTR_DESC); } CRM_LOG_ASSERT(offset > 0); if(flagOffset > 0) { status_print("%s (%s)%s%s", buffer, flagBuffer, desc?" ":"", desc?desc:""); } else { status_print("%s%s%s", buffer, desc?" ":"", desc?desc:""); } #if CURSES_ENABLED if ((options & pe_print_rsconly) || g_list_length(rsc->running_on) > 1) { /* Done */ } else if (options & pe_print_ncurses) { /* coverity[negative_returns] False positive */ move(-1, 0); } #endif if (options & pe_print_html) { status_print(" </font> "); } if ((options & pe_print_rsconly)) { } else if (g_list_length(rsc->running_on) > 1) { GListPtr gIter = rsc->running_on; int counter = 0; if (options & pe_print_html) { status_print("<ul>\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("["); } for (; gIter != NULL; gIter = gIter->next) { node_t *n = (node_t *) gIter->data; counter++; if (options & pe_print_html) { status_print("<li>\n%s", n->details->uname); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" %s", n->details->uname); } else if ((options & pe_print_log)) { status_print("\t%d : %s", counter, n->details->uname); } else { status_print("%s", n->details->uname); } if (options & pe_print_html) { status_print("</li>\n"); } } if (options & pe_print_html) { status_print("</ul>\n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" ]"); } } if (options & pe_print_html) { status_print("<br/>\n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } if (options & pe_print_details) { struct print_data_s pdata; pdata.options = options; pdata.print_data = print_data; g_hash_table_foreach(rsc->parameters, native_print_attr, &pdata); } if (options & pe_print_dev) { GHashTableIter iter; node_t *n = NULL; status_print("%s\t(%s%svariant=%s, priority=%f)", pre_text, is_set(rsc->flags, pe_rsc_provisional) ? "provisional, " : "", is_set(rsc->flags, pe_rsc_runnable) ? "" : "non-startable, ", crm_element_name(rsc->xml), (double)rsc->priority); status_print("%s\tAllowed Nodes", pre_text); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { status_print("%s\t * %s %d", pre_text, n->details->uname, n->weight); } } if (options & pe_print_max_details) { GHashTableIter iter; node_t *n = NULL; status_print("%s\t=== Allowed Nodes\n", pre_text); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) { print_node("\t", n, FALSE); } } } void native_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { node_t *node = NULL; CRM_ASSERT(rsc->variant == pe_native); if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } node = pe__current_node(rsc); + + if (node == NULL) { + // This is set only if a non-probe action is pending on this node + node = rsc->pending_node; + } + common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data); } void native_free(resource_t * rsc) { pe_rsc_trace(rsc, "Freeing resource action list (not the data)"); common_free(rsc); } enum rsc_role_e native_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e role = rsc->next_role; if (current) { role = rsc->role; } pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role)); return role; } -node_t * -native_location(resource_t * rsc, GListPtr * list, gboolean current) +/*!
+ * \internal + * \brief List nodes where a resource (or any of its children) is + * + * \param[in] rsc Resource to check + * \param[out] list List to add result to + * \param[in] current 0 = where known, 1 = running, 2 = running or pending + * + * \return If list contains only one node, that node + */ +pe_node_t * +native_location(const pe_resource_t *rsc, GList **list, int current) { node_t *one = NULL; GListPtr result = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; child->fns->location(child, &result, current); } - } else if (current && rsc->running_on) { - result = g_list_copy(rsc->running_on); + } else if (current) { + + if (rsc->running_on) { + result = g_list_copy(rsc->running_on); + } + if ((current == 2) && rsc->pending_node + && !pe_find_node_id(result, rsc->pending_node->details->id)) { + result = g_list_append(result, rsc->pending_node); + } } else if (current == FALSE && rsc->allocated_to) { result = g_list_append(NULL, rsc->allocated_to); } - if (result && g_list_length(result) == 1) { - one = g_list_nth_data(result, 0); + if (result && (result->next == NULL)) { + one = result->data; } if (list) { GListPtr gIter = result; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) { *list = g_list_append(*list, node); } } } g_list_free(result); return one; } static void get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table) { GListPtr gIter = rsc_list; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); int offset = 0; char buffer[LINE_MAX]; int *rsc_counter = NULL; int *active_counter = NULL; if (rsc->variant != pe_native) { continue; } offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class); if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov); } offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind); CRM_LOG_ASSERT(offset > 0); if (rsc_table) { rsc_counter = g_hash_table_lookup(rsc_table, buffer); if (rsc_counter == NULL) { rsc_counter = calloc(1, sizeof(int)); *rsc_counter = 0; g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter); } (*rsc_counter)++; } if (active_table) { GListPtr gIter2 = rsc->running_on; for (; gIter2 != NULL; gIter2 = gIter2->next) { node_t *node = (node_t *) gIter2->data; GHashTable *node_table = NULL; if (node->details->unclean == FALSE && node->details->online == FALSE) { continue; } node_table = g_hash_table_lookup(active_table, node->details->uname); if (node_table == NULL) { node_table = crm_str_table_new(); g_hash_table_insert(active_table, strdup(node->details->uname), node_table); } active_counter = g_hash_table_lookup(node_table, buffer); if (active_counter == NULL) { active_counter = calloc(1, sizeof(int)); *active_counter = 0; g_hash_table_insert(node_table, strdup(buffer), active_counter); } (*active_counter)++; } } } } static void destroy_node_table(gpointer data) { GHashTable *node_table = data; if (node_table) { g_hash_table_destroy(node_table); } } void print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options, void *print_data, gboolean 
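/* [Editor's note] The rewritten native_location() above gives its current
 * argument three meanings: 0 = the node the scheduler has assigned, 1 =
 * nodes the resource is known to be running on, 2 = running plus the
 * pending node. A usage sketch against the new signature; rsc and the
 * surrounding scope are hypothetical:
 *
 *     GList *where = NULL;
 *     pe_node_t *only = native_location(rsc, &where, 2); // running or pending
 *
 *     if (only != NULL) {
 *         // the result list had exactly one entry (note the cheap
 *         // result->next == NULL test above instead of g_list_length())
 *     }
 *     g_list_free(where); // the list is caller-owned; the nodes are not
 */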
print_all) { GHashTable *rsc_table = crm_str_table_new(); GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_node_table); GHashTableIter hash_iter; char *type = NULL; int *rsc_counter = NULL; get_rscs_brief(rsc_list, rsc_table, active_table); g_hash_table_iter_init(&hash_iter, rsc_table); while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) { GHashTableIter hash_iter2; char *node_name = NULL; GHashTable *node_table = NULL; int active_counter_all = 0; g_hash_table_iter_init(&hash_iter2, active_table); while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) { int *active_counter = g_hash_table_lookup(node_table, type); if (active_counter == NULL || *active_counter == 0) { continue; } else { active_counter_all += *active_counter; } if (options & pe_print_rsconly) { node_name = NULL; } if (options & pe_print_html) { status_print("<li>\n"); } if (print_all) { status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, rsc_counter ? *rsc_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } else { status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "", active_counter ? *active_counter : 0, type, active_counter && (*active_counter > 0) && node_name ? node_name : ""); } if (options & pe_print_html) { status_print("</li>\n"); } } if (print_all && active_counter_all == 0) { if (options & pe_print_html) { status_print("<li>\n"); } status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "", active_counter_all, rsc_counter ? *rsc_counter : 0, type); if (options & pe_print_html) { status_print("</li>\n"); } } } if (rsc_table) { g_hash_table_destroy(rsc_table); rsc_table = NULL; } if (active_table) { g_hash_table_destroy(active_table); active_table = NULL; } } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index d15852b75f..a2c74b16b3 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,3441 +1,3449 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); #define set_config_flag(data_set, option, flag) do { \ const char *tmp = pe_pref(data_set->config_hash, option); \ if(tmp) { \ if(crm_is_true(tmp)) { \ set_bit(data_set->flags, flag); \ } else { \ clear_bit(data_set->flags, flag); \ } \ } \ } while(0) gboolean unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response *failed, pe_working_set_t * data_set); static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node); // Bitmask for warnings we only want to print once uint32_t pe_wo = 0; static gboolean is_dangling_container_remote_node(node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section. */ if (is_remote_node(node) && node->details->remote_rsc && node->details->remote_rsc->container == NULL && is_set(node->details->remote_rsc->flags, pe_rsc_orphan_container_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] data_set Current working set of cluster * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed */ void pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (is_container_remote_node(node)) { resource_t *rsc = node->details->remote_rsc->container; if (is_set(rsc->flags, pe_rsc_failed) == FALSE) { if (!is_set(rsc->flags, pe_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", node->details->uname, reason, rsc->id); } else { crm_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", node->details->uname, rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds.
*/ node->details->remote_requires_reset = TRUE; set_bit(rsc->flags, pe_rsc_failed); } } } else if (is_dangling_container_remote_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", node->details->uname, reason); set_bit(node->details->remote_rsc->flags, pe_rsc_failed); } else if (is_baremetal_remote_node(node)) { resource_t *rsc = node->details->remote_rsc; if (rsc && (!is_set(rsc->flags, pe_rsc_managed))) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", node->details->uname, reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; crm_warn("Remote node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, data_set); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", node->details->uname, pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean", reason); } else { crm_warn("Cluster node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, data_set); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ "[(@" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_PROVIDES "'" \ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(unsigned long long flag, const char *xpath, pe_working_set_t *data_set) { xmlXPathObjectPtr result = NULL; if (is_not_set(data_set->flags, flag)) { result = xpath_search(data_set->input, xpath); if (result && (numXpathResults(result) > 0)) { set_bit(data_set->flags, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set) { const char *value = NULL; GHashTable *config_hash = crm_str_table_new(); data_set->config_hash = config_hash; unpack_instance_attributes(data_set->input, config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set->now); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); if(is_not_set(data_set->flags, pe_flag_startup_probes)) { crm_info("Startup probes: disabled (dangerous)"); } value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_notice("Watchdog will be used via SBD if fencing is required"); set_bit(data_set->flags, pe_flag_have_stonith_resource); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. 
*/ set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = crm_get_msec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); if (!strcmp(data_set->stonith_action, "poweroff")) { pe_warn_once(pe_wo_poweroff, "Support for stonith-action of 'poweroff' is deprecated " "and will be removed in a future release (use 'off' instead)"); data_set->stonith_action = "off"; } crm_trace("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing); crm_debug("Concurrent fencing is %s", is_set(data_set->flags, pe_flag_concurrent_fencing) ? "enabled" : "disabled"); set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything); crm_debug("Stop all active resources: %s", is_set(data_set->flags, pe_flag_stop_everything) ? "true" : "false"); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if (is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "no-quorum-policy"); if (safe_str_eq(value, "ignore")) { data_set->no_quorum_policy = no_quorum_ignore; } else if (safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; } else if (safe_str_eq(value, "suicide")) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { int do_panic = 0; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if (do_panic || is_set(data_set->flags, pe_flag_have_quorum)) { data_set->no_quorum_policy = no_quorum_suicide; } else { crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { crm_config_err("Resetting no-quorum-policy to 'stop': stonith is not configured"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case no_quorum_suicide: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_trace("Orphan resources are %s", is_set(data_set->flags, pe_flag_stop_rsc_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_trace("Orphan resource actions are %s", is_set(data_set->flags, pe_flag_stop_action_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_trace("Stopped resources are removed from the status section: %s", is_set(data_set->flags, pe_flag_remove_after_stop) ? "true" : "false"); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_trace("Maintenance mode: %s", is_set(data_set->flags, pe_flag_maintenance_mode) ? 
"true" : "false"); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_trace("Start failures are %s", is_set(data_set->flags, pe_flag_start_failure_fatal) ? "always fatal" : "handled by failcount"); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing); } if (is_set(data_set->flags, pe_flag_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes"); } node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_trace("Placement strategy: %s", data_set->placement_strategy); return TRUE; } static void destroy_digest_cache(gpointer ptr) { op_digest_cache_t *data = ptr; free_xml(data->params_all); free_xml(data->params_secure); free_xml(data->params_restart); free(data->digest_all_calc); free(data->digest_restart_calc); free(data->digest_secure_calc); free(data); } node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set) { node_t *new_node = NULL; if (pe_find_node(data_set->nodes, uname) != NULL) { crm_config_warn("Detected multiple node entries with uname=%s" " - this is rarely intended", uname); } new_node = calloc(1, sizeof(node_t)); if (new_node == NULL) { return NULL; } new_node->weight = char2score(score); new_node->fixed = FALSE; new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->type = node_ping; if (safe_str_eq(type, "remote")) { new_node->details->type = node_remote; set_bit(data_set->flags, pe_flag_have_remote_nodes); } else if ((type == NULL) || safe_str_eq(type, "member")) { new_node->details->type = node_member; } new_node->details->attrs = crm_str_table_new(); if (is_remote_node(new_node)) { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("remote")); } else { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("cluster")); } new_node->details->utilization = crm_str_table_new(); new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_digest_cache); data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname); return new_node; } bool remote_id_conflict(const char *remote_name, pe_working_set_t *data) { bool match = FALSE; #if 1 pe_find_resource(data->resources, remote_name); #else if (data->name_check == NULL) { data->name_check = g_hash_table_new(crm_str_hash, g_str_equal); for (xml_rsc = __xml_first_child(parent); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { const char *id = ID(xml_rsc); /* avoiding heap allocation here because we know the 
duration of this hashtable allows us to */ g_hash_table_insert(data->name_check, (char *) id, (char *) id); } } if (g_hash_table_lookup(data->name_check, remote_name)) { match = TRUE; } #endif if (match) { crm_err("Invalid remote-node name, a resource called '%s' already exists.", remote_name); return NULL; } return match; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = ID(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *container_managed = NULL; for (attr_set = __xml_first_child(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) { if (safe_str_neq((const char *)attr_set->name, XML_TAG_META_SETS)) { continue; } for (attr = __xml_first_child(attr_set); attr != NULL; attr = __xml_next_element(attr)) { const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); if (safe_str_eq(name, XML_RSC_ATTR_REMOTE_NODE)) { remote_name = value; } else if (safe_str_eq(name, "remote-addr")) { remote_server = value; } else if (safe_str_eq(name, "remote-port")) { remote_port = value; } else if (safe_str_eq(name, "remote-connect-timeout")) { connect_timeout = value; } else if (safe_str_eq(name, "remote-allow-migrate")) { remote_allow_migrate=value; } else if (safe_str_eq(name, XML_RSC_ATTR_MANAGED)) { container_managed = value; } } } if (remote_name == NULL) { return NULL; } if (remote_id_conflict(remote_name, data)) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, container_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pe_working_set_t *data_set, node_t *new_node) { if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (is_set(data_set->flags, pe_flag_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. 
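/* [Editor's note] The startup-fencing handling above leans on a property of
 * the set_config_flag() macro from earlier in this file: an option missing
 * from the config hash leaves the flag bit untouched, which is why
 * startup-fencing keeps its built-in default (enabled) unless explicitly
 * configured. The same three-state shape in plain C, with the GLib hash
 * omitted and all names illustrative: */
#include <string.h>

/* NULL means "option unset, keep the current bit"; otherwise the bit follows
 * the option's boolean value (strcmp stands in for crm_is_true()'s looser
 * parsing). */
static void set_flag_from_option(unsigned *flags, const char *value, unsigned bit)
{
    if (value == NULL) {
        return; /* keep previous setting */
    }
    if (strcmp(value, "true") == 0) {
        *flags |= bit;
    } else {
        *flags &= ~bit;
    }
}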
new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; for (xml_obj = __xml_first_child(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { crm_config_err("Must specify id tag in <node>"); continue; } new_node = pe_create_node(id, uname, type, score, data_set); if (new_node == NULL) { return FALSE; } /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ handle_startup_fencing(data_set, new_node); add_node_attrs(xml_obj, new_node, FALSE, data_set); unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set->now); crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) { crm_info("Creating a fake local node"); pe_create_node(data_set->localhost, data_set->localhost, NULL, NULL, data_set); } return TRUE; } static void setup_container(resource_t * rsc, pe_working_set_t * data_set) { const char *container_id = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; setup_container(child_rsc, data_set); } return; } container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER); if (container_id && safe_str_neq(container_id, rsc->id)) { resource_t *container = pe_find_resource(data_set->resources, container_id); if (container) { rsc->container = container; set_bit(container->flags, pe_rsc_is_container); container->fillers = g_list_append(container->fillers, rsc); pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; /* generate remote nodes from resource config before unpacking resources */ for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { const char *new_node_id = NULL; /* first check if this is a bare metal remote node. Bare metal remote nodes * are defined as a resource primitive only. */ if (xml_contains_remote_node(xml_obj)) { new_node_id = ID(xml_obj); /* The "pe_find_node" check is here to make sure we don't iterate over * an expanded node that has already been added to the node list. */ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found baremetal remote node %s in container resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } /* Now check for guest remote nodes. * guest remote nodes are defined within a resource primitive. * Example1: a vm resource might be configured as a remote node.
* Example2: a vm resource might be configured within a group to be a remote node. * Note: right now we only support guest remote nodes in as a standalone primitive * or a primitive within a group. No cloned primitives can be a guest remote node * right now */ if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, TRUE)) { /* expands a metadata defined remote resource into the xml config * as an actual rsc primitive to be unpacked later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest remote node %s in container resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } else if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, TRUE)) { xmlNode *xml_obj2 = NULL; /* search through a group to see if any of the primitive contain a remote node. */ for (xml_obj2 = __xml_first_child(xml_obj); xml_obj2 != NULL; xml_obj2 = __xml_next_element(xml_obj2)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest remote node %s in container resource %s which is in group %s", new_node_id, ID(xml_obj2), ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the PE calculations. */ static void link_rsc2remotenode(pe_working_set_t *data_set, resource_t *new_rsc) { node_t *remote_node = NULL; if (new_rsc->is_remote_node == FALSE) { return; } if (is_set(data_set->flags, pe_flag_quick_location)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } print_resource(LOG_TRACE, "Linking remote-node connection resource, ", new_rsc, FALSE); remote_node = pe_find_node(data_set->nodes, new_rsc->id); CRM_CHECK(remote_node != NULL, return;); remote_node->details->remote_rsc = new_rsc; /* If this is a baremetal remote-node (no container resource * associated with it) then we need to handle startup fencing the same way * as cluster nodes. */ if (new_rsc->container == NULL) { handle_startup_fencing(data_set, remote_node); } else { /* At this point we know if the remote node is a container or baremetal * remote node, update the #kind attribute if a container is involved */ g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); } } static void destroy_tag(gpointer data) { tag_t *tag = data; if (tag) { free(tag->id); g_list_free_full(tag->refs, free); free(tag); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] data_set Where to put resource information * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when common_unpack() calls resource_location() */ gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; GListPtr gIter = NULL; data_set->template_rsc_sets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_obj = __xml_first_child(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { resource_t *new_rsc = NULL; if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) { const char *template_id = ID(xml_obj); if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets, template_id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL); } continue; } crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj)); if (common_unpack(xml_obj, &new_rsc, NULL, data_set)) { data_set->resources = g_list_append(data_set->resources, new_rsc); print_resource(LOG_TRACE, "Added ", new_rsc, FALSE); } else { crm_config_err("Failed unpacking %s %s", crm_element_name(xml_obj), crm_element_value(xml_obj, XML_ATTR_ID)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } } } for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; setup_container(rsc, data_set); link_rsc2remotenode(data_set, rsc); } data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority); if (is_set(data_set->flags, pe_flag_quick_location)) { /* Ignore */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { crm_config_err("Resource start-up disabled since no STONITH resources have been defined"); crm_config_err("Either configure some or disable STONITH with the stonith-enabled option"); crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set) { xmlNode *xml_tag = NULL; data_set->tags = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_tag = __xml_first_child(xml_tags); xml_tag != NULL; xml_tag = __xml_next_element(xml_tag)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = ID(xml_tag); if (crm_str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, TRUE) == FALSE) { continue; } if (tag_id == NULL) { crm_config_err("Failed unpacking %s: %s should be specified", crm_element_name(xml_tag), XML_ATTR_ID); continue; } for (xml_obj_ref = __xml_first_child(xml_tag); xml_obj_ref != NULL; xml_obj_ref = __xml_next_element(xml_obj_ref)) { const char *obj_ref = ID(xml_obj_ref); if (crm_str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, TRUE) == FALSE) { continue; } if (obj_ref == NULL) { crm_config_err("Failed unpacking %s for tag %s: %s should be specified", crm_element_name(xml_obj_ref), tag_id, XML_ATTR_ID); continue; } if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) { return FALSE; } } } return TRUE; } /* The ticket state section: * "/cib/status/tickets/ticket_state" */ static gboolean unpack_ticket_state(xmlNode * xml_ticket, 
pe_working_set_t * data_set) { const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; ticket_t *ticket = NULL; ticket_id = ID(xml_ticket); if (ticket_id == NULL || strlen(ticket_id) == 0) { return FALSE; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, data_set); if (ticket == NULL) { return FALSE; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_ticket, prop_name); if (crm_str_eq(prop_name, XML_ATTR_ID, TRUE)) { continue; } g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value)); } granted = g_hash_table_lookup(ticket->state, "granted"); if (granted && crm_is_true(granted)) { ticket->granted = TRUE; crm_info("We have ticket '%s'", ticket->id); } else { ticket->granted = FALSE; crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, "last-granted"); if (last_granted) { ticket->last_granted = crm_parse_int(last_granted, 0); } standby = g_hash_table_lookup(ticket->state, "standby"); if (standby && crm_is_true(standby)) { ticket->standby = TRUE; if (ticket->granted) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { ticket->standby = FALSE; } crm_trace("Done with ticket state for %s", ticket_id); return TRUE; } static gboolean unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; for (xml_obj = __xml_first_child(xml_tickets); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE) == FALSE) { continue; } unpack_ticket_state(xml_obj, data_set); } return TRUE; } static void unpack_handle_remote_attrs(node_t *this_node, xmlNode *state, pe_working_set_t * data_set) { const char *resource_discovery_enabled = NULL; xmlNode *attrs = NULL; resource_t *rsc = NULL; const char *shutdown = NULL; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { return; } if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) { return; } crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname); this_node->details->remote_maintenance = crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0"); rsc = this_node->details->remote_rsc; if (this_node->details->remote_requires_reset == FALSE) { this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; } attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); shutdown = pe_node_attribute_raw(this_node, XML_CIB_ATTR_SHUTDOWN); if (shutdown != NULL && safe_str_neq("0", shutdown)) { crm_info("Node %s is shutting down", this_node->details->uname); this_node->details->shutdown = TRUE; if (rsc) { rsc->next_role = RSC_ROLE_STOPPED; } } if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) || (rsc && !is_set(rsc->flags, pe_rsc_managed))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, 
XML_NODE_ATTR_RSC_DISCOVERY); if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { if (is_baremetal_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_warn("ignoring %s attribute on baremetal remote node %s, disabling resource discovery requires stonith to be enabled.", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } else { /* if we're here, this is either a baremetal node and fencing is enabled, * or this is a container node which we don't care if fencing is enabled * or not on. container nodes are 'fenced' by recovering the container resource * regardless of whether fencing is enabled. */ crm_info("Node %s has resource discovery disabled", this_node->details->uname); this_node->details->rsc_discovery_enabled = FALSE; } } } static bool unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) { bool changed = false; xmlNode *lrm_rsc = NULL; for (xmlNode *state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) { const char *id = NULL; const char *uname = NULL; node_t *this_node = NULL; bool process = FALSE; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { continue; } id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (this_node == NULL) { crm_info("Node %s is unknown", id); continue; } else if (this_node->details->unpacked) { crm_info("Node %s is already processed", id); continue; } else if (is_remote_node(this_node) == FALSE && is_set(data_set->flags, pe_flag_stonith_enabled)) { // A redundant test, but preserves the order for regression tests process = TRUE; } else if (is_remote_node(this_node)) { bool check = FALSE; resource_t *rsc = this_node->details->remote_rsc; if(fence) { check = TRUE; } else if(rsc == NULL) { /* Not ready yet */ } else if (is_container_remote_node(this_node) && rsc->role == RSC_ROLE_STARTED && rsc->container->role == RSC_ROLE_STARTED) { /* Both the connection and the underlying container * need to be known 'up' before we volunterily process * resources inside it */ check = TRUE; crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED); } else if (is_container_remote_node(this_node) == FALSE && rsc->role == RSC_ROLE_STARTED) { check = TRUE; crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED); } if (check) { determine_remote_online_status(data_set, this_node); unpack_handle_remote_attrs(this_node, state, data_set); process = TRUE; } } else if (this_node->details->online) { process = TRUE; } else if (fence) { process = TRUE; } if(process) { crm_trace("Processing lrm resource entries on %shealthy%s node: %s", fence?"un":"", is_remote_node(this_node)?" remote":"", this_node->details->uname); changed = TRUE; this_node->details->unpacked = TRUE; lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); unpack_lrm_resources(this_node, lrm_rsc, data_set); } } return changed; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? 
*/ gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set) { const char *id = NULL; const char *uname = NULL; xmlNode *state = NULL; node_t *this_node = NULL; crm_trace("Beginning unpack"); if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket); } for (state = __xml_first_child(status); state != NULL; state = __xml_next_element(state)) { if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) { unpack_tickets_state((xmlNode *) state, data_set); } else if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) { xmlNode *attrs = NULL; const char *resource_discovery_enabled = NULL; id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (uname == NULL) { /* error */ continue; } else if (this_node == NULL) { crm_config_warn("Node %s in status section no longer exists", uname); continue; } else if (is_remote_node(this_node)) { /* online state for remote nodes is determined by the * rsc state after all the unpacking is done. we do however * need to mark whether or not the node has been fenced as this plays * a role during unpacking cluster node resource state */ this_node->details->remote_was_fenced = crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0"); continue; } crm_trace("Processing node id=%s, uname=%s", id, uname); /* Mark the node as provisionally clean * - at least we have seen it in the current cluster's lifetime */ this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } crm_trace("determining node state"); determine_online_status(state, this_node, data_set); if (is_not_set(data_set->flags, pe_flag_have_quorum) && this_node->details->online && (data_set->no_quorum_policy == no_quorum_suicide)) { /* Everything else should flow from this automatically * At least until the PE becomes able to migrate off healthy resources */ pe_fence_node(data_set, this_node, "cluster does not have quorum"); } } } while(unpack_node_loop(status, FALSE, data_set)) { crm_trace("Start another loop"); } // Now catch any nodes we didn't see unpack_node_loop(status, is_set(data_set->flags, pe_flag_stonith_enabled), data_set); for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *this_node = gIter->data; if (this_node == NULL) { continue; } else if(is_remote_node(this_node) == FALSE) { continue; } else if(this_node->details->unpacked) { continue; } determine_remote_online_status(data_set, this_node); } return TRUE; } static gboolean determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state, node_t * this_node) { gboolean online = FALSE; const 
char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); if (!crm_is_true(in_cluster)) { crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster)); } else if (safe_str_eq(is_peer, ONLINESTATUS)) { if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { online = TRUE; } else { crm_debug("Node is not ready to run resources: %s", join); } } else if (this_node->details->expected_up == FALSE) { crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster)); crm_trace("\tis_peer=%s, join=%s, expected=%s", crm_str(is_peer), crm_str(join), crm_str(exp_state)); } else { /* mark it unclean */ pe_fence_node(data_set, this_node, "peer is unexpectedly down"); crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s", crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state)); } return online; } static gboolean determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state, node_t * this_node) { gboolean online = FALSE; gboolean do_terminate = FALSE; bool crmd_online = FALSE; const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); const char *terminate = pe_node_attribute_raw(this_node, "terminate"); /* - XML_NODE_IN_CLUSTER ::= true|false - XML_NODE_IS_PEER ::= online|offline - XML_NODE_JOIN_STATE ::= member|down|pending|banned - XML_NODE_EXPECTED ::= member|down */ if (crm_is_true(terminate)) { do_terminate = TRUE; } else if (terminate != NULL && strlen(terminate) > 0) { /* could be a time() value */ char t = terminate[0]; if (t != '0' && isdigit(t)) { do_terminate = TRUE; } } crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate); online = crm_is_true(in_cluster); crmd_online = safe_str_eq(is_peer, ONLINESTATUS); if (exp_state == NULL) { exp_state = CRMD_JOINSTATE_DOWN; } if (this_node->details->shutdown) { crm_debug("%s is shutting down", this_node->details->uname); /* Slightly different criteria since we can't shut down a dead peer */ online = crmd_online; } else if (in_cluster == NULL) { pe_fence_node(data_set, this_node, "peer has not been seen by the cluster"); } else if (safe_str_eq(join, CRMD_JOINSTATE_NACK)) { pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria"); } else if (do_terminate == FALSE && safe_str_eq(exp_state, CRMD_JOINSTATE_DOWN)) { if (crm_is_true(in_cluster) || crmd_online) { crm_info("- Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", this_node->details->uname); } } else if (do_terminate && safe_str_eq(join, CRMD_JOINSTATE_DOWN) && crm_is_true(in_cluster) == FALSE && !crmd_online) { crm_info("Node %s was just shot", this_node->details->uname); online = FALSE; } else if (crm_is_true(in_cluster) == FALSE) { pe_fence_node(data_set, this_node, "peer is no longer part of the cluster"); } else if (!crmd_online) { pe_fence_node(data_set, this_node, "peer process is no longer available"); 
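An easy-to-miss subtlety in the code above: the per-node "terminate" attribute may hold either a boolean string or a time() value, and any value whose first character is a nonzero digit counts as a termination request. A minimal standalone sketch of that check, substituting strcasecmp() for crm_is_true() (which also accepts forms such as "yes", "on", and "1"):

#include <ctype.h>
#include <stdbool.h>
#include <strings.h>

/* Hypothetical helper mirroring the do_terminate logic above */
static bool
terminate_requested(const char *terminate)
{
    if (terminate == NULL) {
        return false;       /* attribute not set */
    }
    if (strcasecmp(terminate, "true") == 0) {
        return true;        /* explicit boolean request */
    }
    /* A leading nonzero digit is assumed to be a time() value */
    return (terminate[0] != '0') && isdigit((unsigned char) terminate[0]);
}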
/* Everything is running at this point, now check join state */ } else if (do_terminate) { pe_fence_node(data_set, this_node, "termination was requested"); } else if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { crm_info("Node %s is active", this_node->details->uname); } else if (safe_str_eq(join, CRMD_JOINSTATE_PENDING) || safe_str_eq(join, CRMD_JOINSTATE_DOWN)) { crm_info("Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { pe_fence_node(data_set, this_node, "peer was in an unknown state"); crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown); } return online; } static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node) { resource_t *rsc = this_node->details->remote_rsc; resource_t *container = NULL; pe_node_t *host = NULL; /* If there is a node state entry for a (former) Pacemaker Remote node * but no resource creating that node, the node's connection resource will * be NULL. Consider it an offline remote node in that case. */ if (rsc == NULL) { this_node->details->online = FALSE; goto remote_online_done; } container = rsc->container; if (container && (g_list_length(rsc->running_on) == 1)) { host = rsc->running_on->data; } /* If the resource is currently started, mark it online. */ if (rsc->role == RSC_ROLE_STARTED) { crm_trace("%s node %s presumed ONLINE because connection resource is started", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) { crm_trace("%s node %s shutting down because connection resource is stopping", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. */ if(container && is_set(container->flags, pe_rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; } else if (rsc->role == RSC_ROLE_STOPPED || (container && container->role == RSC_ROLE_STOPPED)) { crm_trace("%s node %s OFFLINE because its resource is stopped", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = FALSE; } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } remote_online_done: crm_trace("Remote node %s online=%s", this_node->details->id, this_node->details->online ? 
"TRUE" : "FALSE"); return this_node->details->online; } gboolean determine_online_status(xmlNode * node_state, node_t * this_node, pe_working_set_t * data_set) { gboolean online = FALSE; const char *shutdown = NULL; const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); if (this_node == NULL) { crm_config_err("No node to check"); return online; } this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; shutdown = pe_node_attribute_raw(this_node, XML_CIB_ATTR_SHUTDOWN); if (shutdown != NULL && safe_str_neq("0", shutdown)) { this_node->details->shutdown = TRUE; } else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) { this_node->details->expected_up = TRUE; } if (this_node->details->type == node_ping) { this_node->details->unclean = FALSE; online = FALSE; /* As far as resource management is concerned, * the node is safely offline. * Anyone caught abusing this logic will be shot */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { online = determine_online_status_no_fencing(data_set, node_state, this_node); } else { online = determine_online_status_fencing(data_set, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (this_node->details->type == node_ping) { crm_info("Node %s is not a pacemaker node", this_node->details->uname); } else if (this_node->details->unclean) { pe_proc_warn("Node %s is unclean", this_node->details->uname); } else if (this_node->details->online) { crm_info("Node %s is %s", this_node->details->uname, this_node->details->shutdown ? "shutting down" : this_node->details->pending ? "pending" : this_node->details->standby ? "standby" : this_node->details->maintenance ? "maintenance" : "online"); } else { crm_trace("Node %s is offline", this_node->details->uname); } return online; } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!crm_strlen_zero(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! * \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; CRM_ASSERT(end); basename = strndup(last_rsc_id, end - last_rsc_id + 1); CRM_ASSERT(basename); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. 
*/ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; CRM_ASSERT(end); zero = calloc(base_name_len + 3, sizeof(char)); CRM_ASSERT(zero); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static resource_t * create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set) { resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pe_find_node(data_set->nodes, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set); } link_rsc2remotenode(data_set, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) { /* This orphaned rsc needs to be mapped to a container. */ crm_trace("Detected orphaned container filler %s", rsc_id); set_bit(rsc->flags, pe_rsc_orphan_container_filler); } set_bit(rsc->flags, pe_rsc_orphan); data_set->resources = g_list_append(data_set->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history */ static pe_resource_t * create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id, pe_node_t *node, pe_working_set_t *data_set) { pe_resource_t *top = pe__create_clone_child(parent, data_set); // find_rsc() because we might be a cloned group pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone); pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, node->details->uname); return orphan; } /*! * \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of clone-max instances); * (3) a newly created orphan (i.e. clone-max instances are already active). * * \param[in] data_set Cluster information * \param[in] node Node on which to check for instance * \param[in] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (without instance) */ static resource_t * find_anonymous_clone(pe_working_set_t * data_set, node_t * node, resource_t * parent, const char *rsc_id) { GListPtr rIter = NULL; pe_resource_t *rsc = NULL; pe_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; CRM_ASSERT(parent != NULL); CRM_ASSERT(pe_rsc_is_clone(parent)); CRM_ASSERT(is_not_set(parent->flags, pe_rsc_unique)); // Check for active (or partially active, for cloned groups) instance pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id); for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) { GListPtr locations = NULL; resource_t *child = rIter->data; - /* Check whether this instance is already known to be active anywhere. - * - * "Active" in this case means known to be active at this stage of - * unpacking. 
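As a usage illustration of the clone-name helpers defined above (a hypothetical standalone program; the declarations normally come from the scheduler headers, and "galera" is just an example resource name):

#include <stdio.h>
#include <stdlib.h>

char *clone_strip(const char *last_rsc_id);     /* defined above */
char *clone_zero(const char *last_rsc_id);      /* defined above */

int
main(void)
{
    char *base = clone_strip("galera:2");       /* yields "galera" */
    char *zero = clone_zero("galera:2");        /* yields "galera:0" */

    printf("galera:2 -> %s and %s\n", base, zero);
    free(base);                                 /* caller owns both results */
    free(zero);
    return 0;
}

An ID without an instance suffix passes through clone_strip() unchanged, which is why unpack_find_resource() below can retry a plain name as its :0 instance.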
Because this function is called for a resource before the - * resource's individual operation history entries are unpacked, - * locations will generally not contain the desired node. + /* Check whether this instance is already known to be active or pending + * anywhere, at this stage of unpacking. Because this function is called + * for a resource before the resource's individual operation history + * entries are unpacked, locations will generally not contain the + * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if globally-unique * was flipped from true to false); and * (3) when we re-run calculations on the same data set as part of a * simulation. */ - child->fns->location(child, &locations, TRUE); + child->fns->location(child, &locations, 2); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (((pe_node_t *)locations->data)->details == node->details) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if globally-unique is switched from true to * false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. */ if (rsc->running_on) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, node->details->uname); skip_inactive = TRUE; rsc = NULL; } else { pe_rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pe_rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && is_not_set(child->flags, pe_rsc_block)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); + + /* ... but don't use it if it was already associated with a + * pending action on another node + */ + if (inactive_instance && inactive_instance->pending_node + && (inactive_instance->pending_node->details != node->details)) { + inactive_instance = NULL; + } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has "requires" set to "quorum" or "nothing", and we don't * have a clone instance for every node, we don't want to consume a valid * instance number for unclean nodes. Such instances may appear to be active * according to the history, but should be considered inactive, so we can * start an instance elsewhere. Treat such instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. 
* * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && is_not_set(rsc->flags, pe_rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !is_container_remote_node(node) && !pe__is_universal_clone(parent, data_set)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, data_set); pe_rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static resource_t * unpack_find_resource(pe_working_set_t * data_set, node_t * node, const char *rsc_id, xmlNode * rsc_entry) { resource_t *rsc = NULL; resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when clone-max=0, we create * a single :0 orphan to match against here. */ char *clone0_id = clone_zero(rsc_id); resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id); if (clone0 && is_not_set(clone0->flags, pe_rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->variant > pe_native) { crm_trace("Resource history for %s is orphaned because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pe_rsc_is_anon_clone(parent)) { if (pe_rsc_is_bundled(parent)) { rsc = find_container_child(parent->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(data_set, node, parent, base); free(base); CRM_ASSERT(rsc != NULL); } } if (rsc && safe_str_neq(rsc_id, rsc->id) && safe_str_neq(rsc_id, rsc->clone_name)) { free(rsc->clone_name); rsc->clone_name = strdup(rsc_id); pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, node->details->uname, rsc->id, (is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : "")); } return rsc; } static resource_t * process_orphan_resource(xmlNode * rsc_entry, node_t * node, pe_working_set_t * data_set) { resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname); rsc = create_fake_resource(rsc_id, rsc_entry, data_set); if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } else { print_resource(LOG_TRACE, "Added orphan", rsc, FALSE); CRM_CHECK(rsc != NULL, return NULL); resource_location(rsc, NULL, -INFINITY, "__orphan_dont_run__", data_set); } return rsc; } static void process_rsc_state(resource_t * rsc, node_t * node, enum action_fail_response on_fail, xmlNode * migrate_op, pe_working_set_t * data_set) { node_t *tmpnode = NULL; char *reason = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail)); /* process current state */ if (rsc->role != RSC_ROLE_UNKNOWN) { resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { node_t *n = node_copy(node); pe_rsc_trace(rsc, "%s (aka. 
%s) known on %s", rsc->id, rsc->clone_name, n->details->uname); g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n); } if (is_set(iter->flags, pe_rsc_unique)) { break; } iter = iter->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if (rsc->role > RSC_ROLE_STOPPED && node->details->online == FALSE && node->details->maintenance == FALSE && is_set(rsc->flags, pe_rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (is_container_remote_node(node)) { set_bit(rsc->flags, pe_rsc_failed); should_fence = TRUE; } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { if (is_baremetal_remote_node(node) && node->details->remote_rsc && is_not_set(node->details->remote_rsc->flags, pe_rsc_failed)) { /* setting unseen = true means that fencing of the remote node will * only occur if the connection resource is not going to start somewhere. * This allows connection resources on a failed cluster-node to move to * another node without requiring the baremetal remote nodes to be fenced * as well. */ node->details->unseen = TRUE; reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(data_set, node, reason); } free(reason); } if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = action_fail_ignore; } switch (on_fail) { case action_fail_ignore: /* nothing to do */ break; case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(data_set, node, reason); free(reason); break; case action_fail_standby: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case action_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); break; case action_fail_migrate: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set); break; case action_fail_stop: rsc->next_role = RSC_ROLE_STOPPED; break; case action_fail_recover: if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); stop_action(rsc, node, FALSE); } break; case action_fail_restart_container: set_bit(rsc->flags, pe_rsc_failed); if (rsc->container) { stop_action(rsc->container, node, FALSE); } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { stop_action(rsc, node, FALSE); } break; case action_fail_reset_remote: set_bit(rsc->flags, pe_rsc_failed); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); } if (tmpnode && is_baremetal_remote_node(tmpnode) && tmpnode->details->remote_was_fenced == 0) { /* connection 
resource to the baremetal node failed in a way that * should result in fencing the remote node. */ pe_fence_node(data_set, tmpnode, "remote connection is unrecoverable"); } } /* Require the stop action regardless of whether fencing is occurring. */ if (rsc->role > RSC_ROLE_STOPPED) { stop_action(rsc, node, FALSE); } /* If reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->remote_reconnect_ms) { rsc->next_role = RSC_ROLE_STOPPED; } break; } /* Ensure a remote-node connection failure forces an unclean remote node * to be fenced. By setting unseen = FALSE, the remote-node failure will * result in a fencing operation regardless of whether we attempt to * reconnect to the remote node in this transition. */ if (is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); if (tmpnode && tmpnode->details->unclean) { tmpnode->details->unseen = FALSE; } } if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { if (is_set(rsc->flags, pe_rsc_orphan)) { if (is_set(rsc->flags, pe_rsc_managed)) { crm_config_warn("Detected active orphan %s running on %s", rsc->id, node->details->uname); } else { crm_config_warn("Cluster configured not to stop active orphans." " %s must be stopped manually on %s", rsc->id, node->details->uname); } } native_add_running(rsc, node, data_set); if (on_fail != action_fail_ignore) { set_bit(rsc->flags, pe_rsc_failed); } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { /* Only do this for older status sections that included instance numbers. * Otherwise, stopped instances will appear as orphans. */ pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); free(rsc->clone_name); rsc->clone_name = NULL; } else { char *key = stop_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, node); GListPtr gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { action_t *stop = (action_t *) gIter->data; stop->flags |= pe_action_optional; } g_list_free(possible_matches); free(key); } } /* create active recurring operations as optional */ static void process_recurring(node_t * node, resource_t * rsc, int start_index, int stop_index, GListPtr sorted_op_list, pe_working_set_t * data_set) { int counter = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = ID(rsc_op); const char *interval_ms_s = NULL; counter++; if (node->details->online == FALSE) { pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname); continue; } else if (counter < start_index) { pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter); continue; } interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if (interval_ms == 0) { pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if
(safe_str_eq(status, "-1")) { pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = generate_op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); } } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; *stop_index = -1; *start_index = -1; for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (safe_str_eq(task, CRMD_ACTION_STOP) && safe_str_eq(status, "0")) { *stop_index = counter; } else if (safe_str_eq(task, CRMD_ACTION_START) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && safe_str_eq(task, CRMD_ACTION_STATUS)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if (safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) { implied_monitor_start = counter; } } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } static resource_t * unpack_lrm_rsc_state(node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set) { GListPtr gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; const char *task = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; xmlNode *migrate_op = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_trace("[%s] Processing %s on %s", crm_element_name(rsc_entry), rsc_id, node->details->uname); /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } /* find the resource */ rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); if (rsc == NULL) { rsc = process_orphan_resource(rsc_entry, node, data_set); } CRM_ASSERT(rsc != NULL); /* process operations */ saved_role = rsc->role; on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { migrate_op = rsc_op; } unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set); /* no need to free the 
contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail, migrate_op, data_set); if (get_target_role(rsc, &req_role)) { if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); rsc->next_role = req_role; } else if (req_role > rsc->next_role) { pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { resource_t *rsc; resource_t *container; const char *rsc_id; const char *container_id; if (safe_str_neq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE)) { continue; } container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER); rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(data_set->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL || is_set(rsc->flags, pe_rsc_orphan_container_filler) == FALSE || rsc->container != NULL) { continue; } pe_rsc_trace(rsc, "Mapped orphaned rsc %s's container to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } gboolean unpack_lrm_resources(node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; gboolean found_orphaned_container_filler = FALSE; CRM_CHECK(node != NULL, return FALSE); crm_trace("Unpacking resources on %s", node->details->uname); for (rsc_entry = __xml_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); if (!rsc) { continue; } if (is_set(rsc->flags, pe_rsc_orphan_container_filler)) { found_orphaned_container_filler = TRUE; } } } /* now that all the resource state has been unpacked for this node * we have to go back and map any orphaned container fillers to their * container resource */ if (found_orphaned_container_filler) { handle_orphaned_container_fillers(lrm_rsc_list, data_set); } return TRUE; } static void set_active(resource_t * rsc) { resource_t *top = uber_parent(rsc); if (top && is_set(top->flags, pe_rsc_promotable)) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { node_t *node = value; int *score = user_data; node->weight = *score; } #define STATUS_PATH_MAX 1024 static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, pe_working_set_t * data_set) { int offset = 0; char xpath[STATUS_PATH_MAX]; offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node); offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']", resource); /* Need to check against transition_magic too? 
*/ if (source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op, source); } else if (source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op, source); } else { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op); } CRM_LOG_ASSERT(offset > 0); return get_xpath_object(xpath, data_set->input, LOG_DEBUG); } static bool stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->id, NULL, data_set); if (stop_op) { int stop_id = 0; int task_id = 0; crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); if (stop_id > task_id) { return TRUE; } } return FALSE; } static void unpack_rsc_migration(resource_t *rsc, node_t *node, xmlNode *xml_op, pe_working_set_t * data_set) { /* A successful migration sequence is: * migrate_to on source node * migrate_from on target node * stop on source node * * If a migrate_to is followed by a stop, the entire migration (successful * or failed) is complete, and we don't care what happened on the target. * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from failed, make sure the resource gets * stopped on both source and target (if up). * * If the migrate_to and migrate_from both succeeded (which also implies the * resource is no longer running on the source), but there is no stop, the * migration is considered to be "dangling". */ int from_rc = 0; int from_status = 0; const char *migrate_source = NULL; const char *migrate_target = NULL; pe_node_t *target = NULL; pe_node_t *source = NULL; xmlNode *migrate_from = NULL; if (stop_happened_after(rsc, node, xml_op, data_set)) { return; } // Clones are not allowed to migrate, so role can't be master rsc->role = RSC_ROLE_STARTED; migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); target = pe_find_node(data_set->nodes, migrate_target); source = pe_find_node(data_set->nodes, migrate_source); // Check whether there was a migrate_from action migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source, data_set); if (migrate_from) { crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d", ID(migrate_from), migrate_target, from_status, from_rc); } if (migrate_from && from_rc == PCMK_OCF_OK && from_status == PCMK_LRM_OP_DONE) { /* The migrate_to and migrate_from both succeeded, so mark the migration * as "dangling". This will be used to schedule a stop action on the * source without affecting the target. 
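The three-operation protocol described in the function comment above (migrate_to on the source, then migrate_from on the target, then stop on the source) reduces to a small classification. An illustrative distillation, using booleans in place of the operation-history lookups the real code performs; a migrate_from that is still pending is treated here like one that never ran:

enum migration_state {
    MIG_COMPLETE,   /* a later stop on the source ends the migration */
    MIG_PARTIAL,    /* migrate_to succeeded; no (finished) migrate_from yet */
    MIG_DANGLING,   /* both halves succeeded but the source never stopped */
    MIG_FAILED      /* migrate_from ran and reported an error */
};

static enum migration_state
classify_migration(int stopped_after, int migrate_from_done, int migrate_from_ok)
{
    if (stopped_after) {
        return MIG_COMPLETE;    /* outcome on the target no longer matters */
    }
    if (!migrate_from_done) {
        return MIG_PARTIAL;     /* may be resumed if the target is re-chosen */
    }
    return migrate_from_ok ? MIG_DANGLING : MIG_FAILED;
}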
*/ pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op), migrate_source); rsc->role = RSC_ROLE_STOPPED; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed if (target && target->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", migrate_target, target, target->details->online); native_add_running(rsc, target, data_set); } } else { // Pending, or complete but erased if (target && target->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", migrate_target, target, target->details->online); native_add_running(rsc, target, data_set); if (source && source->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ rsc->partial_migration_target = target; rsc->partial_migration_source = source; } } else { /* Consider it failed here - forces a restart, prevents migration */ set_bit(rsc->flags, pe_rsc_failed); clear_bit(rsc->flags, pe_rsc_allow_migrate); } } } static void unpack_rsc_migration_failure(resource_t *rsc, node_t *node, xmlNode *xml_op, pe_working_set_t * data_set) { const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); CRM_ASSERT(rsc); if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { int stop_id = 0; int migrate_id = 0; const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_source, NULL, data_set); xmlNode *migrate_op = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, migrate_source, migrate_target, data_set); if (stop_op) { crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); } if (migrate_op) { crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id); } /* Get our state right */ rsc->role = RSC_ROLE_STARTED; /* can be master? */ if (stop_op == NULL || stop_id < migrate_id) { node_t *source = pe_find_node(data_set->nodes, migrate_source); if (source && source->details->online) { native_add_running(rsc, source, data_set); } } } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) { int stop_id = 0; int migrate_id = 0; const char *migrate_source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, migrate_target, NULL, data_set); xmlNode *migrate_op = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, migrate_target, migrate_source, data_set); if (stop_op) { crm_element_value_int(stop_op, XML_LRM_ATTR_CALLID, &stop_id); } if (migrate_op) { crm_element_value_int(migrate_op, XML_LRM_ATTR_CALLID, &migrate_id); } /* Get our state right */ rsc->role = RSC_ROLE_STARTED; /* can be master? 
*/ if (stop_op == NULL || stop_id < migrate_id) { node_t *target = pe_find_node(data_set->nodes, migrate_target); pe_rsc_trace(rsc, "Stop: %p %d, Migrated: %p %d", stop_op, stop_id, migrate_op, migrate_id); if (target && target->details->online) { native_add_running(rsc, target, data_set); } } else if (migrate_op == NULL) { /* Make sure it gets cleaned up, the stop may pre-date the migrate_from */ rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } } } static void record_failed_op(xmlNode *op, node_t* node, resource_t *rsc, pe_working_set_t * data_set) { xmlNode *xIter = NULL; const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY); if (node->details->online == FALSE) { return; } for (xIter = data_set->failed->children; xIter; xIter = xIter->next) { const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY); const char *uname = crm_element_value(xIter, XML_ATTR_UNAME); if(safe_str_eq(op_key, key) && safe_str_eq(uname, node->details->uname)) { crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname); return; } } crm_trace("Adding entry %s on %s", op_key, node->details->uname); crm_xml_add(op, XML_ATTR_UNAME, node->details->uname); crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id); add_node_copy(data_set->failed, op); } static const char *get_op_key(xmlNode *xml_op) { const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if(key == NULL) { key = ID(xml_op); } return key; } static void unpack_rsc_op_failure(resource_t * rsc, node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = FALSE; action_t *action = NULL; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); CRM_ASSERT(rsc); *last_failure = xml_op; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { is_probe = TRUE; pe_rsc_trace(rsc, "is a probe: %s", key); } if (rc != PCMK_OCF_NOT_INSTALLED || is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_warn("Processing failed %s of %s on %s: %s " CRM_XS " rc=%d", (is_probe? "probe" : task), rsc->id, node->details->uname, services_ocf_exitcode_str(rc), rc); if (is_probe && (rc != PCMK_OCF_OK) && (rc != PCMK_OCF_NOT_RUNNING) && (rc != PCMK_OCF_RUNNING_MASTER)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. 
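One recurring idiom worth naming: here, and again in determine_op_status() and check_operation_expiry() below, a probe is identified as a monitor with a zero interval (CRMD_ACTION_STATUS expands to "monitor"). A hypothetical helper capturing the test:

#include <stdbool.h>
#include <string.h>

/* A probe is a zero-interval monitor operation */
static bool
is_probe_op(const char *task, unsigned int interval_ms)
{
    return (interval_ms == 0) && (strcmp(task, "monitor") == 0);
}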
*/ crm_notice("If it is not possible for %s to run on %s, see " "the resource-discovery option for location constraints", rsc->id, node->details->uname); } record_failed_op(xml_op, node, rsc, data_set); } else { crm_trace("Processing failed op %s for %s on %s: %s (%d)", task, rsc->id, node->details->uname, services_ocf_exitcode_str(rc), rc); } action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), fail2text(action->on_fail), action->uuid, key); *on_fail = action->on_fail; } if (safe_str_eq(task, CRMD_ACTION_STOP)) { resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set); } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) { unpack_rsc_migration_failure(rsc, node, xml_op, data_set); } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { if (action->on_fail == action_fail_block) { rsc->role = RSC_ROLE_MASTER; rsc->next_role = RSC_ROLE_STOPPED; } else if(rc == PCMK_OCF_NOT_RUNNING) { rsc->role = RSC_ROLE_STOPPED; } else { /* * Staying in master role would put the PE/TE into a loop. Setting * slave role is not dangerous because the resource will be stopped * as part of recovery, and any master promotion will be ordered * after that stop. */ rsc->role = RSC_ROLE_SLAVE; } } if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) { /* leave stopped */ pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id); rsc->role = RSC_ROLE_STOPPED; } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "Setting %s active", rsc->id); set_active(rsc); } pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), node->details->unclean ? "true" : "false", fail2text(action->on_fail), role2text(action->fail_role)); if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { rsc->next_role = action->fail_role; } if (action->fail_role == RSC_ROLE_STOPPED) { int score = -INFINITY; resource_t *fail_rsc = rsc; if (fail_rsc->parent) { resource_t *parent = uber_parent(fail_rsc); if (pe_rsc_is_clone(parent) && is_not_set(parent->flags, pe_rsc_unique)) { /* For clone resources, if a child fails on an operation * with on-fail = stop, all the resources fail. Do this by preventing * the parent from coming up again. 
*/ fail_rsc = parent; } } crm_warn("Making sure %s doesn't come up again", fail_rsc->id); /* make sure it doesn't come up again */ if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } pe_free_action(action); } static int determine_op_status( resource_t *rsc, int rc, int target_rc, node_t * node, xmlNode * xml_op, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; int result = PCMK_LRM_OP_DONE; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); bool is_probe = FALSE; CRM_ASSERT(rsc); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { is_probe = TRUE; } if (target_rc >= 0 && target_rc != rc) { result = PCMK_LRM_OP_ERROR; pe_rsc_debug(rsc, "%s on %s returned '%s' (%d) instead of the expected value: '%s' (%d)", key, node->details->uname, services_ocf_exitcode_str(rc), rc, services_ocf_exitcode_str(target_rc), target_rc); } /* we could clean this up significantly except for old LRMs and CRMs that * didn't include target_rc and liked to remap status */ switch (rc) { case PCMK_OCF_OK: if (is_probe && target_rc == 7) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Operation %s found resource %s active on %s", task, rsc->id, node->details->uname); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || target_rc == rc || is_not_set(rsc->flags, pe_rsc_managed)) { result = PCMK_LRM_OP_DONE; rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } else if (safe_str_neq(task, CRMD_ACTION_STOP)) { result = PCMK_LRM_OP_ERROR; } break; case PCMK_OCF_RUNNING_MASTER: if (is_probe) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Operation %s found resource %s active in master mode on %s", task, rsc->id, node->details->uname); } else if (target_rc == rc) { /* nothing to do */ } else if (target_rc >= 0) { result = PCMK_LRM_OP_ERROR; } rsc->role = RSC_ROLE_MASTER; break; case PCMK_OCF_DEGRADED_MASTER: case PCMK_OCF_FAILED_MASTER: rsc->role = RSC_ROLE_MASTER; result = PCMK_LRM_OP_ERROR; break; case PCMK_OCF_NOT_CONFIGURED: result = PCMK_LRM_OP_ERROR_FATAL; break; case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: case PCMK_OCF_UNIMPLEMENT_FEATURE: if (rc == PCMK_OCF_UNIMPLEMENT_FEATURE && (interval_ms > 0)) { result = PCMK_LRM_OP_NOTSUPPORTED; break; } else if (pe_can_fence(data_set, node) == FALSE && safe_str_eq(task, CRMD_ACTION_STOP)) { /* If a stop fails and we can't fence, there's nothing else we can do */ pe_proc_err("No further recovery can be attempted for %s: %s action failed with '%s' (%d)", rsc->id, task, services_ocf_exitcode_str(rc), rc); clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); } result = PCMK_LRM_OP_ERROR_HARD; break; default: if (result == PCMK_LRM_OP_DONE) { crm_info("Treating %s (rc=%d) on %s as an ERROR", key, rc, node->details->uname); result = PCMK_LRM_OP_ERROR; } } return result; } static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNode *xml_op, pe_working_set_t * data_set) { bool expired = FALSE; time_t last_failure = 0; guint interval_ms = 0; int failure_timeout = rsc->failure_timeout; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, 
XML_LRM_ATTR_TASK); const char *clear_reason = NULL; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); /* Clearing recurring monitor operation failures automatically * needs to be carefully considered */ if ((interval_ms != 0) && safe_str_eq(task, "monitor")) { /* TODO: in the future, we should consider not clearing recurring monitor * op failures unless the last action for a resource was a "stop" action; * otherwise, clearing the monitor failure could leave the resource in a * nondeterministic state. * * For now, we handle this potential nondeterminism for remote node * connection resources by not clearing a recurring monitor op failure * until after the node has been fenced. */ if (is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_ms) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); if (remote_node && remote_node->details->remote_was_fenced == 0) { if (strstr(ID(xml_op), "last_failure")) { crm_info("Waiting to clear monitor failure for remote node %s until fencing has occurred", rsc->id); } /* Disable the failure timeout for this operation, because we believe * fencing of the remote node should occur first. */ failure_timeout = 0; } } } if (failure_timeout > 0) { int last_run = 0; if (crm_element_value_int(xml_op, XML_RSC_OP_LAST_CHANGE, &last_run) == 0) { time_t now = get_effective_time(data_set); if (now > (last_run + failure_timeout)) { expired = TRUE; } } } if (expired) { if (failure_timeout > 0) { if (pe_get_failcount(node, rsc, &last_failure, pe_fc_default, xml_op, data_set)) { if (pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, xml_op, data_set) == 0) { clear_reason = "it expired"; } else { expired = FALSE; } } else if (rsc->remote_reconnect_ms && strstr(ID(xml_op), "last_failure")) { /* Always clear last failure when a reconnect interval is set */ clear_reason = "reconnect interval is set"; } } } else if (strstr(ID(xml_op), "last_failure") && ((strcmp(task, "start") == 0) || (strcmp(task, "monitor") == 0))) { op_digest_cache_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set); if (digest_data->rc == RSC_DIGEST_UNKNOWN) { crm_trace("rsc op %s/%s on node %s does not have an op digest to compare against", rsc->id, key, node->details->id); } else if(container_fix_remote_addr(rsc) && digest_data->rc != RSC_DIGEST_MATCH) { // We can't sanely check the changing 'addr' attribute.
Yet crm_trace("Ignoring rsc op %s/%s on node %s", rsc->id, key, node->details->id); } else if (digest_data->rc != RSC_DIGEST_MATCH) { clear_reason = "resource parameters have changed"; } } if (clear_reason != NULL) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s", rsc->id, node->details->uname, clear_reason, clear_op->uuid); if (is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_ms && remote_node && remote_node->details->unclean) { action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, data_set); crm_notice("Waiting for %s to complete before clearing %s failure for remote node %s", fence?fence->uuid:"nil", task, rsc->id); order_actions(fence, clear_op, pe_order_implies_then); } } if (expired && (interval_ms == 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { switch(rc) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: /* Don't expire probes that return these values */ expired = FALSE; break; } } return expired; } int get_target_rc(xmlNode *xml_op) { int dummy = 0; int target_rc = 0; char *dummy_string = NULL; const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, &dummy_string, &dummy, &dummy, &target_rc); free(dummy_string); return target_rc; } static enum action_fail_response get_action_on_fail(resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) { int result = action_fail_recover; action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); result = action->on_fail; pe_free_action(action); return result; } static void update_resource_state(resource_t * rsc, node_t * node, xmlNode * xml_op, const char * task, int rc, xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { gboolean clear_past_failure = FALSE; CRM_ASSERT(rsc); CRM_ASSERT(xml_op); if (rc == PCMK_OCF_NOT_RUNNING) { clear_past_failure = TRUE; } else if (rc == PCMK_OCF_NOT_INSTALLED) { rsc->role = RSC_ROLE_STOPPED; } else if (safe_str_eq(task, CRMD_ACTION_STATUS)) { if (last_failure) { const char *op_key = get_op_key(xml_op); const char *last_failure_key = get_op_key(last_failure); if (safe_str_eq(op_key, last_failure_key)) { clear_past_failure = TRUE; } } if (rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } } else if (safe_str_eq(task, CRMD_ACTION_START)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_STOP)) { rsc->role = RSC_ROLE_STOPPED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { /* Demote from Master does not clear an error */ rsc->role = RSC_ROLE_SLAVE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) { unpack_rsc_migration(rsc, node, xml_op, data_set); } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname); set_active(rsc); } /* clear any previous failure actions */ if 
(clear_past_failure) { switch (*on_fail) { case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_block: case action_fail_ignore: case action_fail_recover: case action_fail_restart_container: *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; break; case action_fail_reset_remote: if (rsc->remote_reconnect_ms == 0) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } break; } } } gboolean unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { int task_id = 0; const char *key = NULL; const char *task = NULL; const char *task_key = NULL; int rc = 0; int status = PCMK_LRM_OP_UNKNOWN; int target_rc = get_target_rc(xml_op); guint interval_ms = 0; gboolean expired = FALSE; resource_t *parent = rsc; enum action_fail_response failure_strategy = action_fail_recover; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(node != NULL, return FALSE); CRM_CHECK(xml_op != NULL, return FALSE); task_key = get_op_key(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); CRM_CHECK(task != NULL, return FALSE); CRM_CHECK(status <= PCMK_LRM_OP_NOT_INSTALLED, return FALSE); CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return FALSE); if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || safe_str_eq(task, CRMD_ACTION_METADATA)) { /* safe to ignore these */ return TRUE; } if (is_not_set(rsc->flags, pe_rsc_unique)) { parent = uber_parent(rsc); } pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)", task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role)); if (node->details->unclean) { pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean." 
" Further action depends on the value of the stop's on-fail attribute", node->details->uname, rsc->id); } if (status == PCMK_LRM_OP_ERROR) { /* Older versions set this if rc != 0 but it's up to us to decide */ status = PCMK_LRM_OP_DONE; } if(status != PCMK_LRM_OP_NOT_INSTALLED) { expired = check_operation_expiry(rsc, node, rc, xml_op, data_set); } /* Degraded results are informational only, re-map them to their error-free equivalents */ if (rc == PCMK_OCF_DEGRADED && safe_str_eq(task, CRMD_ACTION_STATUS)) { rc = PCMK_OCF_OK; /* Add them to the failed list to highlight them for the user */ if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) { crm_trace("Remapping %d to %d", PCMK_OCF_DEGRADED, PCMK_OCF_OK); record_failed_op(xml_op, node, rsc, data_set); } } else if (rc == PCMK_OCF_DEGRADED_MASTER && safe_str_eq(task, CRMD_ACTION_STATUS)) { rc = PCMK_OCF_RUNNING_MASTER; /* Add them to the failed list to highlight them for the user */ if ((node->details->shutdown == FALSE) || (node->details->online == TRUE)) { crm_trace("Remapping %d to %d", PCMK_OCF_DEGRADED_MASTER, PCMK_OCF_RUNNING_MASTER); record_failed_op(xml_op, node, rsc, data_set); } } if (expired && target_rc != rc) { const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC); pe_rsc_debug(rsc, "Expired operation '%s' on %s returned '%s' (%d) instead of the expected value: '%s' (%d)", key, node->details->uname, services_ocf_exitcode_str(rc), rc, services_ocf_exitcode_str(target_rc), target_rc); if (interval_ms == 0) { crm_notice("Ignoring expired calculated failure %s (rc=%d, magic=%s) on %s", task_key, rc, magic, node->details->uname); goto done; } else if(node->details->online && node->details->unclean == FALSE) { crm_notice("Re-initiated expired calculated failure %s (rc=%d, magic=%s) on %s", task_key, rc, magic, node->details->uname); /* This is SO horrible, but we don't have access to CancelXmlOp() yet */ crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout"); goto done; } } if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) { status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set); } pe_rsc_trace(rsc, "Handling status: %d", status); switch (status) { case PCMK_LRM_OP_CANCELLED: /* do nothing?? */ pe_err("Don't know what to do for cancelled ops yet"); break; case PCMK_LRM_OP_PENDING: if (safe_str_eq(task, CRMD_ACTION_START)) { set_bit(rsc->flags, pe_rsc_start_pending); set_active(rsc); } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE) && node->details->unclean) { /* If a pending migrate_to action is out on a unclean node, * we have to force the stop action on the target. */ const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); node_t *target = pe_find_node(data_set->nodes, migrate_target); if (target) { stop_action(rsc, target, FALSE); } } if (rsc->pending_task == NULL) { if (safe_str_eq(task, CRMD_ACTION_STATUS) && (interval_ms == 0)) { /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * native.c:native_pending_task(). 
*/ /*rsc->pending_task = strdup("probe");*/ - + /*rsc->pending_node = node;*/ } else { rsc->pending_task = strdup(task); + rsc->pending_node = node; } } break; case PCMK_LRM_OP_DONE: pe_rsc_trace(rsc, "%s/%s completed on %s", rsc->id, task, node->details->uname); update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_INSTALLED: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if (failure_strategy == action_fail_ignore) { crm_warn("Cannot ignore failed %s (status=%d, rc=%d) on %s: " "Resource agent doesn't exist", task_key, status, rc, node->details->uname); /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */ *on_fail = action_fail_migrate; } resource_location(parent, node, -INFINITY, "hard-error", data_set); unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); break; case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_ERROR_HARD: case PCMK_LRM_OP_ERROR_FATAL: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if ((failure_strategy == action_fail_ignore) || (failure_strategy == action_fail_restart_container && safe_str_eq(task, CRMD_ACTION_STOP))) { crm_warn("Pretending the failure of %s (rc=%d) on %s succeeded", task_key, rc, node->details->uname); update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); set_bit(rsc->flags, pe_rsc_failure_ignored); record_failed_op(xml_op, node, rsc, data_set); if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); if(status == PCMK_LRM_OP_ERROR_HARD) { do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE, "Preventing %s from re-starting on %s: operation %s failed '%s' (%d)", parent->id, node->details->uname, task, services_ocf_exitcode_str(rc), rc); resource_location(parent, node, -INFINITY, "hard-error", data_set); } else if(status == PCMK_LRM_OP_ERROR_FATAL) { crm_err("Preventing %s from re-starting anywhere: operation %s failed '%s' (%d)", parent->id, task, services_ocf_exitcode_str(rc), rc); resource_location(parent, NULL, -INFINITY, "fatal-error", data_set); } } break; } done: pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s", rsc->id, task, role2text(rsc->role), role2text(rsc->next_role)); return TRUE; } gboolean add_node_attrs(xmlNode * xml_obj, node_t * node, gboolean overwrite, pe_working_set_t * data_set) { const char *cluster_name = NULL; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID), strdup(node->details->id)); if (safe_str_eq(node->details->id, data_set->dc_uuid)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE)); } cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name"); if (cluster_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME), strdup(cluster_name)); } unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set->now); if (pe_node_attribute_raw(node, 
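
This hunk's substantive change is the new rsc->pending_node bookkeeping recorded beside pending_task. A toy model of what tracking the node enables (simplified stand-in structs, not the real pe_resource_t/pe_node_t):

#include <stdio.h>

struct node { const char *uname; };
struct rsc  { const char *id; const char *pending_task; const struct node *pending_node; };

/* With the node recorded, status output can say where an op is in flight */
static void show_pending(const struct rsc *r)
{
    if (r->pending_task && r->pending_node) {
        printf("%s: %s pending on %s\n", r->id, r->pending_task, r->pending_node->uname);
    }
}

int main(void)
{
    struct node n = { "node1" };
    struct rsc r = { "webserver", "start", &n };

    show_pending(&r);   /* webserver: start pending on node1 */
    return 0;
}
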
CRM_ATTR_SITE_NAME) == NULL) { const char *site_name = pe_node_attribute_raw(node, "site-name"); if (site_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(site_name)); } else if (cluster_name) { /* Default to cluster-name if unset */ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(cluster_name)); } } return TRUE; } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GListPtr gIter = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", ID(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); node_t *this_node = NULL; xmlNode *node_state = NULL; for (node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { const char *uname = crm_element_value(node_state, XML_ATTR_UNAME); if (node != NULL && safe_str_neq(uname, node)) { continue; } this_node = pe_find_node(data_set->nodes, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (is_remote_node(this_node)) { determine_remote_online_status(data_set, this_node); } else { determine_online_status(node_state, this_node, data_set); } if (this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... 
* unless stonith is enabled in which case we need to
                 *   make sure rsc start events happen after the stonith
                 */
                xmlNode *lrm_rsc = NULL;

                tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
                tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);

                for (lrm_rsc = __xml_first_child(tmp); lrm_rsc != NULL;
                     lrm_rsc = __xml_next_element(lrm_rsc)) {
                    if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) {

                        const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);

                        if (rsc != NULL && safe_str_neq(rsc_id, rsc)) {
                            continue;
                        }

                        intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
                        output = g_list_concat(output, intermediate);
                    }
                }
            }
        }
    }

    return output;
}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 7b87d04a11..3ec22674f1 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2334 +1,2378 @@
/*
 * Copyright 2004-2018 Andrew Beekhof
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */
#include
#include
#include
#include
#include
+#include <ctype.h>
#include
#include
#include
#include
#include
pe_working_set_t *pe_dataset = NULL;
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
                      pe_working_set_t * data_set);
static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key,
                                         gboolean include_disabled);

#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
    pe_rsc_action_details_t *details;

    CRM_CHECK(action != NULL, return NULL);

    if (action->action_details == NULL) {
        action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
        CRM_CHECK(action->action_details != NULL, return NULL);
    }

    details = (pe_rsc_action_details_t *) action->action_details;
    if (details->versioned_parameters == NULL) {
        details->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS);
    }
    if (details->versioned_meta == NULL) {
        details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
    }
    return details;
}

static void
pe_free_rsc_action_details(pe_action_t *action)
{
    pe_rsc_action_details_t *details;

    if ((action == NULL) || (action->action_details == NULL)) {
        return;
    }

    details = (pe_rsc_action_details_t *) action->action_details;

    if (details->versioned_parameters) {
        free_xml(details->versioned_parameters);
    }
    if (details->versioned_meta) {
        free_xml(details->versioned_meta);
    }

    action->action_details = NULL;
}
#endif

/*!
 * \internal
 * \brief Check whether we can fence a particular node
 *
 * \param[in] data_set  Working set for cluster
 * \param[in] node      Name of node to check
 *
 * \return TRUE if node can be fenced, FALSE otherwise
 *
 * \note This function should only be called for cluster nodes and baremetal
 *       remote nodes; guest nodes are fenced by stopping their container
 *       resource, so fence execution requirements do not apply to them.
*/ bool pe_can_fence(pe_working_set_t * data_set, node_t *node) { if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return FALSE; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return FALSE; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return TRUE; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return TRUE; } else if(node == NULL) { return FALSE; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return TRUE; } crm_trace("Cannot fence %s", node->details->uname); return FALSE; } node_t * node_copy(const node_t *this_node) { node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { node_t *new_node = NULL; node_t *this_node = (node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { - const node_t *node_a = a; - const node_t *node_b = b; - - return strcmp(node_a->details->uname, node_b->details->uname); + const char *name_a = ((const node_t *) a)->details->uname; + const char *name_b = ((const node_t *) b)->details->uname; + + while (*name_a && *name_b) { + if (isdigit(*name_a) && isdigit(*name_b)) { + // If node names contain a number, sort numerically + + char *end_a = NULL; + char *end_b = NULL; + long num_a = strtol(name_a, &end_a, 10); + long num_b = strtol(name_b, &end_b, 10); + + // allow ordering e.g. 
007 > 7 + size_t len_a = end_a - name_a; + size_t len_b = end_b - name_b; + + if (num_a < num_b) { + return -1; + } else if (num_a > num_b) { + return 1; + } else if (len_a < len_b) { + return -1; + } else if (len_a > len_b) { + return 1; + } + name_a = end_a; + name_b = end_b; + } else { + // Compare non-digits case-insensitively + int lower_a = tolower(*name_a); + int lower_b = tolower(*name_b); + + if (lower_a < lower_b) { + return -1; + } else if (lower_a > lower_b) { + return 1; + } + ++name_a; + ++name_b; + } + } + if (!*name_a && *name_b) { + return -1; + } else if (*name_a && !*name_b) { + return 1; + } + return 0; } void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, GHashTable * nodes) { GHashTable *hash = nodes; GHashTableIter iter; node_t *node = NULL; if (rsc) { hash = rsc->allowed_nodes; } if (rsc && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't show the allocation scores for orphans */ return; } if (level == 0) { char score[128]; int len = sizeof(score); /* For now we want this in sorted order to keep the regression tests happy */ GListPtr gIter = NULL; GListPtr list = g_hash_table_get_values(hash); list = g_list_sort(list, sort_node_uname); gIter = list; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } else if (hash) { char score[128]; int len = sizeof(score); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s allocation score on %s: %s", comment, rsc->id, node->details->uname, score); } else { do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment, node->details->uname, score); } } } if (rsc && rsc->children) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; dump_node_scores_worker(level, file, function, line, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = crm_strdup_printf("%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, node_t * node) { char *dump_text = crm_strdup_printf("%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node) { char *dump_text = crm_strdup_printf("%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const 
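
The rewritten sort_node_uname() above is the core of this hunk. The following standalone program restates the same comparison over plain strings, purely to demonstrate the ordering it produces: digit runs compare numerically, longer zero-padded runs sort later on ties, and letters compare case-insensitively.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Demo-only copy of the digit-aware comparison from the patch */
static int natural_cmp(const char *a, const char *b)
{
    while (*a && *b) {
        if (isdigit((unsigned char)*a) && isdigit((unsigned char)*b)) {
            char *end_a, *end_b;
            long na = strtol(a, &end_a, 10);
            long nb = strtol(b, &end_b, 10);

            if (na != nb) {
                return (na < nb) ? -1 : 1;       /* numeric comparison */
            }
            if ((end_a - a) != (end_b - b)) {    /* e.g. "007" sorts after "7" */
                return ((end_a - a) < (end_b - b)) ? -1 : 1;
            }
            a = end_a;
            b = end_b;
        } else {
            int ca = tolower((unsigned char)*a);
            int cb = tolower((unsigned char)*b);

            if (ca != cb) {
                return (ca < cb) ? -1 : 1;       /* case-insensitive letters */
            }
            ++a;
            ++b;
        }
    }
    return *a ? 1 : (*b ? -1 : 0);               /* shorter name sorts first */
}

int main(void)
{
    /* strcmp() would put node10 before node9; natural_cmp() does not */
    printf("%d\n", natural_cmp("node9", "node10"));   /* -1 */
    printf("%d\n", natural_cmp("NODE2", "node2"));    /* 0  */
    return 0;
}
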
resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } action_t * custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (g_list_length(possible_matches) > 1) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s", action->id, action->uuid, (rsc? rsc->id : "no resource"), task, (on_node? on_node->details->uname : "no node")); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s", (optional? "optional" : " mandatory"), data_set->action_id, key, (rsc? rsc->id : "no resource"), task, (on_node? 
on_node->details->uname : "no node")); } action = calloc(1, sizeof(action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); } action->extra = crm_str_table_new(); action->meta = crm_str_table_new(); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } if (save_action) { pe_rsc_trace(rsc, "Action %d created", action->id); } } if (!optional && is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Unset optional on action %d", action->id); pe_clear_action_bit(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { pe_set_action_bit(action, pe_action_have_node_attrs); unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS, action->node->details->attrs, action->extra, NULL, FALSE, data_set->now); } if (is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid); pe_clear_action_bit(action, pe_action_runnable); } else if (is_not_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS) == NULL) { crm_debug("Action %s (unmanaged)", action->uuid); pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); /* action->runnable = FALSE; */ } else if (action->node->details->online == FALSE && (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)", action->uuid, action->node->details->uname); if (is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable"); } } else if (action->node->details->pending) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid); pe_action_set_reason(action, NULL, TRUE); pe_set_action_bit(action, pe_action_runnable); #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_stop) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", 
pe_action_runnable, TRUE); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)", action->node->details->uname, action->uuid); } } else if(is_not_set(action->flags, pe_action_runnable)) { pe_rsc_trace(rsc, "Action %s is runnable", action->uuid); //pe_action_set_reason(action, NULL, TRUE); pe_set_action_bit(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: set_bit(rsc->flags, pe_rsc_stopping); break; case start_rsc: clear_bit(rsc->flags, pe_rsc_starting); if (is_set(action->flags, pe_action_runnable)) { set_bit(rsc->flags, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static const char * unpack_operation_on_fail(action_t * action) { const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id); return NULL; } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval_spec = NULL; const char *enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = __xml_first_child(action->rsc->ops_xml); operation && !value; operation = __xml_next_element(operation)) { if (!crm_str_eq((const char *)operation->name, "op", TRUE)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) { continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; } value = on_fail; } } return value; } static xmlNode * find_min_interval_mon(resource_t * rsc, gboolean include_disabled) { guint interval_ms = 0; guint min_interval_ms = G_MAXUINT; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (safe_str_neq(name, RSC_STATUS)) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms && (interval_ms < min_interval_ms)) { min_interval_ms = interval_ms; op = operation; } } } return op; } static int unpack_start_delay(const char *value, 
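
The quorum handling in custom_action() above reduces to a small decision table. A condensed, hypothetical restatement (the enum and helper are illustrative only, and apply to actions whose needs is rsc_req_quorum):

#include <stdio.h>

/* no-quorum-policy values, as simplified stand-ins for Pacemaker's enum */
enum policy { NQ_IGNORE, NQ_FREEZE, NQ_STOP };

/* "ignore" always runs, "stop" never runs without quorum, and "freeze"
 * lets already-active resources continue as long as their role does not
 * increase (mirroring the rsc->next_role > rsc->role test above). */
static int runnable_without_quorum(enum policy p, int rsc_active, int role_increasing)
{
    switch (p) {
        case NQ_IGNORE: return 1;
        case NQ_STOP:   return 0;
        case NQ_FREEZE: return rsc_active && !role_increasing;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", runnable_without_quorum(NQ_FREEZE, 1, 0));  /* 1 */
    printf("%d\n", runnable_without_quorum(NQ_FREEZE, 0, 0));  /* 0 */
    return 0;
}
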
GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } static int unpack_interval_origin(const char *value, GHashTable *meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { int start_delay = 0; if ((interval_ms > 0) && (value != NULL)) { crm_time_t *origin = crm_time_new(value); if (origin && now) { crm_time_t *delay = NULL; int rc = crm_time_compare(origin, now); long long delay_s = 0; int interval_sec = interval_ms / 1000; crm_trace("Origin: %s, interval: %d", value, interval_sec); /* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */ while(rc > 0) { crm_time_add_seconds(origin, -interval_sec); rc = crm_time_compare(origin, now); } /* Now find the first "multiple" that occurs after 'now' */ while (rc < 0) { crm_time_add_seconds(origin, interval_sec); rc = crm_time_compare(origin, now); } delay = crm_time_calculate_duration(origin, now); crm_time_log(LOG_TRACE, "origin", origin, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "now", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration); delay_s = crm_time_get_seconds(delay); if (delay_s < 0) { delay_s = 0; } start_delay = delay_s * 1000; if (xml_obj) { crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj)); } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } crm_time_free(origin); crm_time_free(delay); } else if (!origin && xml_obj) { crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s", ID(xml_obj), value); } } return start_delay; } static int unpack_timeout(const char *value) { int timeout = crm_get_msec(value); if (timeout < 0) { timeout = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout; } int pe_get_configured_timeout(resource_t *rsc, const char *action, pe_working_set_t *data_set) { xmlNode *child = NULL; const char *timeout = NULL; int timeout_ms = 0; for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { timeout = crm_element_value(child, XML_ATTR_TIMEOUT); break; } } if (timeout == NULL && data_set->op_defaults) { GHashTable *action_meta = crm_str_table_new(); unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL, action_meta, NULL, FALSE, data_set->now); timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); } // @TODO check meta-attributes (including versioned meta-attributes) // @TODO maybe use min-interval monitor timeout as default for monitors timeout_ms = crm_get_msec(timeout); if (timeout_ms < 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout_ms; } #if ENABLE_VERSIONED_ATTRS static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, 
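
unpack_interval_origin() above phase-aligns a recurring operation's first run with a configured origin timestamp. A simplified standalone version of the same two-loop alignment, using plain time_t seconds instead of crm_time_t:

#include <stdio.h>
#include <time.h>

/* Given an interval-origin and an interval (seconds), compute the delay
 * before the first run so executions stay phase-aligned with the origin. */
static long start_delay_s(time_t origin, time_t now, long interval_s)
{
    /* Walk origin back to (or before) now if it lies in the future... */
    while (origin > now) {
        origin -= interval_s;
    }
    /* ...then forward to the first aligned point at or after now. */
    while (origin < now) {
        origin += interval_s;
    }
    return (long)(origin - now);
}

int main(void)
{
    time_t now = 1000;  /* pretend "now" */

    /* origin 120s in the past, 300s interval: next aligned slot is 180s away */
    printf("%ld\n", start_delay_s(now - 120, now, 300));  /* 180 */
    return 0;
}
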
XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { int start_delay = unpack_interval_origin(value, NULL, xml_obj, interval_ms, now); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout); } } } } #endif /*! * \brief Unpack operation XML into an action structure * * Unpack an operation's meta-attributes (normalizing the interval, timeout, * and start delay values as integer milliseconds), requirements, and * failure policy. * * \param[in,out] action Action to unpack into * \param[in] xml_obj Operation XML (or NULL if all defaults) * \param[in] container Resource that contains affected resource, if any * \param[in] data_set Cluster state */ void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set) { guint interval_ms = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; char *default_timeout = NULL; #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif CRM_CHECK(action && action->rsc, return); // Cluster-wide unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); // Probe timeouts default differently, so handle timeout default later default_timeout = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); if (default_timeout) { default_timeout = strdup(default_timeout); g_hash_table_remove(action->meta, XML_ATTR_TIMEOUT); } if (xml_obj) { xmlAttrPtr xIter = NULL; // take precedence over defaults unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, TRUE, data_set->now); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, rsc_details->versioned_parameters, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, rsc_details->versioned_meta, data_set->now); #endif /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the tag. */ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval_ms = crm_parse_interval_spec(value); } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { /* An orphaned recurring monitor will not have any XML. However, we * want the interval to be set, so the action can be properly detected * as a recurring monitor. Parse it from the key in this case. 
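
The timeout defaulting applied just below can be sketched compactly: a probe (a monitor with interval 0) borrows the timeout of the enabled recurring monitor with the smallest interval, falling back to the cluster default otherwise. Data and types here are hypothetical stand-ins for what find_min_interval_mon() reads from the operation XML.

#include <stdio.h>

struct mon { unsigned interval_ms; unsigned timeout_ms; int enabled; };

/* Pick the timeout of the enabled monitor with the smallest nonzero interval */
static unsigned probe_timeout(const struct mon *ops, int n, unsigned fallback_ms)
{
    unsigned best_interval = ~0u;
    unsigned timeout = fallback_ms;

    for (int i = 0; i < n; i++) {
        if (ops[i].enabled && ops[i].interval_ms
            && ops[i].interval_ms < best_interval) {
            best_interval = ops[i].interval_ms;
            timeout = ops[i].timeout_ms;
        }
    }
    return timeout;
}

int main(void)
{
    struct mon ops[] = { { 60000, 30000, 1 }, { 10000, 20000, 1 } };

    printf("%u\n", probe_timeout(ops, 2, 20000));  /* 20000: from the 10s monitor */
    return 0;
}
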
*/ parse_op_key(action->uuid, NULL, NULL, &interval_ms); } if (interval_ms > 0) { value_ms = crm_strdup_printf("%u", interval_ms); g_hash_table_replace(action->meta, strdup(field), value_ms); } else if (value) { g_hash_table_remove(action->meta, field); } // Handle timeout default, now that we know the interval if (g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT)) { free(default_timeout); } else { // Probe timeouts default to minimum-interval monitor's if (safe_str_eq(action->task, RSC_STATUS) && (interval_ms == 0)) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); if (value) { crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'", action->uuid, value); free(default_timeout); default_timeout = strdup(value); } } } if (default_timeout) { g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT), default_timeout); } } if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); value = "block"; // The above could destroy the original string } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense"); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* for baremetal remote nodes, ensure that any failure that results in * dropping an active connection to a remote node results in fencing of * the remote node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. 
start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (is_rsc_baremetal_remote_node(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && (interval_ms == 0)) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged baremetal remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence baremetal remote node (default)"; } else { value = "recover baremetal remote node connection (default)"; } if (action->rsc->remote_reconnect_ms) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); if (value) { pe_warn_once(pe_wo_role_after, "Support for role_after_failure is deprecated and will be removed in a future release"); } } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); unpack_interval_origin(value, action->meta, xml_obj, interval_ms, data_set->now); } value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); timeout = unpack_timeout(value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); #if ENABLE_VERSIONED_ATTRS unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, data_set->now); #endif } static xmlNode * find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled) { guint interval_ms = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); match_key = generate_op_key(rsc->id, name, interval_ms); if (safe_str_eq(key, match_key)) { op = 
operation; } free(match_key); if (rsc->clone_name) { match_key = generate_op_key(rsc->clone_name, name, interval_ms); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = generate_op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = generate_op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; print_resource(LOG_TRACE, "\t\t", rsc, FALSE); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? "" : ": ", (char *)key, (char *)value); } void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details) { long options = pe_print_log | pe_print_pending; if (rsc == NULL) { do_crm_log(log_level - 1, "%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? 
"" : ": "); return; } if (details) { options |= pe_print_details; } rsc->fns->print(rsc, pre_text, options, &log_level); } void pe_free_action(action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } #if ENABLE_VERSIONED_ATTRS if (action->rsc) { pe_free_rsc_action_details(action); } #endif free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, action->node->details->uname, on_node->details->uname); } } 
return result;
}

GListPtr
find_actions_exact(GListPtr input, const char *key, node_t * on_node)
{
    GListPtr gIter = input;
    GListPtr result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    for (; gIter != NULL; gIter = gIter->next) {
        action_t *action = (action_t *) gIter->data;

        crm_trace("Matching %s against %s", key, action->uuid);
        if (safe_str_neq(key, action->uuid)) {
            crm_trace("Key mismatch: %s vs. %s", key, action->uuid);
            continue;

        } else if (on_node == NULL || action->node == NULL) {
            crm_trace("on_node=%p, action->node=%p", on_node, action->node);
            continue;

        } else if (safe_str_eq(on_node->details->id, action->node->details->id)) {
            result = g_list_prepend(result, action);
        }
        crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id);
    }

    return result;
}

static void
resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag)
{
    node_t *match = NULL;

    if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
        && safe_str_eq(tag, "symmetric_default")) {
        /* This string comparison may be fragile, but exclusive resources and
         * exclusive nodes should not have the symmetric_default constraint
         * applied to them. */
        return;

    } else if (rsc->children) {
        GListPtr gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child_rsc = (resource_t *) gIter->data;

            resource_node_score(child_rsc, node, score, tag);
        }
    }

    pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
    match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
    if (match == NULL) {
        match = node_copy(node);
        g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
    }
    match->weight = merge_weights(match->weight, score);
}

void
resource_location(resource_t * rsc, node_t * node, int score, const char *tag,
                  pe_working_set_t * data_set)
{
    if (node != NULL) {
        resource_node_score(rsc, node, score, tag);

    } else if (data_set != NULL) {
        GListPtr gIter = data_set->nodes;

        for (; gIter != NULL; gIter = gIter->next) {
            node_t *node_iter = (node_t *) gIter->data;

            resource_node_score(rsc, node_iter, score, tag);
        }

    } else {
        GHashTableIter iter;
        node_t *node_iter = NULL;

        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
            resource_node_score(rsc, node_iter, score, tag);
        }
    }

    if (node == NULL && score == -INFINITY) {
        if (rsc->allocated_to) {
            crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
            free(rsc->allocated_to);
            rsc->allocated_to = NULL;
        }
    }
}

#define sort_return(an_int, why) do {                                   \
        free(a_uuid);                                                   \
        free(b_uuid);                                                   \
        crm_trace("%s (%d) %c %s (%d) : %s",                            \
                  a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',   \
                  b_xml_id, b_call_id, why);                            \
        return an_int;                                                  \
    } while(0)

gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
    int a_call_id = -1;
    int b_call_id = -1;

    char *a_uuid = NULL;
    char *b_uuid = NULL;

    const xmlNode *xml_a = a;
    const xmlNode *xml_b = b;

    const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
    const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);

    if (safe_str_eq(a_xml_id, b_xml_id)) {
        /* We have duplicate lrm_rsc_op entries in the status
         * section which is unlikely to be a good thing
         *    - we can handle it easily enough, but we need to get
         *    to the bottom of why it's happening.
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ int last_a = -1; int last_b = -1; crm_element_value_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %d vs %d", last_a, last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; int dummy = -1; const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic a"); } if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (e.g. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
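
Stripped of the pending-operation special cases, sort_op_by_callid() above orders history entries by call id and tie-breaks on last-rc-change. A simplified comparator expressing only that core rule (the transition-magic handling is deliberately omitted):

#include <stdio.h>

/* Compare two history entries by (call id, last-rc-change) */
static int cmp_history(int call_a, int change_a, int call_b, int change_b)
{
    if (call_a != call_b) {
        return (call_a < call_b) ? -1 : 1;      /* lower call id happened first */
    }
    if (change_a != change_b) {
        return (change_a < change_b) ? -1 : 1;  /* same op: order on rc-change */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", cmp_history(5, 100, 7, 90));  /* -1: call id decides */
    printf("%d\n", cmp_history(7, 100, 7, 90));  /* 1: newer last-rc-change */
    return 0;
}
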
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { crm_config_err("%s is not part of a promotable clone resource, a %s of '%s' makes no sense", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */
    CRM_ASSERT(lh_action != rh_action);

    /* Filter dups, otherwise update_action_states() has too much work to do */
    gIter = lh_action->actions_after;
    for (; gIter != NULL; gIter = gIter->next) {
        action_wrapper_t *after = (action_wrapper_t *) gIter->data;

        if (after->action == rh_action && (after->type & order)) {
            return FALSE;
        }
    }

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = rh_action;
    wrapper->type = order;

    list = lh_action->actions_after;
    list = g_list_prepend(list, wrapper);
    lh_action->actions_after = list;

    wrapper = NULL;

/*  order |= pe_order_implies_then; */
/*  order ^= pe_order_implies_then; */

    wrapper = calloc(1, sizeof(action_wrapper_t));
    wrapper->action = lh_action;
    wrapper->type = order;
    list = rh_action->actions_before;
    list = g_list_prepend(list, wrapper);
    rh_action->actions_before = list;
    return TRUE;
}

action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
    action_t *op = NULL;

    if(data_set->singletons) {
        op = g_hash_table_lookup(data_set->singletons, name);
    }
    if (op == NULL) {
        op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
        set_bit(op->flags, pe_action_pseudo);
        set_bit(op->flags, pe_action_runnable);
    }

    return op;
}

void
destroy_ticket(gpointer data)
{
    ticket_t *ticket = data;

    if (ticket->state) {
        g_hash_table_destroy(ticket->state);
    }
    free(ticket->id);
    free(ticket);
}

ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
    ticket_t *ticket = NULL;

    if (ticket_id == NULL || strlen(ticket_id) == 0) {
        return NULL;
    }

    if (data_set->tickets == NULL) {
        data_set->tickets =
            g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket);
    }

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {

        ticket = calloc(1, sizeof(ticket_t));
        if (ticket == NULL) {
            crm_err("Cannot allocate ticket '%s'", ticket_id);
            return NULL;
        }

        crm_trace("Creating ticket entry for %s", ticket_id);

        ticket->id = strdup(ticket_id);
        ticket->granted = FALSE;
        ticket->last_granted = -1;
        ticket->standby = FALSE;
        ticket->state = crm_str_table_new();

        g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
    }

    return ticket;
}

static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
    if (param_set && param_string) {
        xmlAttrPtr xIter = param_set->properties;

        while (xIter) {
            const char *prop_name = (const char *)xIter->name;
            char *name = crm_strdup_printf(" %s ", prop_name);
            char *match = strstr(param_string, name);

            free(name);

            // Do now, because current entry might get removed below
            xIter = xIter->next;

            if (need_present && match == NULL) {
                crm_trace("%s not found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);

            } else if (need_present == FALSE && match) {
                crm_trace("%s found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);
            }
        }
    }
}

#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
{
    GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
    char *key = NULL;
    char *value = NULL;
    GHashTableIter iter;

    g_hash_table_iter_init(&iter, hash);
    while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
        crm_xml_add(params, key, value);
    }
    g_hash_table_destroy(hash);
}
#endif

static op_digest_cache_t *
rsc_action_digest(resource_t * rsc, const char *task, const char *key,
                  node_t * node, xmlNode * xml_op, pe_working_set_t * data_set)
{
    op_digest_cache_t *data = NULL;

    data =
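
filter_parameters() above tests list membership with a space-padding trick: the list has the form " passwd password " and each candidate name is wrapped in spaces before strstr(), so a partial name cannot match. A standalone illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Wrap the name in spaces and search, as filter_parameters() does */
static int in_param_list(const char *list, const char *name)
{
    size_t n = strlen(name) + 3;
    char *needle = malloc(n);
    int found;

    if (needle == NULL) {
        return 0;
    }
    snprintf(needle, n, " %s ", name);
    found = (strstr(list, needle) != NULL);
    free(needle);
    return found;
}

int main(void)
{
    const char *secure = " passwd password ";

    printf("%d %d\n", in_param_list(secure, "passwd"),
           in_param_list(secure, "pass"));   /* prints: 1 0 */
    return 0;
}
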
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
    if (param_set && param_string) {
        xmlAttrPtr xIter = param_set->properties;

        while (xIter) {
            const char *prop_name = (const char *)xIter->name;
            char *name = crm_strdup_printf(" %s ", prop_name);
            char *match = strstr(param_string, name);

            free(name);

            // Do now, because current entry might get removed below
            xIter = xIter->next;

            if (need_present && match == NULL) {
                crm_trace("%s not found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);

            } else if (need_present == FALSE && match) {
                crm_trace("%s found in %s", prop_name, param_string);
                xml_remove_prop(param_set, prop_name);
            }
        }
    }
}

#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
{
    GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
    char *key = NULL;
    char *value = NULL;
    GHashTableIter iter;

    g_hash_table_iter_init(&iter, hash);
    while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
        crm_xml_add(params, key, value);
    }
    g_hash_table_destroy(hash);
}
#endif

static op_digest_cache_t *
rsc_action_digest(resource_t * rsc, const char *task, const char *key,
                  node_t * node, xmlNode * xml_op, pe_working_set_t * data_set)
{
    op_digest_cache_t *data = NULL;

    data = g_hash_table_lookup(node->details->digest_cache, key);
    if (data == NULL) {
        GHashTable *local_rsc_params = crm_str_table_new();
        action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set);
#if ENABLE_VERSIONED_ATTRS
        xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
        const char *ra_version = NULL;
#endif

        const char *op_version;
        const char *restart_list = NULL;
        const char *secure_list = " passwd password ";

        data = calloc(1, sizeof(op_digest_cache_t));
        CRM_ASSERT(data != NULL);

        get_rsc_attributes(local_rsc_params, rsc, node, data_set);
#if ENABLE_VERSIONED_ATTRS
        pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
#endif

        data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);

        // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
        if (container_fix_remote_addr_in(rsc, data->params_all, "addr")) {
            crm_trace("Fixed addr for %s on %s", rsc->id, node->details->uname);
        }

        g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
        g_hash_table_foreach(action->extra, hash2field, data->params_all);
        g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
        g_hash_table_foreach(action->meta, hash2metafield, data->params_all);

        if(xml_op) {
            secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
            restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);

            op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
#if ENABLE_VERSIONED_ATTRS
            ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
#endif

        } else {
            op_version = CRM_FEATURE_SET;
        }

#if ENABLE_VERSIONED_ATTRS
        append_versioned_params(local_versioned_params, ra_version, data->params_all);
        append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all);

        {
            pe_rsc_action_details_t *details = pe_rsc_action_details(action);
            append_versioned_params(details->versioned_parameters, ra_version, data->params_all);
        }
#endif

        filter_action_parameters(data->params_all, op_version);

        g_hash_table_destroy(local_rsc_params);
        pe_free_action(action);

        data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);

        if (is_set(data_set->flags, pe_flag_sanitized)) {
            data->params_secure = copy_xml(data->params_all);
            if(secure_list) {
                filter_parameters(data->params_secure, secure_list, FALSE);
            }
            data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
        }

        if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
            data->params_restart = copy_xml(data->params_all);
            if (restart_list) {
                filter_parameters(data->params_restart, restart_list, TRUE);
            }
            data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
        }

        g_hash_table_insert(node->details->digest_cache, strdup(key), data);
    }

    return data;
}
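
/* Illustrative note: filter_parameters() matches attribute names against a
 * space-delimited list (each name is wrapped in spaces before the strstr()),
 * which is why the built-in secure list above is written as " passwd password ".
 * A hypothetical call to strip those attributes from a copy of the parameters:
 *
 *     filter_parameters(params_copy, " passwd password ", FALSE);
 */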
op_digest_cache_t *
rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
                      pe_working_set_t * data_set)
{
    op_digest_cache_t *data = NULL;

    char *key = NULL;

    guint interval_ms = 0;

    const char *op_version;
    const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
    const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
    const char *digest_all;
    const char *digest_restart;

    CRM_ASSERT(node != NULL);

    op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
    digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
    digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);

    interval_ms = crm_parse_ms(interval_ms_s);
    key = generate_op_key(rsc->id, task, interval_ms);
    data = rsc_action_digest(rsc, task, key, node, xml_op, data_set);

    data->rc = RSC_DIGEST_MATCH;
    if (digest_restart && data->digest_restart_calc
        && strcmp(data->digest_restart_calc, digest_restart) != 0) {
        pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
                    key, node->details->uname,
                    crm_str(digest_restart), data->digest_restart_calc,
                    op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
        data->rc = RSC_DIGEST_RESTART;

    } else if (digest_all == NULL) {
        /* it is unknown what the previous op digest was */
        data->rc = RSC_DIGEST_UNKNOWN;

    } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
        pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s",
                    key, node->details->uname,
                    crm_str(digest_all), data->digest_all_calc,
                    (interval_ms > 0)? "reschedule" : "reload",
                    op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
        data->rc = RSC_DIGEST_ALL;
    }

    free(key);
    return data;
}

#define STONITH_DIGEST_TASK "stonith-on"

static op_digest_cache_t *
fencing_action_digest_cmp(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
    char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0);
    op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, data_set);

    const char *digest_all = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
    const char *digest_secure = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);

    /* No 'reloads' for fencing device changes
     *
     * We use the resource id + agent + digest so that we can detect
     * changes to the agent and/or the parameters used
     */
    char *search_all = crm_strdup_printf("%s:%s:%s", rsc->id,
                                         (const char *) g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE),
                                         data->digest_all_calc);
    char *search_secure = crm_strdup_printf("%s:%s:%s", rsc->id,
                                            (const char *) g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE),
                                            data->digest_secure_calc);

    data->rc = RSC_DIGEST_ALL;
    if (digest_all == NULL) {
        /* it is unknown what the previous op digest was */
        data->rc = RSC_DIGEST_UNKNOWN;

    } else if (strstr(digest_all, search_all)) {
        data->rc = RSC_DIGEST_MATCH;

    } else if(digest_secure && data->digest_secure_calc) {
        if(strstr(digest_secure, search_secure)) {
            if (is_set(data_set->flags, pe_flag_stdout)) {
                printf("Only 'private' parameters to %s for unfencing %s changed\n",
                       rsc->id, node->details->uname);
            }
            data->rc = RSC_DIGEST_MATCH;
        }
    }

    if (is_set(data_set->flags, pe_flag_sanitized)
        && is_set(data_set->flags, pe_flag_stdout)
        && (data->rc == RSC_DIGEST_ALL)
        && data->digest_secure_calc) {
        printf("Parameters to %s for unfencing %s changed, try '%s:%s:%s'\n",
               rsc->id, node->details->uname, rsc->id,
               (const char *) g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE),
               data->digest_secure_calc);
    }

    free(key);
    free(search_all);
    free(search_secure);
    return data;
}
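
/* Illustrative sketch (assumed caller, not from this file): consumers branch
 * on the rc assigned above, roughly:
 *
 *     op_digest_cache_t *digest = rsc_action_digest_cmp(rsc, xml_op, node, data_set);
 *
 *     switch (digest->rc) {
 *         case RSC_DIGEST_RESTART:  // restart-type parameters changed
 *         case RSC_DIGEST_ALL:      // other parameters changed (reload/reschedule)
 *         case RSC_DIGEST_UNKNOWN:  // no stored digest to compare against
 *         case RSC_DIGEST_MATCH:    // nothing changed
 *             break;
 *     }
 */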
const char *
rsc_printable_id(resource_t *rsc)
{
    if (is_not_set(rsc->flags, pe_rsc_unique)) {
        return ID(rsc->xml);
    }
    return rsc->id;
}

void
clear_bit_recursive(resource_t * rsc, unsigned long long flag)
{
    GListPtr gIter = rsc->children;

    clear_bit(rsc->flags, flag);
    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        clear_bit_recursive(child_rsc, flag);
    }
}

void
set_bit_recursive(resource_t * rsc, unsigned long long flag)
{
    GListPtr gIter = rsc->children;

    set_bit(rsc->flags, flag);
    for (; gIter != NULL; gIter = gIter->next) {
        resource_t *child_rsc = (resource_t *) gIter->data;

        set_bit_recursive(child_rsc, flag);
    }
}

static GListPtr
find_unfencing_devices(GListPtr candidates, GListPtr matches)
{
    for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
        resource_t *candidate = gIter->data;
        const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES);
        const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);

        if(candidate->children) {
            matches = find_unfencing_devices(candidate->children, matches);

        } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) {
            continue;

        } else if (crm_str_eq(provides, "unfencing", FALSE)
                   || crm_str_eq(requires, "unfencing", FALSE)) {
            matches = g_list_prepend(matches, candidate);
        }
    }
    return matches;
}

action_t *
pe_fence_op(node_t * node, const char *op, bool optional, const char *reason,
            pe_working_set_t * data_set)
{
    char *op_key = NULL;
    action_t *stonith_op = NULL;

    if(op == NULL) {
        op = data_set->stonith_action;
    }

    op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);

    if(data_set->singletons) {
        stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
    }

    if(stonith_op == NULL) {
        stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);

        add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
        add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
        add_hash_param(stonith_op->meta, "stonith_action", op);

        if(is_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) {
            /* Extra work to detect device changes on remotes
             *
             * We may do this for all nodes in the future, but for now
             * the check_action_definition() based stuff works fine.
             *
             * Use "stonith-on" to avoid creating cache entries for
             * operations check_action_definition() would look for.
             */
            long max = 1024;
            long digests_all_offset = 0;
            long digests_secure_offset = 0;

            char *digests_all = malloc(max);
            char *digests_secure = malloc(max);
            GListPtr matches = find_unfencing_devices(data_set->resources, NULL);

            for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
                resource_t *match = gIter->data;
                op_digest_cache_t *data = fencing_action_digest_cmp(match, node, data_set);

                if(data->rc == RSC_DIGEST_ALL) {
                    optional = FALSE;
                    crm_notice("Unfencing %s (remote): because the definition of %s changed",
                               node->details->uname, match->id);
                    if (is_set(data_set->flags, pe_flag_stdout)) {
                        fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n",
                                node->details->uname, match->id);
                    }
                }

                digests_all_offset += snprintf(
                    digests_all+digests_all_offset, max-digests_all_offset,
                    "%s:%s:%s,", match->id,
                    (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE),
                    data->digest_all_calc);

                digests_secure_offset += snprintf(
                    digests_secure+digests_secure_offset, max-digests_secure_offset,
                    "%s:%s:%s,", match->id,
                    (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE),
                    data->digest_secure_calc);
            }
            g_hash_table_insert(stonith_op->meta,
                                strdup(XML_OP_ATTR_DIGESTS_ALL),
                                digests_all);
            g_hash_table_insert(stonith_op->meta,
                                strdup(XML_OP_ATTR_DIGESTS_SECURE),
                                digests_secure);
        }

    } else {
        free(op_key);
    }

    if(optional == FALSE && pe_can_fence(data_set, node)) {
        pe_action_required(stonith_op, NULL, reason);

    } else if(reason && stonith_op->reason == NULL) {
        stonith_op->reason = strdup(reason);
    }

    return stonith_op;
}
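
/* Illustrative note: the meta-attributes built above are comma-separated
 * "id:agent:digest" triples, one per unfencing device, so a stored value might
 * look like (hypothetical ids and hashes):
 *
 *     fence1:fence_ipmilan:0ad4bb1...,fence2:fence_scsi:9c3f0e2...,
 *
 * which is what lets fencing_action_digest_cmp() match with a plain strstr().
 */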
void
trigger_unfencing(
    resource_t * rsc, node_t *node, const char *reason, action_t *dependency,
    pe_working_set_t * data_set)
{
    if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
        /* No resources require it */
        return;

    } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
        /* Wasn't a stonith device */
        return;

    } else if(node
              && node->details->online
              && node->details->unclean == FALSE
              && node->details->shutdown == FALSE) {
        action_t *unfence = pe_fence_op(node, "on", FALSE, reason, data_set);

        if(dependency) {
            order_actions(unfence, dependency, pe_order_optional);
        }

    } else if(rsc) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if(node->details->online
               && node->details->unclean == FALSE
               && node->details->shutdown == FALSE) {
                trigger_unfencing(rsc, node, reason, dependency, data_set);
            }
        }
    }
}

gboolean
add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref)
{
    tag_t *tag = NULL;
    GListPtr gIter = NULL;
    gboolean is_existing = FALSE;

    CRM_CHECK(tags && tag_name && obj_ref, return FALSE);

    tag = g_hash_table_lookup(tags, tag_name);
    if (tag == NULL) {
        tag = calloc(1, sizeof(tag_t));
        if (tag == NULL) {
            return FALSE;
        }
        tag->id = strdup(tag_name);
        tag->refs = NULL;
        g_hash_table_insert(tags, strdup(tag_name), tag);
    }

    for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
        const char *existing_ref = (const char *) gIter->data;

        if (crm_str_eq(existing_ref, obj_ref, TRUE)){
            is_existing = TRUE;
            break;
        }
    }

    if (is_existing == FALSE) {
        tag->refs = g_list_append(tag->refs, strdup(obj_ref));
        crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
    }

    return TRUE;
}

void pe_action_set_flag_reason(const char *function, long line,
                               pe_action_t *action, pe_action_t *reason, const char *text,
                               enum pe_action_flags flags, bool overwrite)
{
    bool unset = FALSE;
    bool update = FALSE;
    const char *change = NULL;

    if(is_set(flags, pe_action_runnable)) {
        unset = TRUE;
        change = "unrunnable";

    } else if(is_set(flags, pe_action_optional)) {
        unset = TRUE;
        change = "required";

    } else if(is_set(flags, pe_action_migrate_runnable)) {
        unset = TRUE;
        overwrite = TRUE;
        change = "unrunnable";

    } else if(is_set(flags, pe_action_dangle)) {
        change = "dangling";

    } else if(is_set(flags, pe_action_requires_any)) {
        change = "required";

    } else {
        crm_err("Unknown flag change to %x by %s: 0x%s",
                flags, action->uuid, (reason? reason->uuid : "0"));
    }

    if(unset) {
        if(is_set(action->flags, flags)) {
            action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags);
            update = TRUE;
        }

    } else {
        if(is_not_set(action->flags, flags)) {
            action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags);
            update = TRUE;
        }
    }

    if((change && update) || text) {
        char *reason_text = NULL;

        if(reason == NULL) {
            pe_action_set_reason(action, text, overwrite);

        } else if(reason->rsc == NULL) {
            reason_text = crm_strdup_printf("%s %s%c %s",
                                            change, reason->task, text?':':0, text?text:"");
        } else {
            reason_text = crm_strdup_printf("%s %s %s%c %s",
                                            change, reason->rsc->id, reason->task,
                                            text?':':0, text?text:"NA");
        }

        if(reason_text && action->rsc != reason->rsc) {
            pe_action_set_reason(action, reason_text, overwrite);
        }
        free(reason_text);
    }
}

void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
    if(action->reason && overwrite) {
        pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
                     action->uuid, action->reason, reason);
        free(action->reason);
        action->reason = NULL;
    }
    if(action->reason == NULL) {
        if(reason) {
            pe_rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, reason);
            action->reason = strdup(reason);
        } else {
            action->reason = NULL;
        }
    }
}
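
/* Illustrative sketch (an assumption based on the pe_action_required() call in
 * pe_fence_op() above, not a definition from this file): callers typically
 * reach pe_action_set_flag_reason() through a convenience macro that records
 * the call site, along the lines of:
 *
 *     #define pe_action_required(action, reason, text)                  \
 *         pe_action_set_flag_reason(__FUNCTION__, __LINE__, action,     \
 *                                   reason, text, pe_action_runnable, FALSE)
 */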