diff --git a/cts/scheduler/remote-connection-shutdown.dot b/cts/scheduler/remote-connection-shutdown.dot
new file mode 100644
index 0000000000..74eb9e3e40
--- /dev/null
+++ b/cts/scheduler/remote-connection-shutdown.dot
@@ -0,0 +1,57 @@
+ digraph "g" {
+"compute-0_stop_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"compute-unfence-trigger-clone_stop_0" -> "compute-unfence-trigger-clone_stopped_0" [ style = bold]
+"compute-unfence-trigger-clone_stop_0" -> "compute-unfence-trigger_stop_0 compute-0" [ style = bold]
+"compute-unfence-trigger-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"compute-unfence-trigger-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"compute-unfence-trigger_stop_0 compute-0" -> "compute-0_stop_0 controller-0" [ style = bold]
+"compute-unfence-trigger_stop_0 compute-0" -> "compute-unfence-trigger-clone_stopped_0" [ style = bold]
+"compute-unfence-trigger_stop_0 compute-0" [ style=bold color="green" fontcolor="black"]
+"nova-evacuate_monitor_10000 database-1" [ style=bold color="green" fontcolor="black"]
+"nova-evacuate_start_0 database-1" -> "nova-evacuate_monitor_10000 database-1" [ style = bold]
+"nova-evacuate_start_0 database-1" [ style=bold color="green" fontcolor="black"]
+"nova-evacuate_stop_0 database-0" -> "nova-evacuate_start_0 database-1" [ style = bold]
+"nova-evacuate_stop_0 database-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 database-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 database-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 database-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-1" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_clear_failcount_0 messaging-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_compute-fence-nova_start_0 database-0" -> "stonith-fence_compute-fence-nova_monitor_60000 database-0" [ style = bold]
+"stonith-fence_compute-fence-nova_start_0 database-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254001f5f3c_monitor_60000 messaging-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" -> "stonith-fence_ipmilan-5254001f5f3c_monitor_60000 messaging-0" [ style = bold]
+"stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-5254001f5f3c_stop_0 database-2" -> "stonith-fence_ipmilan-5254001f5f3c_start_0 messaging-0" [ style = bold]
+"stonith-fence_ipmilan-5254001f5f3c_stop_0 database-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-52540033df9c_monitor_60000 database-2" [ style=bold color="green" fontcolor="black"]
+"stonith-fence_ipmilan-52540033df9c_start_0 database-2" -> "stonith-fence_ipmilan-52540033df9c_monitor_60000 database-2" [ style = bold]
"stonith-fence_ipmilan-52540033df9c_monitor_60000 database-2" [ style = bold] +"stonith-fence_ipmilan-52540033df9c_start_0 database-2" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-52540033df9c_stop_0 database-1" -> "stonith-fence_ipmilan-52540033df9c_start_0 database-2" [ style = bold] +"stonith-fence_ipmilan-52540033df9c_stop_0 database-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254003f88b4_monitor_60000 messaging-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" -> "stonith-fence_ipmilan-5254003f88b4_monitor_60000 messaging-1" [ style = bold] +"stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254003f88b4_stop_0 messaging-0" -> "stonith-fence_ipmilan-5254003f88b4_start_0 messaging-1" [ style = bold] +"stonith-fence_ipmilan-5254003f88b4_stop_0 messaging-0" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254007b7920_monitor_60000 messaging-2" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" -> "stonith-fence_ipmilan-5254007b7920_monitor_60000 messaging-2" [ style = bold] +"stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254007b7920_stop_0 messaging-1" -> "stonith-fence_ipmilan-5254007b7920_start_0 messaging-2" [ style = bold] +"stonith-fence_ipmilan-5254007b7920_stop_0 messaging-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254009cb549_monitor_60000 database-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254009cb549_start_0 database-1" -> "stonith-fence_ipmilan-5254009cb549_monitor_60000 database-1" [ style = bold] +"stonith-fence_ipmilan-5254009cb549_start_0 database-1" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-5254009cb549_stop_0 database-0" -> "stonith-fence_ipmilan-5254009cb549_start_0 database-1" [ style = bold] +"stonith-fence_ipmilan-5254009cb549_stop_0 database-0" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-525400ffc780_monitor_60000 database-0" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-525400ffc780_start_0 database-0" -> "stonith-fence_ipmilan-525400ffc780_monitor_60000 database-0" [ style = bold] +"stonith-fence_ipmilan-525400ffc780_start_0 database-0" [ style=bold color="green" fontcolor="black"] +"stonith-fence_ipmilan-525400ffc780_stop_0 messaging-2" -> "stonith-fence_ipmilan-525400ffc780_start_0 database-0" [ style = bold] +"stonith-fence_ipmilan-525400ffc780_stop_0 messaging-2" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/remote-connection-shutdown.exp b/cts/scheduler/remote-connection-shutdown.exp new file mode 100644 index 0000000000..f3c3424faa --- /dev/null +++ b/cts/scheduler/remote-connection-shutdown.exp @@ -0,0 +1,402 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/remote-connection-shutdown.scores b/cts/scheduler/remote-connection-shutdown.scores new file mode 100644 index 0000000000..003b067701 --- /dev/null +++ b/cts/scheduler/remote-connection-shutdown.scores @@ -0,0 +1,2560 @@ +Allocation scores: +Only 'private' parameters to nova-evacuate_monitor_10000 on database-0 changed: 0:0;259:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to nova-evacuate_start_0 on database-0 changed: 0:0;258:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed +Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_monitor_60000 on database-2 changed: 0:0;265:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_start_0 on database-2 changed: 0:0;263:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_monitor_60000 on database-1 changed: 0:0;263:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_start_0 on database-1 changed: 0:0;261:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_monitor_60000 on messaging-0 changed: 0:0;269:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_start_0 on messaging-0 changed: 0:0;267:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400642894_monitor_60000 on messaging-2 changed: 0:0;274:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400642894_start_0 on messaging-2 changed: 0:0;272:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_monitor_60000 on messaging-1 changed: 0:0;273:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_start_0 on messaging-1 changed: 0:0;271:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_monitor_60000 on database-0 changed: 0:0;323:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_start_0 on database-0 changed: 0:0;322:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_monitor_60000 on messaging-0 changed: 0:0;325:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_start_0 on messaging-0 changed: 0:0;324:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_monitor_60000 on database-2 changed: 0:0;320:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_start_0 on database-2 changed: 0:0;319:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_monitor_60000 on database-1 changed: 0:0;331:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 
'private' parameters to stonith-fence_ipmilan-525400dc0f81_start_0 on database-1 changed: 0:0;330:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400e10267_monitor_60000 on messaging-1 changed: 0:0;326:1318:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400e10267_start_0 on messaging-1 changed: 0:0;320:1317:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_monitor_60000 on messaging-2 changed: 0:0;323:50:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_start_0 on messaging-2 changed: 0:0;321:49:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Using the original execution date of: 2020-11-17 07:03:16Z +galera:0 promotion score on galera-bundle-0: 100 +galera:1 promotion score on galera-bundle-1: 100 +galera:2 promotion score on galera-bundle-2: 100 +ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 10 +ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5 +ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5 +pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_allocate: galera-bundle-1 
allocation score on messaging-1: 0 +pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: 
galera-bundle-podman-1 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0 +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0 +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0 +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501 +pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501 +pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501 +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 
+pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle 
allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 
INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY 
+pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: 
haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 
allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0 
+pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: 
haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0 +pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0 +pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0 +pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY 
+pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 
allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on 
messaging-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 10 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 5 +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 5 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 
+pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501 +pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY +pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501 +pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 
INFINITY +pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501 +pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on 
messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 
allocation score on controller-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0 +pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501 +pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501 +pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501 +pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0 +pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0 +pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: 
-INFINITY +pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0 +pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0 +pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY 
+pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0 +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0 +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0 +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501 +pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501 +pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501 +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger-clone allocation 
score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-0: 1 +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1 +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: 
-INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 
allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: 
-INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on 
messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: 
compute-unfence-trigger:16 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on 
rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: 
compute-unfence-trigger:19 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on compute-1: 0 
+pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-0: 0 +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on compute-1: 0 +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__clone_allocate: compute-unfence-trigger:21 
allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-0: 0
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on compute-1: 0
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY
+pcmk__clone_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on compute-0: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on compute-1: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on database-0: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on database-1: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on database-2: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-0: 0
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-1: 0
+pcmk__clone_allocate: galera-bundle-master allocation score on galera-bundle-2: 0
+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: galera-bundle-master allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__clone_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__clone_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-0: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on compute-1: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-0: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-1: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on database-2: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: 0
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: 0
+pcmk__clone_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: 0
+pcmk__clone_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
+pcmk__clone_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
+pcmk__clone_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on compute-0: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on compute-1: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-0: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-1: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on database-2: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: 0
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: 0
+pcmk__clone_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: 0
+pcmk__clone_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__clone_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__clone_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on compute-0: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on compute-1: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-0: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-1: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on controller-2: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on database-0: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on database-1: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on database-2: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-0: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-1: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on messaging-2: -INFINITY
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-0: 0
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-1: 0
+pcmk__clone_allocate: redis-bundle-master allocation score on redis-bundle-2: 0
+pcmk__clone_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__clone_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__clone_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__native_allocate: compute-0 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-0 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-1 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-1 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-1 allocation score on controller-0: 0
+pcmk__native_allocate: compute-1 allocation score on controller-1: 0
+pcmk__native_allocate: compute-1 allocation score on controller-2: 0
+pcmk__native_allocate: compute-1 allocation score on database-0: 0
+pcmk__native_allocate: compute-1 allocation score on database-1: 0
+pcmk__native_allocate: compute-1 allocation score on database-2: 0
+pcmk__native_allocate: compute-1 allocation score on messaging-0: 0
+pcmk__native_allocate: compute-1 allocation score on messaging-1: 0
+pcmk__native_allocate: compute-1 allocation score on messaging-2: 0
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:0 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on compute-1: 1
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:1 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:10 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:11 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:12 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:13 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:14 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:15 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:16 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:17 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:18 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:19 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:2 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:20 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:21 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:22 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:3 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:4 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:5 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:6 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:7 allocation score on redis-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on compute-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on controller-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on database-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on galera-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on messaging-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__native_allocate: compute-unfence-trigger:8 allocation score on 
redis-bundle-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:8 allocation score on redis-bundle-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on compute-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on compute-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on controller-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on database-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on galera-bundle-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on ovn-dbs-bundle-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on rabbitmq-bundle-2: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-0: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-1: -INFINITY +pcmk__native_allocate: compute-unfence-trigger:9 allocation score on redis-bundle-2: -INFINITY +pcmk__native_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-0 allocation score on controller-0: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on controller-1: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on controller-2: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on database-0: 10000 +pcmk__native_allocate: galera-bundle-0 allocation score on database-1: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on database-2: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on messaging-0: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on messaging-1: 0 +pcmk__native_allocate: galera-bundle-0 allocation score on messaging-2: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-1 allocation score on controller-0: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on 
controller-1: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on controller-2: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on database-0: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on database-1: 10000 +pcmk__native_allocate: galera-bundle-1 allocation score on database-2: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on messaging-0: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on messaging-1: 0 +pcmk__native_allocate: galera-bundle-1 allocation score on messaging-2: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-2 allocation score on controller-0: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on controller-1: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on controller-2: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on database-0: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on database-1: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on database-2: 10000 +pcmk__native_allocate: galera-bundle-2 allocation score on messaging-0: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on messaging-1: 0 +pcmk__native_allocate: galera-bundle-2 allocation score on messaging-2: 0 +pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-0: 0 +pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-1: 0 +pcmk__native_allocate: galera-bundle-podman-0 allocation score on database-2: 0 +pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-1: 0 +pcmk__native_allocate: galera-bundle-podman-1 allocation score on database-2: 0 +pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation 
score on controller-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on database-2: 0 +pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: galera:0 allocation score on galera-bundle-0: INFINITY +pcmk__native_allocate: galera:1 allocation score on galera-bundle-1: INFINITY +pcmk__native_allocate: galera:2 allocation score on galera-bundle-2: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__native_allocate: 
haproxy-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-0: INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-1: 0 +pcmk__native_allocate: ip-10.0.0.150 allocation score on controller-2: 0 +pcmk__native_allocate: ip-10.0.0.150 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-10.0.0.150 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-0: 0 +pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-1: 0 +pcmk__native_allocate: ip-172.17.1.150 allocation score on controller-2: INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.150 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-0: 0 +pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-1: INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on controller-2: 0 +pcmk__native_allocate: ip-172.17.1.151 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.151 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on controller-2: INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on 
database-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-172.17.1.57 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-0: INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-1: 0 +pcmk__native_allocate: ip-172.17.3.150 allocation score on controller-2: 0 +pcmk__native_allocate: ip-172.17.3.150 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-172.17.3.150 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-0: 0 +pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-1: INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on controller-2: 0 +pcmk__native_allocate: ip-172.17.4.150 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-172.17.4.150 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-0: 0 +pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-1: 0 +pcmk__native_allocate: ip-192.168.24.150 allocation score on controller-2: INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on database-0: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on database-1: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on database-2: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ip-192.168.24.150 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: nova-evacuate allocation score on compute-0: -INFINITY +pcmk__native_allocate: nova-evacuate allocation score on compute-1: -INFINITY +pcmk__native_allocate: nova-evacuate allocation score on controller-0: 0 +pcmk__native_allocate: nova-evacuate allocation score on controller-1: 0 +pcmk__native_allocate: nova-evacuate allocation score on controller-2: 0 +pcmk__native_allocate: nova-evacuate 
allocation score on database-0: 0 +pcmk__native_allocate: nova-evacuate allocation score on database-1: 0 +pcmk__native_allocate: nova-evacuate allocation score on database-2: 0 +pcmk__native_allocate: nova-evacuate allocation score on messaging-0: 0 +pcmk__native_allocate: nova-evacuate allocation score on messaging-1: 0 +pcmk__native_allocate: nova-evacuate allocation score on messaging-2: 0 +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0 +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0 +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0 +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 10000 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 10000 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 10000 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score 
on controller-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0 +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY +pcmk__native_allocate: ovndb_servers:1 
allocation score on ovn-dbs-bundle-1: INFINITY +pcmk__native_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-0: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-1: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on database-2: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 10000 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0 +pcmk__native_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-0: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-1: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on database-2: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 10000 +pcmk__native_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-0: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-1: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on database-2: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0 +pcmk__native_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 10000 +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0 
+pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0 +pcmk__native_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0 +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0 +pcmk__native_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0 +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0 +pcmk__native_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY +pcmk__native_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY +pcmk__native_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY +pcmk__native_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-0 allocation score on controller-0: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on controller-1: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on controller-2: 10000 +pcmk__native_allocate: redis-bundle-0 allocation score on database-0: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on database-1: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on database-2: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on messaging-0: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on messaging-1: 0 +pcmk__native_allocate: redis-bundle-0 allocation score on messaging-2: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-1 allocation score on controller-0: 10000 +pcmk__native_allocate: redis-bundle-1 allocation score on controller-1: 0 
+pcmk__native_allocate: redis-bundle-1 allocation score on controller-2: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on database-0: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on database-1: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on database-2: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on messaging-0: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on messaging-1: 0 +pcmk__native_allocate: redis-bundle-1 allocation score on messaging-2: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-2 allocation score on controller-0: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on controller-1: 10000 +pcmk__native_allocate: redis-bundle-2 allocation score on controller-2: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on database-0: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on database-1: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on database-2: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on messaging-0: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on messaging-1: 0 +pcmk__native_allocate: redis-bundle-2 allocation score on messaging-2: 0 +pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-0: 0 +pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-1: 0 +pcmk__native_allocate: redis-bundle-podman-0 allocation score on controller-2: 0 +pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-0: 0 +pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-1: 0 +pcmk__native_allocate: redis-bundle-podman-1 allocation score on controller-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-0: -INFINITY +pcmk__native_allocate: 
redis-bundle-podman-2 allocation score on controller-1: 0 +pcmk__native_allocate: redis-bundle-podman-2 allocation score on controller-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY +pcmk__native_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY +pcmk__native_allocate: redis:0 allocation score on redis-bundle-0: INFINITY +pcmk__native_allocate: redis:1 allocation score on redis-bundle-1: INFINITY +pcmk__native_allocate: redis:2 allocation score on redis-bundle-2: INFINITY +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_compute-fence-nova allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-0: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254001f5f3c allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on 
database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-52540033df9c allocation score on messaging-2: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on controller-2: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254003f88b4 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400642894 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-1: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-1: 0 
+pcmk__native_allocate: stonith-fence_ipmilan-5254007b7920 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-1: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-5254009cb549 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on database-2: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400bb150b allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400d5382b allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-0: -10000 
+pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400dc0f81 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-0: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400e10267 allocation score on messaging-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on compute-0: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on compute-1: -INFINITY +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on controller-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on controller-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on controller-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-1: -10000 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on database-2: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-0: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-1: 0 +pcmk__native_allocate: stonith-fence_ipmilan-525400ffc780 allocation score on messaging-2: 0 +redis:0 promotion score on redis-bundle-0: 1 +redis:1 promotion score on redis-bundle-1: 1 +redis:2 promotion score on redis-bundle-2: 1 diff --git a/cts/scheduler/remote-connection-shutdown.summary b/cts/scheduler/remote-connection-shutdown.summary new file mode 100644 index 0000000000..8756c33333 --- /dev/null +++ b/cts/scheduler/remote-connection-shutdown.summary @@ -0,0 +1,186 @@ +Using the original execution date of: 2020-11-17 07:03:16Z + +Current cluster status: +Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] +RemoteOnline: [ compute-0 compute-1 ] +GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 
galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] + + compute-0 (ocf::pacemaker:remote): Started controller-0 + compute-1 (ocf::pacemaker:remote): Started controller-1 + Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest] + galera-bundle-0 (ocf::heartbeat:galera): Master database-0 + galera-bundle-1 (ocf::heartbeat:galera): Master database-1 + galera-bundle-2 (ocf::heartbeat:galera): Master database-2 + Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest] + rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 + rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1 + rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2 + Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest] + redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 + redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 + redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 + ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-2 + ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-0 + ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 + ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-2 + ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-0 + ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-1 + Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest] + haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started controller-2 + haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 + haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 + Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest] + ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Master controller-2 + ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Slave controller-0 + ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 + ip-172.17.1.57 (ocf::heartbeat:IPaddr2): Started controller-2 + stonith-fence_compute-fence-nova (stonith:fence_compute): Stopped + Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] + Started: [ compute-0 compute-1 ] + Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] + nova-evacuate (ocf::openstack:NovaEvacuate): Started database-0 + stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-1 + stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started database-2 + stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-0 + stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-1 + stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2 + stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2 + stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0 + stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started messaging-2 + stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-0 + stonith-fence_ipmilan-525400e10267 
(stonith:fence_ipmilan): Started messaging-1 + stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1 + Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest] + openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0 + +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-0 changed +Only 'private' parameters to stonith-fence_compute-fence-nova for unfencing compute-1 changed +Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_start_0 on database-0 changed: 0:0;322:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254009cb549_monitor_60000 on database-0 changed: 0:0;323:1375:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to nova-evacuate_start_0 on database-0 changed: 0:0;258:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to nova-evacuate_monitor_10000 on database-0 changed: 0:0;259:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_start_0 on database-1 changed: 0:0;330:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400dc0f81_monitor_60000 on database-1 changed: 0:0;331:1380:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_start_0 on database-1 changed: 0:0;261:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-52540033df9c_monitor_60000 on database-1 changed: 0:0;263:1420:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_start_0 on database-2 changed: 0:0;319:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400d5382b_monitor_60000 on database-2 changed: 0:0;320:1374:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_start_0 on database-2 changed: 0:0;263:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254001f5f3c_monitor_60000 on database-2 changed: 0:0;265:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400e10267_start_0 on messaging-1 changed: 0:0;320:1317:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400e10267_monitor_60000 on messaging-1 changed: 0:0;326:1318:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_start_0 on messaging-1 changed: 0:0;271:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254007b7920_monitor_60000 on messaging-1 changed: 0:0;273:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_start_0 on messaging-0 changed: 0:0;324:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to stonith-fence_ipmilan-525400bb150b_monitor_60000 on messaging-0 changed: 0:0;325:70:0:40f880e4-b328-4380-9703-47856390a1e0 +Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_start_0 on messaging-0 changed: 0:0;267:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-5254003f88b4_monitor_60000 on messaging-0 changed: 0:0;269:1422:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400642894_start_0 on 
messaging-2 changed: 0:0;272:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400642894_monitor_60000 on messaging-2 changed: 0:0;274:1424:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_start_0 on messaging-2 changed: 0:0;321:49:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Only 'private' parameters to stonith-fence_ipmilan-525400ffc780_monitor_60000 on messaging-2 changed: 0:0;323:50:0:51c2f6fa-d5ae-4ae7-b8df-b121f8ea9269 +Transition Summary: + * Stop compute-0 ( controller-0 ) due to node availability + * Start stonith-fence_compute-fence-nova ( database-0 ) + * Stop compute-unfence-trigger:0 ( compute-0 ) due to node availability + * Move nova-evacuate ( database-0 -> database-1 ) + * Move stonith-fence_ipmilan-52540033df9c ( database-1 -> database-2 ) + * Move stonith-fence_ipmilan-5254001f5f3c ( database-2 -> messaging-0 ) + * Move stonith-fence_ipmilan-5254003f88b4 ( messaging-0 -> messaging-1 ) + * Move stonith-fence_ipmilan-5254007b7920 ( messaging-1 -> messaging-2 ) + * Move stonith-fence_ipmilan-525400ffc780 ( messaging-2 -> database-0 ) + * Move stonith-fence_ipmilan-5254009cb549 ( database-0 -> database-1 ) + +Executing cluster transition: + * Resource action: stonith-fence_compute-fence-nova start on database-0 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-2 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-1 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-2 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-1 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on controller-0 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-2 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-1 + * Cluster action: clear_failcount for stonith-fence_compute-fence-nova on database-0 + * Pseudo action: compute-unfence-trigger-clone_stop_0 + * Resource action: nova-evacuate stop on database-0 + * Resource action: stonith-fence_ipmilan-52540033df9c stop on database-1 + * Resource action: stonith-fence_ipmilan-5254001f5f3c stop on database-2 + * Resource action: stonith-fence_ipmilan-5254003f88b4 stop on messaging-0 + * Resource action: stonith-fence_ipmilan-5254007b7920 stop on messaging-1 + * Resource action: stonith-fence_ipmilan-525400ffc780 stop on messaging-2 + * Resource action: stonith-fence_ipmilan-5254009cb549 stop on database-0 + * Resource action: stonith-fence_compute-fence-nova monitor=60000 on database-0 + * Resource action: compute-unfence-trigger stop on compute-0 + * Pseudo action: compute-unfence-trigger-clone_stopped_0 + * Resource action: nova-evacuate start on database-1 + * Resource action: stonith-fence_ipmilan-52540033df9c start on database-2 + * Resource action: stonith-fence_ipmilan-5254001f5f3c start on messaging-0 + * Resource action: stonith-fence_ipmilan-5254003f88b4 start on messaging-1 + * Resource action: stonith-fence_ipmilan-5254007b7920 start on messaging-2 + * Resource action: stonith-fence_ipmilan-525400ffc780 start on database-0 + * Resource action: stonith-fence_ipmilan-5254009cb549 start on database-1 + * Resource action: compute-0 stop on controller-0 + * Resource action: nova-evacuate monitor=10000 on database-1 + * Resource action: 
stonith-fence_ipmilan-52540033df9c monitor=60000 on database-2 + * Resource action: stonith-fence_ipmilan-5254001f5f3c monitor=60000 on messaging-0 + * Resource action: stonith-fence_ipmilan-5254003f88b4 monitor=60000 on messaging-1 + * Resource action: stonith-fence_ipmilan-5254007b7920 monitor=60000 on messaging-2 + * Resource action: stonith-fence_ipmilan-525400ffc780 monitor=60000 on database-0 + * Resource action: stonith-fence_ipmilan-5254009cb549 monitor=60000 on database-1 +Using the original execution date of: 2020-11-17 07:03:16Z + +Revised cluster status: +Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] +RemoteOnline: [ compute-1 ] +RemoteOFFLINE: [ compute-0 ] +GuestOnline: [ galera-bundle-0:galera-bundle-podman-0 galera-bundle-1:galera-bundle-podman-1 galera-bundle-2:galera-bundle-podman-2 ovn-dbs-bundle-0:ovn-dbs-bundle-podman-0 ovn-dbs-bundle-1:ovn-dbs-bundle-podman-1 ovn-dbs-bundle-2:ovn-dbs-bundle-podman-2 rabbitmq-bundle-0:rabbitmq-bundle-podman-0 rabbitmq-bundle-1:rabbitmq-bundle-podman-1 rabbitmq-bundle-2:rabbitmq-bundle-podman-2 redis-bundle-0:redis-bundle-podman-0 redis-bundle-1:redis-bundle-podman-1 redis-bundle-2:redis-bundle-podman-2 ] + + compute-0 (ocf::pacemaker:remote): Stopped + compute-1 (ocf::pacemaker:remote): Started controller-1 + Container bundle set: galera-bundle [cluster.common.tag/mariadb:pcmklatest] + galera-bundle-0 (ocf::heartbeat:galera): Master database-0 + galera-bundle-1 (ocf::heartbeat:galera): Master database-1 + galera-bundle-2 (ocf::heartbeat:galera): Master database-2 + Container bundle set: rabbitmq-bundle [cluster.common.tag/rabbitmq:pcmklatest] + rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started messaging-0 + rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started messaging-1 + rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started messaging-2 + Container bundle set: redis-bundle [cluster.common.tag/redis:pcmklatest] + redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 + redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 + redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 + ip-192.168.24.150 (ocf::heartbeat:IPaddr2): Started controller-2 + ip-10.0.0.150 (ocf::heartbeat:IPaddr2): Started controller-0 + ip-172.17.1.151 (ocf::heartbeat:IPaddr2): Started controller-1 + ip-172.17.1.150 (ocf::heartbeat:IPaddr2): Started controller-2 + ip-172.17.3.150 (ocf::heartbeat:IPaddr2): Started controller-0 + ip-172.17.4.150 (ocf::heartbeat:IPaddr2): Started controller-1 + Container bundle set: haproxy-bundle [cluster.common.tag/haproxy:pcmklatest] + haproxy-bundle-podman-0 (ocf::heartbeat:podman): Started controller-2 + haproxy-bundle-podman-1 (ocf::heartbeat:podman): Started controller-0 + haproxy-bundle-podman-2 (ocf::heartbeat:podman): Started controller-1 + Container bundle set: ovn-dbs-bundle [cluster.common.tag/ovn-northd:pcmklatest] + ovn-dbs-bundle-0 (ocf::ovn:ovndb-servers): Master controller-2 + ovn-dbs-bundle-1 (ocf::ovn:ovndb-servers): Slave controller-0 + ovn-dbs-bundle-2 (ocf::ovn:ovndb-servers): Slave controller-1 + ip-172.17.1.57 (ocf::heartbeat:IPaddr2): Started controller-2 + stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-0 + Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] + Started: [ compute-1 ] + Stopped: [ compute-0 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ] + nova-evacuate 
(ocf::openstack:NovaEvacuate): Started database-1
+ stonith-fence_ipmilan-52540033df9c (stonith:fence_ipmilan): Started database-2
+ stonith-fence_ipmilan-5254001f5f3c (stonith:fence_ipmilan): Started messaging-0
+ stonith-fence_ipmilan-5254003f88b4 (stonith:fence_ipmilan): Started messaging-1
+ stonith-fence_ipmilan-5254007b7920 (stonith:fence_ipmilan): Started messaging-2
+ stonith-fence_ipmilan-525400642894 (stonith:fence_ipmilan): Started messaging-2
+ stonith-fence_ipmilan-525400d5382b (stonith:fence_ipmilan): Started database-2
+ stonith-fence_ipmilan-525400bb150b (stonith:fence_ipmilan): Started messaging-0
+ stonith-fence_ipmilan-525400ffc780 (stonith:fence_ipmilan): Started database-0
+ stonith-fence_ipmilan-5254009cb549 (stonith:fence_ipmilan): Started database-1
+ stonith-fence_ipmilan-525400e10267 (stonith:fence_ipmilan): Started messaging-1
+ stonith-fence_ipmilan-525400dc0f81 (stonith:fence_ipmilan): Started database-1
+ Container bundle: openstack-cinder-volume [cluster.common.tag/cinder-volume:pcmklatest]
+ openstack-cinder-volume-podman-0 (ocf::heartbeat:podman): Started controller-0
+
diff --git a/cts/scheduler/remote-connection-shutdown.xml b/cts/scheduler/remote-connection-shutdown.xml
new file mode 100644
index 0000000000..0e4f995ddd
--- /dev/null
+++ b/cts/scheduler/remote-connection-shutdown.xml
@@ -0,0 +1,2109 @@
[2,109 added lines of test CIB XML omitted: the file body was lost in extraction, leaving only bare "+" markers.]
diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h
index 1d010f4fd5..d5e1a398c3 100644
--- a/include/crm/pengine/complex.h
+++ b/include/crm/pengine/complex.h
@@ -1,42 +1,45 @@
 /*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */

#ifndef PENGINE_COMPLEX__H
# define PENGINE_COMPLEX__H

#ifdef __cplusplus
extern "C" {
#endif

#include <glib.h>                  // gboolean, GHashTable
#include <libxml/tree.h>           // xmlNode
#include <crm/pengine/pe_types.h>  // pe_node_t, pe_resource_t, etc.
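/* The declarations below add pe__set_next_role(), which this patch's
 * scheduler changes call instead of assigning rsc->next_role directly,
 * presumably so the reason for each change can be traced (callers in this
 * patch pass reasons such as "first group member" and "unmanaged"). Its
 * definition is not part of this excerpt; a minimal sketch, assuming the
 * setter simply records the role and traces why (the message wording here
 * is illustrative, not taken from the source):
 *
 *   void
 *   pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
 *                     const char *why)
 *   {
 *       CRM_ASSERT((rsc != NULL) && (why != NULL));
 *       if (rsc->next_role != role) {
 *           pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
 *                        rsc->id, role2text(rsc->next_role), role2text(role),
 *                        why);
 *           rsc->next_role = role;
 *       }
 *   }
 */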
extern resource_object_functions_t resource_class_functions[]; GHashTable *pe_rsc_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); void get_rsc_attributes(GHashTable *meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); #if ENABLE_VERSIONED_ATTRS void pe_get_versioned_attributes(xmlNode *meta_hash, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); #endif gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc); pe_resource_t *uber_parent(pe_resource_t *rsc); +void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, + const char *why); + #ifdef __cplusplus } #endif #endif diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c index 439ed91c3f..c9026e4508 100644 --- a/lib/pacemaker/pcmk_sched_group.c +++ b/lib/pacemaker/pcmk_sched_group.c @@ -1,530 +1,531 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #define VARIANT_GROUP 1 #include pe_node_t * pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set) { pe_node_t *node = NULL; pe_node_t *group_node = NULL; GListPtr gIter = NULL; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } if (group_data->first_child == NULL) { // Nothing to allocate pe__clear_resource_flags(rsc, pe_rsc_provisional); return NULL; } pe__set_resource_flags(rsc, pe_rsc_allocating); rsc->role = group_data->first_child->role; group_data->first_child->rsc_cons = g_list_concat(group_data->first_child->rsc_cons, rsc->rsc_cons); rsc->rsc_cons = NULL; group_data->last_child->rsc_cons_lhs = g_list_concat(group_data->last_child->rsc_cons_lhs, rsc->rsc_cons_lhs); rsc->rsc_cons_lhs = NULL; pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes); gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; pe_rsc_trace(rsc, "Allocating group %s member %s", rsc->id, child_rsc->id); node = child_rsc->cmds->allocate(child_rsc, prefer, data_set); if (group_node == NULL) { group_node = node; } } - rsc->next_role = group_data->first_child->next_role; + pe__set_next_role(rsc, group_data->first_child->next_role, + "first group member"); pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); if (group_data->colocated) { return group_node; } return NULL; } void group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child); void group_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *op = NULL; const char *value = NULL; GListPtr gIter = rsc->children; pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->create_actions(child_rsc, data_set); group_update_pseudo_status(rsc, child_rsc); } op = start_action(rsc, NULL, TRUE /* !group_data->child_starting */ ); pe__set_action_flags(op, 
pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, started_key(rsc), RSC_STARTED, NULL, TRUE /* !group_data->child_starting */ , TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); op = stop_action(rsc, NULL, TRUE /* !group_data->child_stopping */ ); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, stopped_key(rsc), RSC_STOPPED, NULL, TRUE /* !group_data->child_stopping */ , TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE); if (crm_is_true(value)) { op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE, TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); } } void group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child) { GListPtr gIter = child->actions; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, parent); if (group_data->ordered == FALSE) { /* If this group is not ordered, then leave the meta-actions as optional */ return; } if (group_data->child_stopping && group_data->child_starting) { return; } for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (pcmk_is_set(action->flags, pe_action_optional)) { continue; } if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei) && pcmk_is_set(action->flags, pe_action_runnable)) { group_data->child_stopping = TRUE; pe_rsc_trace(action->rsc, "Based on %s the group is stopping", action->uuid); } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei) && pcmk_is_set(action->flags, pe_action_runnable)) { group_data->child_starting = TRUE; pe_rsc_trace(action->rsc, "Based on %s the group is starting", action->uuid); } } } void group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = rsc->children; pe_resource_t *last_rsc = NULL; pe_resource_t *last_active = NULL; pe_resource_t *top = uber_parent(rsc); group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; int stop = pe_order_none; int stopped = pe_order_implies_then_printed; int start = pe_order_implies_then | pe_order_runnable_left; int started = pe_order_runnable_left | pe_order_implies_then | pe_order_implies_then_printed; child_rsc->cmds->internal_constraints(child_rsc, data_set); if (last_rsc == NULL) { if (group_data->ordered) { pe__set_order_flags(stop, pe_order_optional); stopped = pe_order_implies_then; } } else if (group_data->colocated) { pcmk__new_colocation("group:internal_colocation", NULL, INFINITY, child_rsc, last_rsc, NULL, NULL, pcmk_is_set(child_rsc->flags, pe_rsc_critical), data_set); } if (pcmk_is_set(top->flags, pe_rsc_promotable)) { 
new_rsc_order(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE, stop | pe_order_implies_first_printed, data_set); new_rsc_order(child_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stopped, data_set); new_rsc_order(child_rsc, RSC_PROMOTE, rsc, RSC_PROMOTED, started, data_set); new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } order_start_start(rsc, child_rsc, pe_order_implies_first_printed); order_stop_stop(rsc, child_rsc, stop | pe_order_implies_first_printed); new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, stopped, data_set); new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, started, data_set); if (group_data->ordered == FALSE) { order_start_start(rsc, child_rsc, start | pe_order_implies_first_printed); if (pcmk_is_set(top->flags, pe_rsc_promotable)) { new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, start | pe_order_implies_first_printed, data_set); } } else if (last_rsc != NULL) { child_rsc->restart_type = pe_restart_restart; order_start_start(last_rsc, child_rsc, start); order_stop_stop(child_rsc, last_rsc, pe_order_optional | pe_order_restart); if (pcmk_is_set(top->flags, pe_rsc_promotable)) { new_rsc_order(last_rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, start, data_set); new_rsc_order(child_rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, pe_order_optional, data_set); } } else { /* If anyone in the group is starting, then * pe_order_implies_then will cause _everyone_ in the group * to be sent a start action * But this is safe since starting something that is already * started is required to be "safe" */ int flags = pe_order_none; order_start_start(rsc, child_rsc, flags); if (pcmk_is_set(top->flags, pe_rsc_promotable)) { new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, flags, data_set); } } /* Look for partially active groups * Make sure they still shut down in sequence */ if (child_rsc->running_on) { if (group_data->ordered && last_rsc && last_rsc->running_on == NULL && last_active && last_active->running_on) { order_stop_stop(child_rsc, last_active, pe_order_optional); } last_active = child_rsc; } last_rsc = child_rsc; } if (group_data->ordered && last_rsc != NULL) { int stop_stop_flags = pe_order_implies_then; int stop_stopped_flags = pe_order_optional; order_stop_stop(rsc, last_rsc, stop_stop_flags); new_rsc_order(last_rsc, RSC_STOP, rsc, RSC_STOPPED, stop_stopped_flags, data_set); if (pcmk_is_set(top->flags, pe_rsc_promotable)) { new_rsc_order(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, stop_stop_flags, data_set); new_rsc_order(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stop_stopped_flags, data_set); } } } void group_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { GListPtr gIter = NULL; group_variant_data_t *group_data = NULL; if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing constraints from %s", rsc_lh->id); get_group_variant_data(group_data, rsc_lh); if (group_data->colocated) { group_data->first_child->cmds->rsc_colocation_lh(group_data->first_child, rsc_rh, constraint, data_set); return; } else if (constraint->score >= INFINITY) { pcmk__config_err("%s: Cannot perform mandatory colocation " "between non-colocated group and %s", rsc_lh->id, rsc_rh->id); return; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; 
child_rsc->cmds->rsc_colocation_lh(child_rsc, rsc_rh, constraint, data_set); } } void group_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { GListPtr gIter = rsc_rh->children; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc_rh); CRM_CHECK(rsc_lh->variant == pe_native, return); pe_rsc_trace(rsc_rh, "Processing RH %s of constraint %s (LH is %s)", rsc_rh->id, constraint->id, rsc_lh->id); if (pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if (group_data->colocated && group_data->first_child) { if (constraint->score >= INFINITY) { /* Ensure RHS is _fully_ up before can start LHS */ group_data->last_child->cmds->rsc_colocation_rh(rsc_lh, group_data->last_child, constraint, data_set); } else { /* A partially active RHS is fine */ group_data->first_child->cmds->rsc_colocation_rh(rsc_lh, group_data->first_child, constraint, data_set); } return; } else if (constraint->score >= INFINITY) { pcmk__config_err("%s: Cannot perform mandatory colocation with" " non-colocated group %s", rsc_lh->id, rsc_rh->id); return; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint, data_set); } } enum pe_action_flags group_action_flags(pe_action_t * action, pe_node_t * node) { GListPtr gIter = NULL; enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); for (gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; enum action_tasks task = get_complex_task(child, action->task, TRUE); const char *task_s = task2text(task); pe_action_t *child_action = find_first_action(child->actions, NULL, task_s, node); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (pcmk_is_set(flags, pe_action_optional) && !pcmk_is_set(child_flags, pe_action_optional)) { pe_rsc_trace(action->rsc, "%s is mandatory because of %s", action->uuid, child_action->uuid); pe__clear_raw_action_flags(flags, "group action", pe_action_optional); pe__clear_action_flags(action, pe_action_optional); } if (!pcmk__str_eq(task_s, action->task, pcmk__str_casei) && pcmk_is_set(flags, pe_action_runnable) && !pcmk_is_set(child_flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "%s is not runnable because of %s", action->uuid, child_action->uuid); pe__clear_raw_action_flags(flags, "group action", pe_action_runnable); pe__clear_action_flags(action, pe_action_runnable); } } else if (task != stop_rsc && task != action_demote) { pe_rsc_trace(action->rsc, "%s is not runnable because of %s (not found in %s)", action->uuid, task_s, child->id); pe__clear_raw_action_flags(flags, "group action", pe_action_runnable); } } return flags; } enum pe_graph_flags group_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set) { GListPtr gIter = then->rsc->children; enum pe_graph_flags changed = pe_graph_none; CRM_ASSERT(then->rsc != NULL); changed |= native_update_actions(first, then, node, flags, filter, type, data_set); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe_action_t *child_action = find_first_action(child->actions, NULL, then->task, node); if (child_action) { changed |= child->cmds->update_actions(first, 
child_action, node, flags, filter, type, data_set); } } return changed; } void group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GListPtr gIter = rsc->children; GListPtr saved = constraint->node_list_rh; GListPtr zero = pcmk__copy_node_list(constraint->node_list_rh, true); gboolean reset_scores = TRUE; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); pe_rsc_debug(rsc, "Processing rsc_location %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); if (group_data->colocated && reset_scores) { reset_scores = FALSE; constraint->node_list_rh = zero; } } constraint->node_list_rh = saved; g_list_free_full(zero, free); } void group_expand(pe_resource_t * rsc, pe_working_set_t * data_set) { CRM_CHECK(rsc != NULL, return); pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); native_expand(rsc, data_set); for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } GHashTable * pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, uint32_t flags) { GListPtr gIter = rsc->rsc_cons_lhs; group_variant_data_t *group_data = NULL; get_group_variant_data(group_data, rsc); if (pcmk_is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "Breaking dependency loop with %s at %s", rsc->id, rhs); return nodes; } pe__set_resource_flags(rsc, pe_rsc_merging); nodes = group_data->first_child->cmds->merge_weights(group_data->first_child, rhs, nodes, attr, factor, flags); for (; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; nodes = pcmk__native_merge_weights(constraint->rsc_lh, rsc->id, nodes, constraint->node_attribute, constraint->score / (float) INFINITY, flags); } pe__clear_resource_flags(rsc, pe_rsc_merging); return nodes; } void group_append_meta(pe_resource_t * rsc, xmlNode * xml) { } diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c index 0e50edab55..6548b204fb 100644 --- a/lib/pacemaker/pcmk_sched_native.c +++ b/lib/pacemaker/pcmk_sched_native.c @@ -1,3482 +1,3484 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include // The controller removes the resource from the CIB, making this redundant // #define DELETE_THEN_REFRESH 1 #define INFINITY_HACK (INFINITY * -100) #define VARIANT_NATIVE 1 #include static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node, pe_working_set_t *data_set); static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node, xmlNode *operation, pe_working_set_t *data_set); static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node, pe_working_set_t *data_set); static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node, xmlNode *operation, pe_working_set_t *data_set); void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set); gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set); gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); /* This array says what the *next* role should be when transitioning from one * role to another. For example going from Stopped to Master, the next role is * RSC_ROLE_SLAVE, because the resource must be started before being promoted. * The current state then becomes Started, which is fed into this array again, * giving a next role of RSC_ROLE_MASTER. 
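 * Similarly, rsc_state_matrix[RSC_ROLE_MASTER][RSC_ROLE_STOPPED] below is
 * RSC_ROLE_SLAVE: a promoted resource must first be demoted, and the Slave
 * row then maps Slave -> Stopped to RSC_ROLE_STOPPED.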
*/ static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current state Next state*/ /* Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set); // This array picks the function needed to transition from one role to another static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current state Next state */ /* Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , }, }; #define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \ flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Node weight", (nw_rsc)->id, (flags), \ (flags_to_clear), #flags_to_clear); \ } while (0) static gboolean native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set) { GListPtr nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; int multiple = 1; int length = 0; gboolean result = FALSE; process_utilization(rsc, &prefer, data_set); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to ? TRUE : FALSE; } // Sort allowed nodes by weight if (rsc->allowed_nodes) { length = g_hash_table_size(rsc->allowed_nodes); } if (length > 0) { nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set); // First node in sorted list has the best score best = g_list_nth_data(nodes, 0); } if (prefer && nodes) { chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", prefer->details->uname, rsc->id); /* Favor the preferred node as long as its weight is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's weight is less than INFINITY. */ } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", chosen->details->uname, rsc->id); chosen = NULL; } else if (!can_run_resources(chosen)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", chosen->details->uname, rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s (ignoring %d candidates)", chosen->details->uname, rsc->id, length); } } if ((chosen == NULL) && nodes) { /* Either there is no preferred node, or the preferred node is not * available, but there are other nodes allowed to run the resource. */ chosen = best; pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates", chosen ? 
chosen->details->uname : "", rsc->id, length); if (!pe_rsc_is_unique_clone(rsc->parent) && chosen && (chosen->weight > 0) && can_run_resources(chosen)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * distribute_children() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unallocated instances to prefer a node that's already * running another instance. */ pe_node_t *running = pe__current_node(rsc); if (running && (can_run_resources(running) == FALSE)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, running->details->uname); } else if (running) { for (GList *iter = nodes->next; iter; iter = iter->next) { pe_node_t *tmp = (pe_node_t *) iter->data; if (tmp->weight != chosen->weight) { // The nodes are sorted by weight, so no more are equal break; } if (tmp->details == running->details) { // Scores are equal, so prefer the current node chosen = tmp; } multiple++; } } } } if (multiple > 1) { static char score[33]; int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO; score2char_stack(chosen->weight, score, sizeof(score)); do_crm_log(log_level, "Chose node %s for %s from %d nodes with score %s", chosen->details->uname, rsc->id, multiple, score); } result = native_assign_node(rsc, nodes, chosen, FALSE); g_list_free(nodes); return result; } /*! * \internal * \brief Find score of highest-scored node that matches colocation attribute * * \param[in] rsc Resource whose allowed nodes should be searched * \param[in] attr Colocation attribute name (must not be NULL) * \param[in] value Colocation attribute value to require */ static int best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr, const char *value) { GHashTableIter iter; pe_node_t *node = NULL; int best_score = -INFINITY; const char *best_node = NULL; // Find best allowed node with matching attribute g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if ((node->weight > best_score) && can_run_resources(node) && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) { best_score = node->weight; best_node = node->details->uname; } } if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) { if (best_node == NULL) { crm_info("No allowed node for %s matches node attribute %s=%s", rsc->id, attr, value); } else { crm_info("Allowed node %s for %s had best score (%d) " "of those matching node attribute %s=%s", best_node, rsc->id, best_score, attr, value); } } return best_score; } /*! * \internal * \brief Add resource's colocation matches to current node allocation scores * * For each node in a given table, if any of a given resource's allowed nodes * have a matching value for the colocation attribute, add the highest of those * nodes' scores to the node's score. 
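 * As a worked example with factor 1.0: a node currently weighted 5, whose
 * attribute value is matched by an allowed node with best score 3, ends up
 * weighted 5 + round(1.0 * 3) = 8.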
* * \param[in,out] nodes Hash table of nodes with allocation scores so far * \param[in] rsc Resource whose allowed nodes should be compared * \param[in] attr Colocation attribute that must match (NULL for default) * \param[in] factor Factor by which to multiply scores being added * \param[in] only_positive Whether to add only positive scores */ static void add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc, const char *attr, float factor, bool only_positive) { GHashTableIter iter; pe_node_t *node = NULL; if (attr == NULL) { attr = CRM_ATTR_UNAME; } // Iterate through each node g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { float weight_f = 0; int weight = 0; int score = 0; int new_score = 0; score = best_node_score_matching_attr(rsc, attr, pe_node_attribute_raw(node, attr)); if ((factor < 0) && (score < 0)) { /* Negative preference for a node with a negative score * should not become a positive preference. * * @TODO Consider filtering only if weight is -INFINITY */ crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)", node->details->uname, node->weight, factor, score); continue; } if (node->weight == INFINITY_HACK) { crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)", node->details->uname, node->weight, factor, score); continue; } weight_f = factor * score; // Round the number; see http://c-faq.com/fp/round.html weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5)); /* Small factors can obliterate the small scores that are often actually * used in configurations. If the score and factor are nonzero, ensure * that the result is nonzero as well. */ if ((weight == 0) && (score != 0)) { if (factor > 0.0) { weight = 1; } else if (factor < 0.0) { weight = -1; } } new_score = pe__add_scores(weight, node->weight); if (only_positive && (new_score < 0) && (node->weight > 0)) { crm_trace("%s: Filtering %d + %f * %d = %d " "(negative disallowed, marking node unusable)", node->details->uname, node->weight, factor, score, new_score); node->weight = INFINITY_HACK; continue; } if (only_positive && (new_score < 0) && (node->weight == 0)) { crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)", node->details->uname, node->weight, factor, score, new_score); continue; } crm_trace("%s: %d + %f * %d = %d", node->details->uname, node->weight, factor, score, new_score); node->weight = new_score; } } static inline bool is_nonempty_group(pe_resource_t *rsc) { return rsc && (rsc->variant == pe_group) && (rsc->children != NULL); } /*! * \internal * \brief Incorporate colocation constraint scores into node weights * * \param[in,out] rsc Resource being placed * \param[in] rhs ID of 'with' resource * \param[in,out] nodes Nodes, with scores as of this point * \param[in] attr Colocation attribute (ID by default) * \param[in] factor Incorporate scores multiplied by this factor * \param[in] flags Bitmask of enum pe_weights values * * \return Nodes, with scores modified by this constraint * \note This function assumes ownership of the nodes argument. The caller * should free the returned copy rather than the original. 
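 * A representative call, as used during primitive allocation later in this
 * file, folds a colocation's scores in at strength score / INFINITY:
 *
 *   rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(
 *       constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
 *       constraint->node_attribute,
 *       (float) constraint->score / INFINITY, pe_weights_rollback);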
*/ GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, uint32_t flags) { GHashTable *work = NULL; // Avoid infinite recursion if (pcmk_is_set(rsc->flags, pe_rsc_merging)) { pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } pe__set_resource_flags(rsc, pe_rsc_merging); if (pcmk_is_set(flags, pe_weights_init)) { if (is_nonempty_group(rsc)) { GList *last = g_list_last(rsc->children); pe_resource_t *last_rsc = last->data; pe_rsc_trace(rsc, "%s: Merging scores from group %s " "using last member %s (at %.6f)", rhs, rsc->id, last_rsc->id, factor); work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor, flags); } else { work = pcmk__copy_node_table(rsc->allowed_nodes); } clear_node_weights_flags(flags, rsc, pe_weights_init); } else if (is_nonempty_group(rsc)) { /* The first member of the group will recursively incorporate any * constraints involving other members (including the group internal * colocation). * * @TODO The indirect colocations from the dependent group's other * members will be incorporated at full strength rather than by * factor, so the group's combined stickiness will be treated as * (factor + (#members - 1)) * stickiness. It is questionable what * the right approach should be. */ pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s " "(at %.6f)", rhs, rsc->id, factor); work = pcmk__copy_node_table(nodes); work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr, factor, flags); } else { pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)", rhs, rsc->id, factor); work = pcmk__copy_node_table(nodes); add_node_scores_matching_attr(work, rsc, attr, factor, pcmk_is_set(flags, pe_weights_positive)); } if (can_run_any(work)) { GListPtr gIter = NULL; int multiplier = (factor < 0)? 
-1 : 1; if (pcmk_is_set(flags, pe_weights_forward)) { gIter = rsc->rsc_cons; pe_rsc_trace(rsc, "Checking additional %d optional '%s with' constraints", g_list_length(gIter), rsc->id); } else if (is_nonempty_group(rsc)) { pe_resource_t *last_rsc = g_list_last(rsc->children)->data; gIter = last_rsc->rsc_cons_lhs; pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' " "constraints using last member %s", g_list_length(gIter), rsc->id, last_rsc->id); } else { gIter = rsc->rsc_cons_lhs; pe_rsc_trace(rsc, "Checking additional %d optional 'with %s' constraints", g_list_length(gIter), rsc->id); } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *other = NULL; pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; if (pcmk_is_set(flags, pe_weights_forward)) { other = constraint->rsc_rh; } else if (!pcmk__colocation_has_influence(constraint, NULL)) { continue; } else { other = constraint->rsc_lh; } pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)", constraint->id, constraint->rsc_lh->id, constraint->rsc_rh->id); work = pcmk__native_merge_weights(other, rhs, work, constraint->node_attribute, multiplier * constraint->score / (float) INFINITY, flags|pe_weights_rollback); pe__show_node_weights(true, NULL, rhs, work); } } else if (pcmk_is_set(flags, pe_weights_rollback)) { pe_rsc_info(rsc, "%s: Rolling back optional scores from %s", rhs, rsc->id); g_hash_table_destroy(work); pe__clear_resource_flags(rsc, pe_rsc_merging); return nodes; } if (pcmk_is_set(flags, pe_weights_positive)) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->weight == INFINITY_HACK) { node->weight = 1; } } } if (nodes) { g_hash_table_destroy(nodes); } pe__clear_resource_flags(rsc, pe_rsc_merging); return work; } static inline bool node_has_been_unfenced(pe_node_t *node) { const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED); return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches); } static inline bool is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set) { return pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing); } pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set) { GListPtr gIter = NULL; if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->allocate(rsc->parent, prefer, data_set); } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } pe__set_resource_flags(rsc, pe_rsc_allocating); pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; GHashTable *archive = NULL; pe_resource_t *rsc_rh = constraint->rsc_rh; if (constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = pcmk__copy_node_table(rsc->allowed_nodes); } pe_rsc_trace(rsc, "%s: Allocating %s first (constraint=%s score=%d role=%s)", rsc->id, rsc_rh->id, constraint->id, constraint->score, role2text(constraint->role_lh)); 
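/* The colocated-with resource is allocated first, then this constraint's
 * scores are merged into rsc's node weights. If that would leave rsc with
 * no runnable node, the weight table archived above (for promotable-role
 * or soft negative constraints) is restored instead. */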
rsc_rh->cmds->allocate(rsc_rh, NULL, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set); if (archive && can_run_any(rsc->allowed_nodes) == FALSE) { pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive) { g_hash_table_destroy(archive); } } pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes); for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; if (!pcmk__colocation_has_influence(constraint, NULL)) { continue; } pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)", constraint->id, constraint->rsc_lh->id, constraint->rsc_rh->id); rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, (float)constraint->score / INFINITY, pe_weights_rollback); } if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id); /* make sure it doesn't come up again */ resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } else if(rsc->next_role > rsc->role && !pcmk_is_set(data_set->flags, pe_flag_have_quorum) && data_set->no_quorum_policy == no_quorum_freeze) { crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); - rsc->next_role = rsc->role; + pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); } pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes); if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) { pe__clear_resource_flags(rsc, pe_rsc_managed); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; pe_node_t *assign_to = NULL; - rsc->next_role = rsc->role; + pe__set_next_role(rsc, rsc->role, "unmanaged"); assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_MASTER) { reason = "master"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id, (assign_to? 
assign_to->details->uname : "no node"), reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, prefer, data_set)) { pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if (rsc->allocated_to == NULL) { if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } pe__clear_resource_flags(rsc, pe_rsc_allocating); if (rsc->is_remote_node) { pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); CRM_ASSERT(remote_node != NULL); if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) { crm_trace("Setting Pacemaker Remote node %s to ONLINE", remote_node->details->id); remote_node->details->online = TRUE; /* We shouldn't consider an unseen remote-node unclean if we are going * to try and connect to it. Otherwise we get an unnecessary fence */ if (remote_node->details->unseen == TRUE) { remote_node->details->unclean = FALSE; } } else { crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)", remote_node->details->id, role2text(rsc->next_role), (rsc->allocated_to? "" : "un")); remote_node->details->shutdown = TRUE; } } return rsc->allocated_to; } static gboolean is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xmlNode *operation = NULL; guint interval2_ms = 0; CRM_ASSERT(rsc); for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; operation = pcmk__xe_next(operation)) { if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { value = crm_element_value(operation, "name"); if (!pcmk__str_eq(value, name, pcmk__str_casei)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval2_ms = crm_parse_interval_spec(value); if (interval_ms != interval2_ms) { continue; } if (id == NULL) { id = ID(operation); } else { pcmk__config_err("Operation %s is duplicate of %s (do not use " "same name and interval combination more " "than once per resource)", ID(operation), id); dup = TRUE; } } } return dup; } static bool op_cannot_recur(const char *name) { return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL); } static void RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval_spec = NULL; const char *node_uname = node? 
node->details->uname : "n/a"; guint interval_ms = 0; pe_action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; CRM_ASSERT(rsc); /* Only process for the operations without role="Stopped" */ role = crm_element_value(operation, "role"); if (role && text2role(role) == RSC_ROLE_STOPPED) { return; } interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval_ms)) { crm_trace("Not creating duplicate recurring action %s for %dms %s", ID(operation), interval_ms, name); return; } if (op_cannot_recur(name)) { pcmk__config_err("Ignoring %s because action '%s' cannot be recurring", ID(operation), name); return; } key = pcmk__op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { crm_trace("Not creating recurring action %s for disabled resource %s", ID(operation), rsc->id); free(key); return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s", ID(operation), rsc->id, role2text(rsc->next_role), node_uname); if (start != NULL) { pe_rsc_trace(rsc, "Marking %s %s due to %s", key, pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory", start->uuid); is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional); } else { pe_rsc_trace(rsc, "Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches == NULL) { is_optional = FALSE; pe_rsc_trace(rsc, "Marking %s mandatory: not active", key); } else { GListPtr gIter = NULL; for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) { pe_action_t *op = (pe_action_t *) gIter->data; if (pcmk_is_set(op->flags, pe_action_reschedule)) { is_optional = FALSE; break; } } g_list_free(possible_matches); } if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL) || (role != NULL && text2role(role) != rsc->next_role)) { int log_level = LOG_TRACE; const char *result = "Ignoring"; if (is_optional) { char *after_key = NULL; pe_action_t *cancel_op = NULL; // It's running, so cancel it log_level = LOG_INFO; result = "Cancelling"; cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set); switch (rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if (rsc->next_role == RSC_ROLE_MASTER) { after_key = promote_key(rsc); } else if (rsc->next_role == RSC_ROLE_STOPPED) { after_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: after_key = demote_key(rsc); break; default: break; } if (after_key) { custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL, pe_order_runnable_left, data_set); } } do_crm_log(log_level, "%s action %s (%s vs. %s)", result, key, role ? 
role : role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); free(key); return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if (is_optional) { pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid); } if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) { pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)", node_uname, mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __func__, __LINE__); } else if (node == NULL || node->details->online == FALSE || node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", node_uname, mon->uuid); update_action_flags(mon, pe_action_runnable | pe_action_clear, __func__, __LINE__); } else if (!pcmk_is_set(mon->flags, pe_action_optional)) { pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s", mon->task, interval_ms / 1000, rsc->id, node_uname); } if (rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); free(running_master); } if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); custom_action_order(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon, pe_order_implies_then | pe_order_runnable_left, data_set); if (rsc->next_role == RSC_ROLE_MASTER) { custom_action_order(rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } else if (rsc->role == RSC_ROLE_MASTER) { custom_action_order(rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional | pe_order_runnable_left, data_set); } } } static void Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set) { if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) && (node == NULL || node->details->maintenance == FALSE)) { xmlNode *operation = NULL; for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; operation = pcmk__xe_next(operation)) { if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { RecurringOp(rsc, start, node, operation, data_set); } } } } static void RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, xmlNode * operation, pe_working_set_t * data_set) { char *key = NULL; const char *name = NULL; const char *role = NULL; const char *interval_spec = NULL; const char *node_uname = node? 
node->details->uname : "n/a"; guint interval_ms = 0; GListPtr possible_matches = NULL; GListPtr gIter = NULL; /* Only process for the operations with role="Stopped" */ role = crm_element_value(operation, "role"); if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) { return; } interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if (is_op_dup(rsc, name, interval_ms)) { crm_trace("Not creating duplicate recurring action %s for %dms %s", ID(operation), interval_ms, name); return; } if (op_cannot_recur(name)) { pcmk__config_err("Ignoring %s because action '%s' cannot be recurring", ID(operation), name); return; } key = pcmk__op_key(rsc->id, name, interval_ms); if (find_rsc_op_entry(rsc, key) == NULL) { crm_trace("Not creating recurring action %s for disabled resource %s", ID(operation), rsc->id); free(key); return; } // @TODO add support if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { crm_notice("Ignoring %s (recurring monitors for Stopped role are " "not supported for anonymous clones)", ID(operation)); return; } pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on nodes where it should not be running", ID(operation), rsc->id, role2text(rsc->next_role)); /* if the monitor exists on the node where the resource will be running, cancel it */ if (node != NULL) { possible_matches = find_actions_exact(rsc->actions, key, node); if (possible_matches) { pe_action_t *cancel_op = NULL; g_list_free(possible_matches); cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set); if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) { /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */ /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */ custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL, pe_order_runnable_left, data_set); } pe_rsc_info(rsc, "Cancel action %s (%s vs. 
%s) on %s", key, role, role2text(rsc->next_role), node_uname); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *stop_node = (pe_node_t *) gIter->data; const char *stop_node_uname = stop_node->details->uname; gboolean is_optional = TRUE; gboolean probe_is_optional = TRUE; gboolean stop_is_optional = TRUE; pe_action_t *stopped_mon = NULL; char *rc_inactive = NULL; GListPtr probe_complete_ops = NULL; GListPtr stop_ops = NULL; GListPtr local_gIter = NULL; if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) { continue; } pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s", ID(operation), rsc->id, crm_str(stop_node_uname)); /* start a monitor for an already stopped resource */ possible_matches = find_actions_exact(rsc->actions, key, stop_node); if (possible_matches == NULL) { pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key, crm_str(stop_node_uname)); is_optional = FALSE; } else { pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key, crm_str(stop_node_uname)); is_optional = TRUE; g_list_free(possible_matches); } stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set); rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); free(rc_inactive); if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS, FALSE); GListPtr pIter = NULL; for (pIter = probes; pIter != NULL; pIter = pIter->next) { pe_action_t *probe = (pe_action_t *) pIter->data; order_actions(probe, stopped_mon, pe_order_runnable_left); crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname); } g_list_free(probes); } if (probe_complete_ops) { g_list_free(probe_complete_ops); } stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE); for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) { pe_action_t *stop = (pe_action_t *) local_gIter->data; if (!pcmk_is_set(stop->flags, pe_action_optional)) { stop_is_optional = FALSE; } if (!pcmk_is_set(stop->flags, pe_action_runnable)) { crm_debug("%s\t %s (cancelled : stop un-runnable)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __func__, __LINE__); } if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, stop_key(rsc), stop, NULL, strdup(key), stopped_mon, pe_order_implies_then | pe_order_runnable_left, data_set); } } if (stop_ops) { g_list_free(stop_ops); } if (is_optional == FALSE && probe_is_optional && stop_is_optional && !pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged", key, crm_str(stop_node_uname)); update_action_flags(stopped_mon, pe_action_optional, __func__, __LINE__); } if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) { pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid); } if (stop_node->details->online == FALSE || stop_node->details->unclean) { pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)", crm_str(stop_node_uname), stopped_mon->uuid); update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __func__, __LINE__); } if (pcmk_is_set(stopped_mon->flags, pe_action_runnable) && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) { crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task, interval_ms / 1000, rsc->id, crm_str(stop_node_uname)); } } 
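    /* A sketch of the kind of operation definition this loop handles (the id
     * and interval here are hypothetical):
     *
     *   <op id="myrsc-monitor-stopped" name="monitor" interval="10s"
     *       role="Stopped"/>
     *
     * For every node except the one the resource is allocated to, the loop
     * above creates (or keeps) a recurring monitor whose expected result is
     * PCMK_OCF_NOT_RUNNING, ordered after any probes and stops on that node.
     */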
    free(key);
}

static void
Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
                  pe_working_set_t * data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance)
        && (node == NULL || node->details->maintenance == FALSE)) {
        xmlNode *operation = NULL;

        for (operation = pcmk__xe_first_child(rsc->ops_xml);
             operation != NULL;
             operation = pcmk__xe_next(operation)) {

            if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                RecurringOp_Stopped(rsc, start, node, operation, data_set);
            }
        }
    }
}

static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current,
                         pe_node_t *chosen, pe_working_set_t * data_set)
{
    pe_action_t *migrate_to = NULL;
    pe_action_t *migrate_from = NULL;
    pe_action_t *start = NULL;
    pe_action_t *stop = NULL;
    gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;

    pe_rsc_trace(rsc, "Processing migration actions for %s: moving from %s to "
                 "%s (partial migration: %s)",
                 rsc->id, current->details->id, chosen->details->id,
                 partial ? "TRUE" : "FALSE");

    start = start_action(rsc, chosen, TRUE);
    stop = stop_action(rsc, current, TRUE);

    if (partial == FALSE) {
        migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                   RSC_MIGRATE, current, TRUE, TRUE, data_set);
    }

    migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                 RSC_MIGRATED, chosen, TRUE, TRUE, data_set);

    if ((migrate_to && migrate_from) || (migrate_from && partial)) {

        pe__set_action_flags(start, pe_action_migrate_runnable);
        pe__set_action_flags(stop, pe_action_migrate_runnable);

        // This is easier than trying to delete it from the graph
        update_action_flags(start, pe_action_pseudo, __func__, __LINE__);

        /* order probes before migrations */
        if (partial) {
            pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
            migrate_from->needs = start->needs;

            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL, pe_order_optional, data_set);

        } else {
            pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
            pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
            migrate_to->needs = start->needs;

            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, pe_order_optional, data_set);
            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL,
                                pe_order_optional|pe_order_implies_first_migratable,
                                data_set);
        }

        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable,
                            data_set);
        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
                            data_set);
    }

    if (migrate_to) {
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE,
                       current->details->uname);
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET,
                       chosen->details->uname);

        /* Pacemaker Remote connections don't require pending to be recorded in
         * the CIB. We can reduce CIB writes by not setting PENDING for them.
         */
        if (rsc->is_remote_node == FALSE) {
            /* migrate_to takes place on the source node, but can
             * have an effect on the target node depending on how
             * the agent is written. Because of this, we have to maintain
             * a record that the migrate_to occurred, in case the source node
             * loses membership while the migrate_to action is still in-flight.
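             * (An illustrative scenario, not asserted by this changeset: if
             * the source node loses membership mid-migration, the recorded
             * pending migrate_to lets the next transition account for the
             * possibility that the resource is already active on the target.)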
             */
            add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
        }
    }

    if (migrate_from) {
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE,
                       current->details->uname);
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET,
                       chosen->details->uname);
    }
}

void
native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
    pe_action_t *start = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *current = NULL;
    gboolean need_stop = FALSE;
    bool need_promote = FALSE;
    gboolean is_moving = FALSE;
    gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;

    GListPtr gIter = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    bool multiply_active = FALSE;
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    CRM_ASSERT(rsc);
    chosen = rsc->allocated_to;
    next_role = rsc->next_role;
    if (next_role == RSC_ROLE_UNKNOWN) {
-        rsc->next_role = (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED;
+        pe__set_next_role(rsc,
+                          (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
+                          "allocation");
    }
    pe_rsc_trace(rsc,
                 "Creating all actions for %s transition from %s to %s (%s) on %s",
                 rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                 ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
                 ((chosen == NULL)? "no node" : chosen->details->uname));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        pe_node_t *dangling_source = (pe_node_t *) gIter->data;
        pe_action_t *stop = NULL;

        pe_rsc_trace(rsc, "Creating stop action %s cleanup for %s on %s due to dangling migration",
                     (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and" : "without"),
                     rsc->id, dangling_source->details->uname);
        stop = stop_action(rsc, dangling_source, FALSE);
        pe__set_action_flags(stop, pe_action_dangle);
        if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, dangling_source, FALSE, data_set);
        }
    }

    if ((num_all_active == 2) && (num_clean_active == 2) && chosen
        && rsc->partial_migration_source && rsc->partial_migration_target
        && (current->details == rsc->partial_migration_source->details)
        && (chosen->details == rsc->partial_migration_target->details)) {

        /* The chosen node is still the migration target from a partial
         * migration. Attempt to continue the migration instead of recovering
         * by stopping the resource everywhere and starting it on a single node.
         */
        pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
                     "to target %s from %s",
                     rsc->partial_migration_target->details->id,
                     rsc->partial_migration_source->details->id);

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
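         *
         * (For example, an illustrative case not from this changeset: if the
         * resource was last seen active on one clean node and one unclean
         * node, num_clean_active is 1, so it is not treated as multiply
         * active here.)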
*/ multiply_active = (num_clean_active > 1); } else { multiply_active = (num_all_active > 1); } if (multiply_active) { if (rsc->partial_migration_target && rsc->partial_migration_source) { // Migration was in progress, but we've chosen a different target crm_notice("Resource %s can no longer migrate from %s to %s " "(will stop on both nodes)", rsc->id, rsc->partial_migration_source->details->uname, rsc->partial_migration_target->details->uname); } else { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Resource was incorrectly multiply active pe_proc_err("%s resource %s is active on %u nodes (%s)", crm_str(class), rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information"); } if (rsc->recovery_type == recovery_stop_start) { need_stop = TRUE; } /* If by chance a partial migration is in process, but the migration * target is not chosen still, clear all partial migration data. */ rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = FALSE; } if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start", rsc->id); start = start_action(rsc, chosen, TRUE); pe__set_action_flags(start, pe_action_print_always); } if (current && chosen && current->details != chosen->details) { pe_rsc_trace(rsc, "Moving %s from %s to %s", rsc->id, crm_str(current->details->uname), crm_str(chosen->details->uname)); is_moving = TRUE; need_stop = TRUE; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pe_rsc_stop)) { need_stop = TRUE; pe_rsc_trace(rsc, "Recovering %s", rsc->id); } else { pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); if (rsc->next_role == RSC_ROLE_MASTER) { need_promote = TRUE; } } } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id); need_stop = TRUE; } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) { pe_rsc_trace(rsc, "Creating start action for promoted resource %s", rsc->id); start = start_action(rsc, chosen, TRUE); if (!pcmk_is_set(start->flags, pe_action_optional)) { // Recovery of a promoted resource pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id); need_stop = TRUE; } } /* Create any additional actions required when bringing resource down and * back up to same level. */ role = rsc->role; while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s", (need_stop? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) { break; } role = next_role; } while ((rsc->role <= rsc->next_role) && (role != rsc->role) && !pcmk_is_set(rsc->flags, pe_rsc_block)) { bool required = need_stop; next_role = rsc_state_matrix[role][rsc->role]; if ((next_role == RSC_ROLE_MASTER) && need_promote) { required = true; } pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s", (required? 
"required" : "optional"), rsc->id, role2text(role), role2text(next_role)); if (rsc_action_matrix[role][next_role](rsc, chosen, !required, data_set) == FALSE) { break; } role = next_role; } role = rsc->role; /* Required steps from this role to the next */ while (role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)", rsc->id, role2text(role), role2text(next_role), role2text(rsc->next_role)); if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } if (pcmk_is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s", rsc->id); } else if ((rsc->next_role != RSC_ROLE_STOPPED) || !pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s", ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"), rsc->id); start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); Recurring_Stopped(rsc, start, chosen, data_set); } else { pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s", rsc->id); Recurring_Stopped(rsc, NULL, NULL, data_set); } /* if we are stuck in a partial migration, where the target * of the partial migration no longer matches the chosen target. * A full stop/start is required */ if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) { pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue", rsc->id); allow_migrate = FALSE; } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed) || pcmk_any_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_start_pending) || (current && current->details->unclean) || rsc->next_role < RSC_ROLE_STARTED) { allow_migrate = FALSE; } if (allow_migrate) { handle_migration_actions(rsc, current, chosen, data_set); } } static void rsc_avoids_remote_nodes(pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node->details->remote_rsc) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * \param[in] data_set Cluster working set * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. 
*/ static GList * allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (pcmk_is_set(data_set->flags, pe_flag_stdout)) { allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname); } return allowed_nodes; } void native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) { /* This function is on the critical path and worth optimizing as much as possible */ pe_resource_t *top = NULL; GList *allowed_nodes = NULL; bool check_unfencing = FALSE; bool check_utilization = FALSE; if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping native constraints for unmanaged resource: %s", rsc->id); return; } top = uber_parent(rsc); // Whether resource requires unfencing check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing) && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && !pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei); // Order stops before starts (i.e. restart) custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional|pe_order_implies_then|pe_order_restart, data_set); // Promotable ordering: demote before stop, start before promote if (pcmk_is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) { custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_implies_first_master, data_set); custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, data_set); } // Don't clear resource history if probing on same node custom_action_order(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL, pe_order_same_node|pe_order_then_cancels_first, data_set); // Certain checks need allowed nodes if (check_unfencing || check_utilization || rsc->container) { allowed_nodes = allowed_nodes_as_list(rsc, data_set); } if (check_unfencing) { /* Check if the node needs to be unfenced first */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. 
An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ custom_action_order(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pe_order_optional|pe_order_same_node, data_set); custom_action_order(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pe_order_implies_then_on_node|pe_order_same_node, data_set); } } if (check_utilization) { GListPtr gIter = NULL; pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s", rsc->id, data_set->placement_strategy); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *current = (pe_node_t *) gIter->data; char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s", current->details->uname); pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = pe__copy_node(current); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __func__, __LINE__); } custom_action_order(rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_load, data_set); } for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *next = item->data; char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s", next->details->uname); pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); if (load_stopped->node == NULL) { load_stopped->node = pe__copy_node(next); update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __func__, __LINE__); } custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, start_key(rsc), NULL, pe_order_load, data_set); custom_action_order(NULL, strdup(load_stopped_task), load_stopped, rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_load, data_set); free(load_stopped_task); } } if (rsc->container) { pe_resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Do not allow a guest resource to live on a Pacemaker Remote node, * to avoid nesting remotes. However, allow bundles to run on remote * nodes. */ if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP, pe_order_optional, data_set); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(data_set, rsc->container); } if (remote_rsc) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. 
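             * (In effect this acts like an implicit location constraint: the
             * loop below scores every allowed node whose remote_rsc is not
             * this connection at -INFINITY.)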
*/ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. */ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); custom_action_order(rsc->container, pcmk__op_key(rsc->container->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then|pe_order_runnable_left, data_set); custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, pcmk__op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, data_set); if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } pcmk__new_colocation("resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, true, data_set); } } if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) { /* don't allow remote nodes to run stonith devices * or remote connection resources.*/ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } void native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if (constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set); } enum filter_colocation_res filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, pcmk__colocation_t *constraint, gboolean preview) { /* rh side must be allocated before we can process constraint */ if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) { return influence_nothing; } if ((constraint->role_lh >= RSC_ROLE_SLAVE) && rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable) && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) { /* LH and RH resources have already been allocated, place the correct * priority on LH rsc for the given promotable clone resource role */ return influence_rsc_priority; } if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) { // Log an error if we violated a mandatory colocation constraint const pe_node_t *rh_node = rsc_rh->allocated_to; if (rsc_lh->allocated_to == NULL) { // Dependent resource isn't allocated, so constraint doesn't matter return influence_nothing; } if (constraint->score >= INFINITY) { // Dependent resource must colocate with rh_node if ((rh_node == NULL) || (rh_node->details != rsc_lh->allocated_to->details)) { crm_err("%s must be colocated with %s but is not (%s vs. %s)", rsc_lh->id, rsc_rh->id, rsc_lh->allocated_to->details->uname, (rh_node? 
rh_node->details->uname : "unallocated")); } } else if (constraint->score <= -INFINITY) { // Dependent resource must anti-colocate with rh_node if ((rh_node != NULL) && (rsc_lh->allocated_to->details == rh_node->details)) { crm_err("%s and %s must be anti-colocated but are allocated " "to the same node (%s)", rsc_lh->id, rsc_rh->id, rh_node->details->uname); } } return influence_nothing; } if (constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s", role2text(constraint->role_lh), role2text(rsc_lh->next_role)); return influence_nothing; } if (constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { crm_trace("LH: Skipping negative constraint: \"%s\" state filter", role2text(constraint->role_lh)); return influence_nothing; } if (constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { crm_trace("RH: Skipping negative constraint: \"%s\" state filter", role2text(constraint->role_rh)); return influence_nothing; } return influence_rsc_location; } static void influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint) { const char *rh_value = NULL; const char *lh_value = NULL; const char *attribute = CRM_ATTR_ID; int score_multiplier = 1; if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) { return; } if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute); rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute); if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) { if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) { rsc_lh->priority = -INFINITY; } return; } if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) { return; } if (constraint->role_lh == RSC_ROLE_SLAVE) { score_multiplier = -1; } rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score, rsc_lh->priority); } static void colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint) { const char *attribute = CRM_ATTR_ID; const char *value = NULL; GHashTable *work = NULL; GHashTableIter iter; pe_node_t *node = NULL; if (constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if (rsc_rh->allocated_to) { value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute); } else if (constraint->score < 0) { // Nothing to do (anti-colocation with something that is not running) return; } work = pcmk__copy_node_table(rsc_lh->allowed_nodes); g_hash_table_iter_init(&iter, work); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (rsc_rh->allocated_to == NULL) { pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, rsc_rh->id); node->weight = pe__add_scores(-constraint->score, node->weight); } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) { if (constraint->score < CRM_SCORE_INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s@%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = 
pe__add_scores(constraint->score, node->weight); } } else if (constraint->score >= CRM_SCORE_INFINITY) { pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, attribute); node->weight = pe__add_scores(-constraint->score, node->weight); } } if (can_run_any(work) || constraint->score <= -INFINITY || constraint->score >= INFINITY) { g_hash_table_destroy(rsc_lh->allowed_nodes); rsc_lh->allowed_nodes = work; work = NULL; } else { pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (no available nodes)", rsc_lh->id, rsc_rh->id); } if (work) { g_hash_table_destroy(work); } } void native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { enum filter_colocation_res filter_results; CRM_ASSERT(rsc_lh); CRM_ASSERT(rsc_rh); filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE); pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)", ((constraint->score > 0)? "Colocating" : "Anti-colocating"), rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results); switch (filter_results) { case influence_rsc_priority: influence_priority(rsc_lh, rsc_rh, constraint); break; case influence_rsc_location: colocation_match(rsc_lh, rsc_rh, constraint); break; case influence_nothing: default: return; } } static gboolean filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket) { if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) { pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter", role2text(rsc_ticket->role_lh)); return FALSE; } return TRUE; } void rsc_ticket_constraint(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set) { if (rsc_ticket == NULL) { pe_err("rsc_ticket was NULL"); return; } if (rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", rsc_ticket->id); return; } if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) { return; } if (rsc_lh->children) { GListPtr gIter = rsc_lh->children; pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; rsc_ticket_constraint(child_rsc, rsc_ticket, data_set); } return; } pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)", rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id, role2text(rsc_ticket->role_lh)); if ((rsc_ticket->ticket->granted == FALSE) && (rsc_lh->running_on != NULL)) { GListPtr gIter = NULL; switch (rsc_ticket->loss_policy) { case loss_ticket_stop: resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); break; case loss_ticket_demote: // Promotion score will be set to -INFINITY in promotion_order() if (rsc_ticket->role_lh != RSC_ROLE_MASTER) { resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); } break; case loss_ticket_fence: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set); for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pe_fence_node(data_set, node, "deadman ticket was lost", FALSE); } break; case loss_ticket_freeze: if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) { return; } if (rsc_lh->running_on != NULL) { pe__clear_resource_flags(rsc_lh, pe_rsc_managed); pe__set_resource_flags(rsc_lh, pe_rsc_block); } break; } } else if 
(rsc_ticket->ticket->granted == FALSE) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set); } } else if (rsc_ticket->ticket->standby) { if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) { resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set); } } } enum pe_action_flags native_action_flags(pe_action_t * action, pe_node_t * node) { return action->flags; } static inline bool is_primitive_action(pe_action_t *action) { return action && action->rsc && (action->rsc->variant == pe_native); } /*! * \internal * \brief Set action bits appropriately when pe_restart_order is used * * \param[in] first 'First' action in an ordering with pe_restart_order * \param[in] then 'Then' action in an ordering with pe_restart_order * \param[in] filter What ordering flags to care about * * \note pe_restart_order is set for "stop resource before starting it" and * "stop later group member before stopping earlier group member" */ static void handle_restart_ordering(pe_action_t *first, pe_action_t *then, enum pe_action_flags filter) { const char *reason = NULL; CRM_ASSERT(is_primitive_action(first)); CRM_ASSERT(is_primitive_action(then)); // We need to update the action in two cases: // ... if 'then' is required if (pcmk_is_set(filter, pe_action_optional) && !pcmk_is_set(then->flags, pe_action_optional)) { reason = "restart"; } /* ... if 'then' is unrunnable start of managed resource (if a resource * should restart but can't start, we still want to stop) */ if (pcmk_is_set(filter, pe_action_runnable) && !pcmk_is_set(then->flags, pe_action_runnable) && pcmk_is_set(then->rsc->flags, pe_rsc_managed) && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) { reason = "stop"; } if (reason == NULL) { return; } pe_rsc_trace(first->rsc, "Handling %s -> %s for %s", first->uuid, then->uuid, reason); // Make 'first' required if it is runnable if (pcmk_is_set(first->flags, pe_action_runnable)) { pe_action_implies(first, then, pe_action_optional); } // Make 'first' required if 'then' is required if (!pcmk_is_set(then->flags, pe_action_optional)) { pe_action_implies(first, then, pe_action_optional); } // Make 'first' unmigratable if 'then' is unmigratable if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) { pe_action_implies(first, then, pe_action_migrate_runnable); } // Make 'then' unrunnable if 'first' is required but unrunnable if (!pcmk_is_set(first->flags, pe_action_optional) && !pcmk_is_set(first->flags, pe_action_runnable)) { pe_action_implies(then, first, pe_action_runnable); } } enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set) { /* flags == get_action_flags(first, then_node) called from update_action() */ enum pe_graph_flags changed = pe_graph_none; enum pe_action_flags then_flags = then->flags; enum pe_action_flags first_flags = first->flags; crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, then->uuid, then->flags); if (type & pe_order_asymmetrical) { pe_resource_t *then_rsc = then->rsc; enum rsc_role_e then_rsc_role = then_rsc ? 
then_rsc->fns->state(then_rsc, TRUE) : 0;

        if (!then_rsc) {
            /* ignore */

        } else if ((then_rsc_role == RSC_ROLE_STOPPED)
                   && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
            /* ignore... if 'then' is supposed to be stopped after 'first', but
             * then is already stopped, there is nothing to be done when
             * non-symmetrical.
             */

        } else if ((then_rsc_role >= RSC_ROLE_STARTED)
                   && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
                   && pcmk_is_set(then->flags, pe_action_optional)
                   && then->node
                   && pcmk__list_of_1(then_rsc->running_on)
                   && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
            /* Ignore. If 'then' is supposed to be started after 'first', but
             * 'then' is already started, there is nothing to be done when
             * asymmetrical -- unless the start is mandatory, which indicates
             * the resource is restarting, and the ordering is still needed.
             */

        } else if (!(first->flags & pe_action_runnable)) {
            /* prevent 'then' action from happening if 'first' is not runnable
             * and 'then' has not yet occurred.
             */
            pe_action_implies(then, first, pe_action_optional);
            pe_action_implies(then, first, pe_action_runnable);

            pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);

        } else {
            /* ignore... then is allowed to start/stop if it wants to. */
        }
    }

    if (type & pe_order_implies_first) {
        if (pcmk_is_set(filter, pe_action_optional)
            && !pcmk_is_set(flags /* Should be then_flags? */, pe_action_optional)) {
            // Needs pcmk_is_set(first_flags, pe_action_optional) too?
            pe_rsc_trace(first->rsc, "Unset optional on %s because of %s",
                         first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_optional);
        }

        if (pcmk_is_set(flags, pe_action_migrate_runnable)
            && !pcmk_is_set(then->flags, pe_action_migrate_runnable)
            && !pcmk_is_set(then->flags, pe_action_optional)) {
            pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
                         first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_migrate_runnable);
        }
    }

    if (type & pe_order_implies_first_master) {
        if ((filter & pe_action_optional)
            && ((then->flags & pe_action_optional) == FALSE)
            && then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
            pe_action_implies(first, then, pe_action_optional);

            if (pcmk_is_set(first->flags, pe_action_migrate_runnable)
                && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
                pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
                             first->uuid, then->uuid);
                pe_action_implies(first, then, pe_action_migrate_runnable);
            }
            pe_rsc_trace(then->rsc, "Unset optional on %s because of %s",
                         first->uuid, then->uuid);
        }
    }

    if ((type & pe_order_implies_first_migratable)
        && pcmk_is_set(filter, pe_action_optional)) {

        if (((then->flags & pe_action_migrate_runnable) == FALSE)
            || ((then->flags & pe_action_runnable) == FALSE)) {
            pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither "
                         "runnable nor migratable", first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_runnable);
        }

        if ((then->flags & pe_action_optional) == 0) {
            pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional",
                         first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_optional);
        }
    }

    if ((type & pe_order_pseudo_left)
        && pcmk_is_set(filter, pe_action_optional)) {

        if ((first->flags & pe_action_runnable) == FALSE) {
            pe_action_implies(then, first, pe_action_migrate_runnable);
            pe__clear_action_flags(then, pe_action_pseudo);
            pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable",
                         then->uuid, first->uuid);
        }
    }

    if (pcmk_is_set(type, pe_order_runnable_left) && pcmk_is_set(filter,
pe_action_runnable) && pcmk_is_set(then->flags, pe_action_runnable) && !pcmk_is_set(flags, pe_action_runnable)) { pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid); pe_action_implies(then, first, pe_action_runnable); pe_action_implies(then, first, pe_action_migrate_runnable); } if (pcmk_is_set(type, pe_order_implies_then) && pcmk_is_set(filter, pe_action_optional) && pcmk_is_set(then->flags, pe_action_optional) && !pcmk_is_set(flags, pe_action_optional)) { /* in this case, treat migrate_runnable as if first is optional */ if (!pcmk_is_set(first->flags, pe_action_migrate_runnable)) { pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid); pe_action_implies(then, first, pe_action_optional); } } if (pcmk_is_set(type, pe_order_restart)) { handle_restart_ordering(first, then, filter); } if (then_flags != then->flags) { pe__set_graph_flags(changed, first, pe_graph_updated_then); pe_rsc_trace(then->rsc, "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", then->uuid, then->node ? then->node->details->uname : "[none]", then->flags, then_flags, first->uuid, first->flags); if(then->rsc && then->rsc->parent) { /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */ update_action(then, data_set); } } if (first_flags != first->flags) { pe__set_graph_flags(changed, first, pe_graph_updated_first); pe_rsc_trace(first->rsc, "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x", first->uuid, first->node ? first->node->details->uname : "[none]", first->flags, first_flags, then->uuid, then->flags); } return changed; } void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GListPtr gIter = NULL; bool need_role = false; CRM_CHECK((constraint != NULL) && (rsc != NULL), return); // If a role was specified, ensure constraint is applicable need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN); if (need_role && (constraint->role_filter != rsc->next_role)) { pe_rsc_trace(rsc, "Not applying %s to %s because role will be %s not %s", constraint->id, rsc->id, role2text(rsc->next_role), role2text(constraint->role_filter)); return; } if (constraint->node_list_rh == NULL) { pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match", constraint->id, rsc->id); return; } pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id, (need_role? " for role " : ""), (need_role? role2text(constraint->role_filter) : ""), rsc->id); for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pe_node_t *other_node = NULL; other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (other_node != NULL) { pe_rsc_trace(rsc, "* + %d on %s", node->weight, node->details->uname); other_node->weight = pe__add_scores(other_node->weight, node->weight); } else { pe_rsc_trace(rsc, "* = %d on %s", node->weight, node->details->uname); other_node = pe__copy_node(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node); } if (other_node->rsc_discover_mode < constraint->discover_mode) { if (constraint->discover_mode == pe_discover_exclusive) { rsc->exclusive_discover = TRUE; } /* exclusive > never > always... 
always is default */ other_node->rsc_discover_mode = constraint->discover_mode; } } } void native_expand(pe_resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing actions from %s", rsc->id); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; crm_trace("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } } #define log_change(a, fmt, args...) do { \ if(a && a->reason && terminal) { \ printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \ } else if(a && a->reason) { \ crm_notice(fmt" \tdue to %s", ##args, a->reason); \ } else if(terminal) { \ printf(" * "fmt"\n", ##args); \ } else { \ crm_notice(fmt, ##args); \ } \ } while(0) #define STOP_SANITY_ASSERT(lineno) do { \ if(current && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if(stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", \ __func__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", \ __func__, lineno, stop->uuid); \ CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \ } \ } while(0) static void LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) { int len = 0; char *reason = NULL; char *details = NULL; bool same_host = FALSE; bool same_role = FALSE; bool need_role = FALSE; static int rsc_width = 5; static int detail_width = 5; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if(source == NULL) { source = action; } len = strlen(rsc->id); if(len > rsc_width) { rsc_width = len + 2; } if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) { need_role = TRUE; } if(origin != NULL && destination != NULL && origin->details == destination->details) { same_host = TRUE; } if(rsc->role == rsc->next_role) { same_role = TRUE; } if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); } else if (origin == NULL) { /* Starting a resource */ details = crm_strdup_printf("%s", destination->details->uname); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); } else if (destination == NULL) { /* Stopping a resource */ details = crm_strdup_printf("%s", origin->details->uname); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ details = crm_strdup_printf("%s", origin->details->uname); } else if (need_role && same_role) { /* Moving a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role)); } else if (same_role) { /* Moving a normal resource */ details = crm_strdup_printf("%s -> %s", origin->details->uname, 
destination->details->uname); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); } else { /* Moving and promoting/demoting */ details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname); } len = strlen(details); if(len > detail_width) { detail_width = len; } if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) { reason = crm_strdup_printf(" due to %s (blocked)", source->reason); } else if(source->reason) { reason = crm_strdup_printf(" due to %s", source->reason); } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { reason = strdup(" blocked"); } else { reason = strdup(""); } if(terminal) { printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason); } else { crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason); } free(details); free(reason); } void LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) { pe_node_t *next = NULL; pe_node_t *current = NULL; pe_node_t *start_node = NULL; pe_action_t *stop = NULL; pe_action_t *start = NULL; pe_action_t *demote = NULL; pe_action_t *promote = NULL; char *key = NULL; gboolean moving = FALSE; GListPtr possible_matches = NULL; if(rsc->variant == pe_container) { pcmk__bundle_log_actions(rsc, data_set, terminal); return; } if (rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; LogActions(child_rsc, data_set, terminal); } return; } next = rsc->allocated_to; if (rsc->running_on) { current = pe__current_node(rsc); if (rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if (!pcmk_is_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), !pcmk_is_set(rsc->flags, pe_rsc_managed)? 
" unmanaged" : ""); return; } if (current != NULL && next != NULL && !pcmk__str_eq(current->details->id, next->details->id, pcmk__str_casei)) { moving = TRUE; } possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) { start_node = NULL; } else { start_node = current; } possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { pe_action_t *migrate_op = NULL; possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE); if (possible_matches) { migrate_op = possible_matches->data; } CRM_CHECK(next != NULL,); if (next == NULL) { } else if ((migrate_op != NULL) && (current != NULL) && pcmk_is_set(migrate_op->flags, pe_action_runnable)) { LogAction("Migrate", rsc, current, next, start, NULL, terminal); } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) { LogAction("Reload", rsc, current, next, start, NULL, terminal); } else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) { if ((demote != NULL) && (promote != NULL) && !pcmk_is_set(demote->flags, pe_action_optional) && !pcmk_is_set(promote->flags, pe_action_optional)) { LogAction("Re-promote", rsc, current, next, promote, demote, terminal); } else { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } } else if (!pcmk_is_set(start->flags, pe_action_runnable)) { LogAction("Stop", rsc, current, NULL, stop, (stop && stop->reason)? stop : start, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { LogAction(pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move", rsc, current, next, stop, NULL, terminal); } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { LogAction("Recover", rsc, current, NULL, stop, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else { LogAction("Restart", rsc, current, next, start, NULL, terminal); /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */ } g_list_free(possible_matches); return; } if(stop && (rsc->next_role == RSC_ROLE_STOPPED || (start && !pcmk_is_set(start->flags, pe_action_runnable)))) { GListPtr gIter = NULL; key = stop_key(rsc); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pe_action_t *stop_op = NULL; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op && (stop_op->flags & pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); } LogAction("Stop", rsc, node, NULL, stop_op, (stop_op && stop_op->reason)? 
stop_op : start, terminal); } free(key); } else if ((stop != NULL) && pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) { /* 'stop' may be NULL if the failure was ignored */ LogAction("Recover", rsc, current, next, stop, start, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (moving) { LogAction("Move", rsc, current, next, stop, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) { LogAction("Reload", rsc, current, next, start, NULL, terminal); } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) { LogAction("Restart", rsc, current, next, start, NULL, terminal); STOP_SANITY_ASSERT(__LINE__); } else if (rsc->role == RSC_ROLE_MASTER) { CRM_LOG_ASSERT(current != NULL); LogAction("Demote", rsc, current, next, demote, NULL, terminal); } else if(rsc->next_role == RSC_ROLE_MASTER) { CRM_LOG_ASSERT(next); LogAction("Promote", rsc, current, next, promote, NULL, terminal); } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) { LogAction("Start", rsc, current, next, start, NULL, terminal); } } gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set) { GListPtr gIter = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *current = (pe_node_t *) gIter->data; pe_action_t *stop; if (rsc->partial_migration_target) { if (rsc->partial_migration_target->details == current->details) { pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname, next->details->uname, rsc->id); continue; } else { pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id); optional = FALSE; } } pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname); stop = stop_action(rsc, current, optional); if(rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", TRUE); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { update_action_flags(stop, pe_action_runnable | pe_action_clear, __func__, __LINE__); } if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set); order_actions(stop, unfence, pe_order_implies_first); if (!node_has_been_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname); } } } return TRUE; } static void order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order, pe_working_set_t *data_set) { /* When unfencing is in use, we order unfence actions before any probe or * start of resources that require unfencing, and also of fence devices. * * This might seem to violate the principle that fence devices require * only quorum. However, fence agents that unfence often don't have enough * information to even probe or start unless the node is first unfenced. */ if (is_unfence_device(rsc, data_set) || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Start with an optional ordering. Requiring unfencing would result in * the node being unfenced, and all its resources being stopped, * whenever a new resource is added -- which would be highly suboptimal. 
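         *
         * (For example, merely adding a new resource that requires unfencing
         * should not re-unfence every node; the mandatory trigger below is
         * limited to nodes that have never been unfenced.)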
static void
order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
                      enum pe_ordering order, pe_working_set_t *data_set)
{
    /* When unfencing is in use, we order unfence actions before any probe or
     * start of resources that require unfencing, and also of fence devices.
     *
     * This might seem to violate the principle that fence devices require
     * only quorum. However, fence agents that unfence often don't have enough
     * information to even probe or start unless the node is first unfenced.
     */
    if (is_unfence_device(rsc, data_set)
        || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {

        /* Start with an optional ordering. Requiring unfencing would result in
         * the node being unfenced, and all its resources being stopped,
         * whenever a new resource is added -- which would be highly suboptimal.
         */
        pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);

        order_actions(unfence, action, order);

        if (!node_has_been_unfenced(node)) {
            // But unfencing is required if it has never been done
            char *reason = crm_strdup_printf("required by %s %s",
                                             rsc->id, action->task);

            trigger_unfencing(NULL, node, reason, NULL, data_set);
            free(reason);
        }
    }
}

gboolean
StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
         pe_working_set_t * data_set)
{
    pe_action_t *start = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id,
                 next ? next->details->uname : "N/A", optional,
                 next ? next->weight : 0);
    start = start_action(rsc, next, TRUE);

    order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);

    if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
        update_action_flags(start, pe_action_optional | pe_action_clear,
                            __func__, __LINE__);
    }

    return TRUE;
}

gboolean
PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
           pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    gboolean runnable = TRUE;
    GListPtr action_list = NULL;

    CRM_ASSERT(rsc);
    CRM_CHECK(next != NULL, return FALSE);
    pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);

    action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *start = (pe_action_t *) gIter->data;

        if (!pcmk_is_set(start->flags, pe_action_runnable)) {
            runnable = FALSE;
        }
    }
    g_list_free(action_list);

    if (runnable) {
        promote_action(rsc, next, optional);
        return TRUE;
    }

    pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);

    action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *promote = (pe_action_t *) gIter->data;

        update_action_flags(promote, pe_action_runnable | pe_action_clear,
                            __func__, __LINE__);
    }
    g_list_free(action_list);

    return TRUE;
}

gboolean
DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
          pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "%s", rsc->id);

    /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
    for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
        pe_node_t *current = (pe_node_t *) gIter->data;

        pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
        demote_action(rsc, current, optional);
    }
    return TRUE;
}
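#if 0
/* Usage sketch: the same helper is invoked with different ordering
 * strengths at its two call sites in this file.  A start is tied to the
 * unfence with pe_order_implies_then (StartRsc() above), while a probe
 * only gets the weaker pe_order_optional (native_create_probe() below),
 * so a merely possible unfence never blocks probing:
 *
 *     order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
 *     order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
 */
#endif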
next->details->uname : "N/A"); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set) { CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s", rsc->id); return FALSE; } gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set) { if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if (node == NULL) { pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id); return FALSE; } else if (node->details->unclean || node->details->online == FALSE) { pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional ? pe_order_implies_then : pe_order_optional, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, optional ? pe_order_implies_then : pe_order_optional, data_set); return TRUE; } gboolean native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete, gboolean force, pe_working_set_t * data_set) { enum pe_ordering flags = pe_order_optional; char *key = NULL; pe_action_t *probe = NULL; pe_node_t *running = NULL; pe_node_t *allowed = NULL; pe_resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if (rc_inactive == NULL) { rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) { pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id); return FALSE; } if (pe__is_guest_or_remote_node(node)) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents", rsc->id, node->details->id); return FALSE; } else if (pe__is_guest_node(node) && pe__resource_contains_guest_node(data_set, rsc)) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes", rsc->id, node->details->id); return FALSE; } else if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections", rsc->id, node->details->id); return FALSE; } } if (rsc->children) { GListPtr gIter = NULL; gboolean any_created = FALSE; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set) || any_created; } return any_created; } else if ((rsc->container) && (!rsc->is_remote_node)) { pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id); return FALSE; } if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id); return FALSE; } // Check whether resource is already known on node if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) { pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname); return FALSE; } allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (rsc->exclusive_discover || 
top->exclusive_discover) { if (allowed == NULL) { /* exclusive discover is enabled and this node is not in the allowed list. */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id); return FALSE; } else if (allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id); return FALSE; } } if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) { /* If this node was allowed to host this resource it would * have been explicitly added to the 'allowed_nodes' list. * However it wasn't and the node has discovery disabled, so * no need to probe for this resource. */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id); return FALSE; } if (allowed && allowed->rsc_discover_mode == pe_discover_never) { /* this resource is marked as not needing to be discovered on this node */ pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id); return FALSE; } if (pe__is_guest_node(node)) { pe_resource_t *remote = node->details->remote_rsc->container; if(remote->role == RSC_ROLE_STOPPED) { /* If the container is stopped, then we know anything that * might have been inside it is also stopped and there is * no need to probe. * * If we don't know the container's state on the target * either: * * - the container is running, the transition will abort * and we'll end up in a different case next time, or * * - the container is stopped * * Either way there is no need to probe. * */ if(remote->allocated_to && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) { /* For safety, we order the 'rsc' start after 'remote' * has been probed. * * Using 'top' helps for groups, but we may need to * follow the start's ordering chain backwards. */ custom_action_order(remote, pcmk__op_key(remote->id, RSC_STATUS, 0), NULL, top, pcmk__op_key(top->id, RSC_START, 0), NULL, pe_order_optional, data_set); } pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped", rsc->id, node->details->id, remote->id); return FALSE; /* Here we really we want to check if remote->stop is required, * but that information doesn't exist yet */ } else if(node->details->remote_requires_reset || node->details->unclean || pcmk_is_set(remote->flags, pe_rsc_failed) || remote->next_role == RSC_ROLE_STOPPED || (remote->allocated_to && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL) ) { /* The container is stopping or restarting, don't start * 'rsc' until 'remote' stops as this also implies that * 'rsc' is stopped - avoiding the need to probe */ custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0), NULL, top, pcmk__op_key(top->id, RSC_START, 0), NULL, pe_order_optional, data_set); pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving", rsc->id, node->details->id, remote->id); return FALSE; /* } else { * The container is running so there is no problem probing it */ } } key = pcmk__op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); update_action_flags(probe, pe_action_optional | pe_action_clear, __func__, __LINE__); order_after_unfencing(rsc, node, probe, pe_order_optional, data_set); /* * We need to know if it's running_on (not just known_on) this node * to correctly determine the target rc. 
*/ running = pe_find_node_id(rsc->running_on, node->details->id); if (running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if (rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role), pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on); if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) { top = rsc; } else { crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id); } if (!pcmk_is_set(probe->flags, pe_action_runnable) && (rsc->running_on == NULL)) { /* Prevent the start from occurring if rsc isn't active, but * don't cause it to stop if it was active already */ pe__set_order_flags(flags, pe_order_runnable_left); } custom_action_order(rsc, NULL, probe, top, pcmk__op_key(top->id, RSC_START, 0), NULL, flags, data_set); /* Before any reloads, if they exist */ custom_action_order(rsc, NULL, probe, top, reload_key(rsc), NULL, pe_order_optional, data_set); #if 0 // complete is always null currently if (!is_unfence_device(rsc, data_set)) { /* Normally rsc.start depends on probe complete which depends * on rsc.probe. But this can't be the case for fence devices * with unfencing, as it would create graph loops. * * So instead we explicitly order 'rsc.probe then rsc.start' */ order_actions(probe, complete, pe_order_implies_then); } #endif return TRUE; } /*! * \internal * \brief Check whether a resource is known on a particular node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return TRUE if resource (or parent if an anonymous clone) is known */ static bool rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node) { if (pe_hash_table_lookup(rsc->known_on, node->details->id)) { return TRUE; } else if ((rsc->variant == pe_native) && pe_rsc_is_anon_clone(rsc->parent) && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) { /* We check only the parent, not the uber-parent, because we cannot * assume that the resource is known if it is in an anonymously cloned * group (which may be only partially known). */ return TRUE; } return FALSE; } /*! * \internal * \brief Order a resource's start and promote actions relative to fencing * * \param[in] rsc Resource to be ordered * \param[in] stonith_op Fence action * \param[in] data_set Cluster information */ static void native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set) { pe_node_t *target; GListPtr gIter = NULL; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; switch (action->needs) { case rsc_req_nothing: // Anything other than start or promote requires nothing break; case rsc_req_stonith: order_actions(stonith_op, action, pe_order_optional); break; case rsc_req_quorum: if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id) && !rsc_is_known_on(rsc, target)) { /* If we don't know the status of the resource on the node * we're about to shoot, we have to assume it may be active * there. Order the resource start after the fencing. This * is analogous to waiting for all the probes for a resource * to complete before starting it. * * The most likely explanation is that the DC died and took * its status with it. 
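#if 0
/* Illustrative sketch only -- a hypothetical helper: native_create_probe()
 * above encodes the probe's expected result as a meta-attribute, so the
 * controller can distinguish "found the expected state" from a surprise
 * that must abort the transition.  For a resource believed inactive on the
 * probed node, the expected rc is OCF "not running":
 */
static void
example_probe_target_rc(pe_action_t *probe)
{
    char *rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);

    add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
    free(rc_inactive);
}
#endif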
/*!
 * \internal
 * \brief Order a resource's start and promote actions relative to fencing
 *
 * \param[in] rsc         Resource to be ordered
 * \param[in] stonith_op  Fence action
 * \param[in] data_set    Cluster information
 */
static void
native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op,
                         pe_working_set_t * data_set)
{
    pe_node_t *target;
    GListPtr gIter = NULL;

    CRM_CHECK(stonith_op && stonith_op->node, return);
    target = stonith_op->node;

    for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        switch (action->needs) {
            case rsc_req_nothing:
                // Anything other than start or promote requires nothing
                break;

            case rsc_req_stonith:
                order_actions(stonith_op, action, pe_order_optional);
                break;

            case rsc_req_quorum:
                if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
                    && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
                    && !rsc_is_known_on(rsc, target)) {

                    /* If we don't know the status of the resource on the node
                     * we're about to shoot, we have to assume it may be active
                     * there. Order the resource start after the fencing. This
                     * is analogous to waiting for all the probes for a resource
                     * to complete before starting it.
                     *
                     * The most likely explanation is that the DC died and took
                     * its status with it.
                     */
                    pe_rsc_debug(rsc, "Ordering %s after %s recovery",
                                 action->uuid, target->details->uname);
                    order_actions(stonith_op, action,
                                  pe_order_optional | pe_order_runnable_left);
                }
                break;
        }
    }
}

static void
native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op,
                        pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    GListPtr action_list = NULL;
    bool order_implicit = false;

    pe_resource_t *top = uber_parent(rsc);
    pe_action_t *parent_stop = NULL;
    pe_node_t *target;

    CRM_CHECK(stonith_op && stonith_op->node, return);
    target = stonith_op->node;

    /* Get a list of stop actions potentially implied by the fencing */
    action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);

    /* If resource requires fencing, implicit actions must occur after fencing.
     *
     * Implied stops and demotes of resources running on guest nodes are always
     * ordered after fencing, even if the resource does not require fencing,
     * because guest node "fencing" is actually just a resource stop.
     */
    if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
        || pe__is_guest_node(target)) {

        order_implicit = true;
    }

    if (action_list && order_implicit) {
        parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
    }

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        // The stop would never complete, so convert it into a pseudo-action.
        update_action_flags(action, pe_action_pseudo|pe_action_runnable,
                            __func__, __LINE__);

        if (order_implicit) {
            update_action_flags(action, pe_action_implied_by_stonith,
                                __func__, __LINE__);

            /* Order the stonith before the parent stop (if any).
             *
             * Also order the stonith before the resource stop, unless the
             * resource is inside a bundle -- that would cause a graph loop.
             * We can rely on the parent stop's ordering instead.
             *
             * User constraints must not order a resource in a guest node
             * relative to the guest node container resource. The
             * pe_order_preserve flag marks constraints as generated by the
             * cluster and thus immune to that check (and is irrelevant if
             * target is not a guest).
             */
            if (!pe_rsc_is_bundled(rsc)) {
                order_actions(stonith_op, action, pe_order_preserve);
            }
            order_actions(stonith_op, parent_stop, pe_order_preserve);
        }

        if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
            crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
                       rsc->id, (order_implicit? "after" : "because"),
                       target->details->uname);
        } else {
            crm_info("%s is implicit %s %s is fenced",
                     action->uuid, (order_implicit? "after" : "because"),
                     target->details->uname);
        }

        if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
            /* Create a second notification that will be delivered
             * immediately after the node is fenced
             *
             * Basic problem:
             * - C is a clone active on the node to be shot and stopping on another
             * - R is a resource that depends on C
             *
             * + C.stop depends on R.stop
             * + C.stopped depends on STONITH
             * + C.notify depends on C.stopped
             * + C.healthy depends on C.notify
             * + R.stop depends on C.healthy
             *
             * The extra notification here changes
             *  + C.healthy depends on C.notify
             * into:
             *  + C.healthy depends on C.notify'
             *  + C.notify' depends on STONITH'
             * thus breaking the loop
             */
            create_secondary_notification(action, rsc, stonith_op, data_set);
        }

        /* From Bug #1601, successful fencing must be an input to a failed
         * resource's stop action.

           However given group(rA, rB) running on nodeX and B.stop has failed,
           A := stop healthy resource (rA.stop)
           B := stop failed resource (pseudo operation B.stop)
           C := stonith nodeX
           A requires B, B requires C, C requires A
           This loop would prevent the cluster from making progress.

           This block creates the "C requires A" dependency and therefore must
           (at least for now) be disabled.

           Instead, run the block above and treat all resources on nodeX as B
           would be (marked as a pseudo op depending on the STONITH).

           TODO: Break the "A requires B" dependency in update_action() and
           re-enable this block

           } else if(is_stonith == FALSE) {
               crm_info("Moving healthy resource %s"
                        " off %s before fencing",
                        rsc->id, node->details->uname);

               * stop healthy resources before the
               * stonith op
               *
               custom_action_order(
                   rsc, stop_key(rsc), NULL,
                   NULL, strdup(CRM_OP_FENCE), stonith_op,
                   pe_order_optional, data_set);
        */
    }

    g_list_free(action_list);

    /* Get a list of demote actions potentially implied by the fencing */
    action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
            || pcmk_is_set(rsc->flags, pe_rsc_failed)) {

            if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
                pe_rsc_info(rsc,
                            "Demote of failed resource %s is implicit after %s is fenced",
                            rsc->id, target->details->uname);
            } else {
                pe_rsc_info(rsc, "%s is implicit after %s is fenced",
                            action->uuid, target->details->uname);
            }

            /* The demote would never complete and is now implied by the
             * fencing, so convert it into a pseudo-action.
             */
            update_action_flags(action, pe_action_pseudo|pe_action_runnable,
                                __func__, __LINE__);

            if (pe_rsc_is_bundled(rsc)) {
                /* Do nothing, let the recovery be ordered after the parent's implied stop */

            } else if (order_implicit) {
                order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
            }
        }
    }

    g_list_free(action_list);
}

void
rsc_stonith_ordering(pe_resource_t * rsc, pe_action_t * stonith_op,
                     pe_working_set_t * data_set)
{
    if (rsc->children) {
        GListPtr gIter = NULL;

        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            rsc_stonith_ordering(child_rsc, stonith_op, data_set);
        }

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s",
                     rsc->id);

    } else {
        native_start_constraints(rsc, stonith_op, data_set);
        native_stop_constraints(rsc, stonith_op, data_set);
    }
}
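#if 0
/* Recap of the dependency chain described in native_stop_constraints()
 * above (hypothetical clone C, dependent resource R):
 *
 *   C.stop    depends on R.stop
 *   C.stopped depends on STONITH
 *   C.notify  depends on C.stopped
 *   C.healthy depends on C.notify
 *   R.stop    depends on C.healthy
 *
 * The secondary notification rewires "C.healthy depends on C.notify" into
 * "C.healthy depends on C.notify'" with "C.notify' depends on STONITH'",
 * which is what breaks the loop.
 */
#endif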
void
ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    pe_action_t *reload = NULL;

    if (rsc->children) {
        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            ReloadRsc(child_rsc, node, data_set);
        }
        return;

    } else if (rsc->variant > pe_native) {
        /* Complex resource with no children */
        return;

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
        return;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        /* We don't need to specify any particular actions here, normal failure
         * recovery will apply.
         */
        pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
        return;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
        /* If a resource's configuration changed while a start was pending,
         * force a full restart.
         */
        pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
        stop_action(rsc, node, FALSE);
        return;

    } else if (node == NULL) {
        pe_rsc_trace(rsc, "%s: not active", rsc->id);
        return;
    }

    pe_rsc_trace(rsc, "Processing %s", rsc->id);
    pe__set_resource_flags(rsc, pe_rsc_reload);

    reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node,
                           FALSE, TRUE, data_set);
    pe_action_set_reason(reload, "resource definition change", FALSE);

    custom_action_order(NULL, NULL, reload,
                        rsc, stop_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        data_set);
    custom_action_order(NULL, NULL, reload,
                        rsc, demote_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        data_set);
}

void
native_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
    char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
    pe_resource_t *parent;

    if (value) {
        char *name = NULL;

        name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
        crm_xml_add(xml, name, value);
        free(name);
    }

    value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
    if (value) {
        char *name = NULL;

        name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
        crm_xml_add(xml, name, value);
        free(name);
    }

    for (parent = rsc; parent != NULL; parent = parent->parent) {
        if (parent->container) {
            crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER,
                        parent->container->id);
        }
    }
}
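#if 0
/* Usage sketch (semantics inferred from the flag name, so treat this as an
 * assumption): pe_order_then_cancels_first in ReloadRsc() above means that
 * if the "then" action (the stop) is scheduled, the "first" action (the
 * reload) is cancelled rather than reloading a resource that is about to
 * stop anyway:
 *
 *     custom_action_order(NULL, NULL, reload,
 *                         rsc, stop_key(rsc), NULL,
 *                         pe_order_optional|pe_order_then_cancels_first,
 *                         data_set);
 */
#endif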
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index 40d07e9691..f3bde0c2be 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -1,1032 +1,1027 @@
/*
 * Copyright 2004-2021 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>

#define VARIANT_CLONE 1
#include <lib/pengine/variant.h>

extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);

static void
child_promoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                            pe_resource_t * rsc, pe_resource_t * child,
                            pe_resource_t * last, pe_working_set_t * data_set)
{
    if (child == NULL) {
        if (clone_data->ordered && last != NULL) {
            pe_rsc_trace(rsc, "Ordered version (last node)");
            /* last child promote before promoted started */
            new_rsc_order(last, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);
        }
        return;
    }

    /* child promote before global promoted */
    new_rsc_order(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);

    /* global promote before child promote */
    new_rsc_order(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);

    if (clone_data->ordered) {
        pe_rsc_trace(rsc, "Ordered version");
        if (last == NULL) {
            /* global promote before first child promote */
            last = rsc;
        }
        /* else: child/child relative promote */
        order_start_start(last, child, type);
        new_rsc_order(last, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);

    } else {
        pe_rsc_trace(rsc, "Un-ordered version");
    }
}

static void
child_demoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                           pe_resource_t * rsc, pe_resource_t * child,
                           pe_resource_t * last, pe_working_set_t * data_set)
{
    if (child == NULL) {
        if (clone_data->ordered && last != NULL) {
            pe_rsc_trace(rsc, "Ordered version (last node)");
            /* global demote before first child demote */
            new_rsc_order(rsc, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, data_set);
        }
        return;
    }

    /* child demote before global demoted */
    new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED,
                  pe_order_implies_then_printed, data_set);

    /* global demote before child demote */
    new_rsc_order(rsc, RSC_DEMOTE, child, RSC_DEMOTE,
                  pe_order_implies_first_printed, data_set);

    if (clone_data->ordered && last != NULL) {
        pe_rsc_trace(rsc, "Ordered version");
        /* child/child relative demote */
        new_rsc_order(child, RSC_DEMOTE, last, RSC_DEMOTE, type, data_set);

    } else if (clone_data->ordered) {
        pe_rsc_trace(rsc, "Ordered version (1st node)");
        /* first child stop before global stopped */
        new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type, data_set);

    } else {
        pe_rsc_trace(rsc, "Un-ordered version");
    }
}

static void
check_promotable_actions(pe_resource_t *rsc, gboolean *demoting,
                         gboolean *promoting)
{
    GListPtr gIter = NULL;

    if (rsc->children) {
        gIter = rsc->children;
        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child = (pe_resource_t *) gIter->data;

            check_promotable_actions(child, demoting, promoting);
        }
        return;
    }

    CRM_ASSERT(demoting != NULL);
    CRM_ASSERT(promoting != NULL);

    gIter = rsc->actions;
    for (; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (*promoting && *demoting) {
            return;

        } else if (pcmk_is_set(action->flags, pe_action_optional)) {
            continue;

        } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_casei)) {
            *demoting = TRUE;

        } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_casei)) {
            *promoting = TRUE;
        }
    }
}
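#if 0
/* Worked example (hypothetical two-instance promotable clone "ms" with
 * clone_data->ordered set): the orderings created by
 * child_promoting_constraints() above chain the instance promotes
 * serially between the clone-level pseudo-actions:
 *
 *   ms_promote_0 -> ms:0_promote_0 -> ms:1_promote_0 -> ms_promoted_0
 *
 * With clone_data->ordered unset, both instance promotes depend only on
 * ms_promote_0 and may run in parallel.
 */
#endif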
static void
apply_master_location(pe_resource_t *child, GListPtr location_constraints,
                      pe_node_t *chosen)
{
    CRM_CHECK(child && chosen, return);
    for (GListPtr gIter = location_constraints; gIter; gIter = gIter->next) {
        pe_node_t *cons_node = NULL;
        pe__location_t *cons = gIter->data;

        if (cons->role_filter == RSC_ROLE_MASTER) {
            pe_rsc_trace(child, "Applying %s to %s", cons->id, child->id);
            cons_node = pe_find_node_id(cons->node_list_rh, chosen->details->id);
        }
        if (cons_node != NULL) {
            int new_priority = pe__add_scores(child->priority,
                                              cons_node->weight);

            pe_rsc_trace(child, "\t%s[%s]: %d -> %d (%d)",
                         child->id, cons_node->details->uname, child->priority,
                         new_priority, cons_node->weight);
            child->priority = new_priority;
        }
    }
}

static pe_node_t *
guest_location(pe_node_t *guest_node)
{
    pe_resource_t *guest = guest_node->details->remote_rsc->container;

    return guest->fns->location(guest, NULL, FALSE);
}

static pe_node_t *
can_be_master(pe_resource_t * rsc)
{
    pe_node_t *node = NULL;
    pe_node_t *local_node = NULL;
    pe_resource_t *parent = uber_parent(rsc);
    clone_variant_data_t *clone_data = NULL;

#if 0
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;

    role = rsc->fns->state(rsc, FALSE);
    crm_info("%s role: %s", rsc->id, role2text(role));
#endif

    if (rsc->children) {
        GListPtr gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child = (pe_resource_t *) gIter->data;

            if (can_be_master(child) == NULL) {
                pe_rsc_trace(rsc, "Child %s of %s can't be promoted",
                             child->id, rsc->id);
                return NULL;
            }
        }
    }

    node = rsc->fns->location(rsc, NULL, FALSE);
    if (node == NULL) {
        pe_rsc_trace(rsc, "%s cannot be master: not allocated", rsc->id);
        return NULL;

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
            crm_notice("Forcing unmanaged master %s to remain promoted on %s",
                       rsc->id, node->details->uname);

        } else {
            return NULL;
        }

    } else if (rsc->priority < 0) {
        pe_rsc_trace(rsc, "%s cannot be master: preference: %d",
                     rsc->id, rsc->priority);
        return NULL;

    } else if (can_run_resources(node) == FALSE) {
        crm_trace("Node can't run any resources: %s", node->details->uname);
        return NULL;

    /* @TODO It's possible this check should be done in can_run_resources()
     * instead. We should investigate all its callers to figure out whether that
     * would be a good idea.
     */
    } else if (pe__is_guest_node(node) && (guest_location(node) == NULL)) {
        pe_rsc_trace(rsc, "%s cannot be promoted: guest %s not allocated",
                     rsc->id, node->details->remote_rsc->container->id);
        return NULL;
    }

    get_clone_variant_data(clone_data, parent);
    local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);

    if (local_node == NULL) {
        crm_err("%s cannot run on %s: node not allowed",
                rsc->id, node->details->uname);
        return NULL;

    } else if ((local_node->count < clone_data->promoted_node_max)
               || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        return local_node;

    } else {
        pe_rsc_trace(rsc, "%s cannot be master on %s: node full",
                     rsc->id, node->details->uname);
    }

    return NULL;
}
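#if 0
/* Worked example (hypothetical clone with promoted-node-max=1, i.e.
 * clone_data->promoted_node_max == 1): if one instance has already been
 * promoted on node1 during this run (local_node->count == 1), a later
 * can_be_master() call for a sibling also allocated to node1 takes the
 * "node full" branch above and returns NULL, capping the number of
 * promoted instances per node.
 */
#endif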
static gint
sort_promotable_instance(gconstpointer a, gconstpointer b, gpointer data_set)
{
    int rc;
    enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
    enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;

    const pe_resource_t *resource1 = (const pe_resource_t *)a;
    const pe_resource_t *resource2 = (const pe_resource_t *)b;

    CRM_ASSERT(resource1 != NULL);
    CRM_ASSERT(resource2 != NULL);

    role1 = resource1->fns->state(resource1, TRUE);
    role2 = resource2->fns->state(resource2, TRUE);

    rc = sort_rsc_index(a, b);
    if (rc != 0) {
        crm_trace("%s %c %s (index)", resource1->id,
                  rc < 0 ? '<' : '>', resource2->id);
        return rc;
    }

    if (role1 > role2) {
        crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id);
        return -1;

    } else if (role1 < role2) {
        crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id);
        return 1;
    }

    return sort_clone_instance(a, b, data_set);
}

static void
promotion_order(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    GListPtr gIter = NULL;
    pe_node_t *node = NULL;
    pe_node_t *chosen = NULL;
    clone_variant_data_t *clone_data = NULL;
    char score[33];
    size_t len = sizeof(score);

    get_clone_variant_data(clone_data, rsc);

    if (clone_data->merged_master_weights) {
        return;
    }
    clone_data->merged_master_weights = TRUE;
    pe_rsc_trace(rsc, "Merging weights for %s", rsc->id);
    pe__set_resource_flags(rsc, pe_rsc_merging);

    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child = (pe_resource_t *) gIter->data;

        pe_rsc_trace(rsc, "Sort index: %s = %d", child->id, child->sort_index);
    }
    pe__show_node_weights(true, rsc, "Before", rsc->allowed_nodes);

    gIter = rsc->children;
    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child = (pe_resource_t *) gIter->data;

        chosen = child->fns->location(child, NULL, FALSE);
        if (chosen == NULL || child->sort_index < 0) {
            pe_rsc_trace(rsc, "Skipping %s", child->id);
            continue;
        }

        node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes,
                                                  chosen->details->id);
        CRM_ASSERT(node != NULL);
        /* adds in master preferences and rsc_location.role=Master */
        score2char_stack(child->sort_index, score, len);
        pe_rsc_trace(rsc, "Adding %s to %s from %s", score,
                     node->details->uname, child->id);
        node->weight = pe__add_scores(child->sort_index, node->weight);
    }

    pe__show_node_weights(true, rsc, "Middle", rsc->allowed_nodes);

    gIter = rsc->rsc_cons;
    for (; gIter != NULL; gIter = gIter->next) {
        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;

        /* (re-)adds location preferences of resources that the
         * master instance should/must be colocated with
         */
        if (constraint->role_lh == RSC_ROLE_MASTER) {
            enum pe_weights flags = constraint->score == INFINITY ?
                                    0 : pe_weights_rollback;

            pe_rsc_trace(rsc, "RHS: %s with %s: %d", constraint->rsc_lh->id,
                         constraint->rsc_rh->id, constraint->score);
            rsc->allowed_nodes =
                constraint->rsc_rh->cmds->merge_weights(constraint->rsc_rh,
                                                        rsc->id, rsc->allowed_nodes,
                                                        constraint->node_attribute,
                                                        (float)constraint->score / INFINITY,
                                                        flags);
        }
    }

    gIter = rsc->rsc_cons_lhs;
    for (; gIter != NULL; gIter = gIter->next) {
        pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;

        if (!pcmk__colocation_has_influence(constraint, NULL)) {
            continue;
        }

        /* (re-)adds location preferences of resource that wish to be
         * colocated with the master instance
         */
        if (constraint->role_rh == RSC_ROLE_MASTER) {
            pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id,
                         constraint->rsc_rh->id, constraint->score);
            rsc->allowed_nodes =
                constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh,
                                                        rsc->id, rsc->allowed_nodes,
                                                        constraint->node_attribute,
                                                        (float)constraint->score / INFINITY,
                                                        (pe_weights_rollback |
                                                         pe_weights_positive));
        }
    }

    gIter = rsc->rsc_tickets;
    for (; gIter != NULL; gIter = gIter->next) {
        rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data;

        if (rsc_ticket->role_lh == RSC_ROLE_MASTER
            && (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby)) {
            resource_location(rsc, NULL, -INFINITY, "__stateful_without_ticket__",
                              data_set);
        }
    }

    pe__show_node_weights(true, rsc, "After", rsc->allowed_nodes);

    /* write them back and sort */
    gIter = rsc->children;
    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child = (pe_resource_t *) gIter->data;

        chosen = child->fns->location(child, NULL, FALSE);
        if (!pcmk_is_set(child->flags, pe_rsc_managed)
            && (child->next_role == RSC_ROLE_MASTER)) {
            child->sort_index = INFINITY;

        } else if (chosen == NULL || child->sort_index < 0) {
            pe_rsc_trace(rsc, "%s: %d", child->id, child->sort_index);

        } else {
            node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes,
                                                      chosen->details->id);
            CRM_ASSERT(node != NULL);

            child->sort_index = node->weight;
        }
        pe_rsc_trace(rsc, "Set sort index: %s = %d", child->id, child->sort_index);
    }

    rsc->children = g_list_sort_with_data(rsc->children,
                                          sort_promotable_instance, data_set);
    pe__clear_resource_flags(rsc, pe_rsc_merging);
}
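#if 0
/* Worked example with hypothetical scores: instance stateful:0 has
 * sort_index 100 (its promotion score) and was allocated to node1, whose
 * weight in rsc->allowed_nodes starts at 0:
 *
 *   "Before":  node1 = 0,   node2 = 0
 *   merge:     node1 = pe__add_scores(100, 0) = 100
 *   "After":   node1 = 100, node2 = 0
 *
 * promotion_order() then writes the weights back as each child's
 * sort_index, so the sort_promotable_instance() pass orders stateful:0
 * first for promotion.
 */
#endif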
static gboolean
filter_anonymous_instance(pe_resource_t *rsc, const pe_node_t *node)
{
    GListPtr rIter = NULL;
    char *key = clone_strip(rsc->id);
    pe_resource_t *parent = uber_parent(rsc);

    for (rIter = parent->children; rIter; rIter = rIter->next) {
        /* If there is an active instance on the node, only it receives the
         * promotion score. Use ->find_rsc() in case this is a cloned group.
         */
        pe_resource_t *child = rIter->data;
        pe_resource_t *active = parent->fns->find_rsc(child, key, node,
                                                      pe_find_clone|pe_find_current);

        if (rsc == active) {
            pe_rsc_trace(rsc, "Found %s for %s active on %s: done",
                         active->id, key, node->details->uname);
            free(key);
            return TRUE;
        } else if (active) {
            pe_rsc_trace(rsc, "Found %s for %s on %s: not %s",
                         active->id, key, node->details->uname, rsc->id);
            free(key);
            return FALSE;
        } else {
            pe_rsc_trace(rsc, "%s on %s: not active",
                         key, node->details->uname);
        }
    }

    for (rIter = parent->children; rIter; rIter = rIter->next) {
        pe_resource_t *child = rIter->data;

        /*
         * We know it's not running, but any score will still count if
         * the instance has been probed on $node
         *
         * Again use ->find_rsc() because we might be a cloned group
         * and knowing that other members of the group are known here
         * implies nothing
         */
        rsc = parent->fns->find_rsc(child, key, NULL, pe_find_clone);
        CRM_LOG_ASSERT(rsc);
        if (rsc) {
            pe_rsc_trace(rsc, "Checking %s for %s on %s", rsc->id, key,
                         node->details->uname);
            if (g_hash_table_lookup(rsc->known_on, node->details->id)) {
                free(key);
                return TRUE;
            }
        }
    }
    free(key);
    return FALSE;
}

static const char *
lookup_promotion_score(pe_resource_t *rsc, const pe_node_t *node, const char *name)
{
    const char *attr_value = NULL;

    if (node && name) {
        char *attr_name = crm_strdup_printf("master-%s", name);

        attr_value = pe_node_attribute_calculated(node, attr_name, rsc);
        free(attr_name);
    }
    return attr_value;
}

static int
promotion_score(pe_resource_t *rsc, const pe_node_t *node, int not_set_value)
{
    char *name = rsc->id;
    const char *attr_value = NULL;
    int score = not_set_value;
    pe_node_t *match = NULL;

    CRM_CHECK(node != NULL, return not_set_value);

    if (rsc->children) {
        GListPtr gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child = (pe_resource_t *) gIter->data;
            int c_score = promotion_score(child, node, not_set_value);

            if (score == not_set_value) {
                score = c_score;
            } else {
                score += c_score;
            }
        }
        return score;
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
        && filter_anonymous_instance(rsc, node)) {

        pe_rsc_trace(rsc, "Anonymous clone %s is allowed on %s",
                     rsc->id, node->details->uname);

    } else if (rsc->running_on || g_hash_table_size(rsc->known_on)) {
        /* If we've probed and/or started the resource anywhere, consider
         * promotion scores only from nodes where we know the status. However,
         * if the status of all nodes is unknown (e.g. cluster startup),
         * skip this code, to make sure we take into account any permanent
         * promotion scores set previously.
         */
        pe_node_t *known = pe_hash_table_lookup(rsc->known_on, node->details->id);

        match = pe_find_node_id(rsc->running_on, node->details->id);
        if ((match == NULL) && (known == NULL)) {
            pe_rsc_trace(rsc, "skipping %s (aka. %s) promotion score on %s because inactive",
                         rsc->id, rsc->clone_name, node->details->uname);
            return score;
        }
    }

    match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
    if (match == NULL) {
        return score;

    } else if (match->weight < 0) {
        pe_rsc_trace(rsc, "%s on %s has score: %d - ignoring",
                     rsc->id, match->details->uname, match->weight);
        return score;
    }

    if (rsc->clone_name) {
        /* Use the name the lrm knows this resource as,
         * since that's what crm_master would have used too
         */
        name = rsc->clone_name;
    }

    attr_value = lookup_promotion_score(rsc, node, name);
    pe_rsc_trace(rsc, "promotion score for %s on %s = %s",
                 name, node->details->uname, crm_str(attr_value));

    if ((attr_value == NULL) && !pcmk_is_set(rsc->flags, pe_rsc_unique)) {
        /* If we don't have any LRM history yet, we won't have clone_name -- in
         * that case, for anonymous clones, try the resource name without any
         * instance number.
         */
        name = clone_strip(rsc->id);
        if (strcmp(rsc->id, name)) {
            attr_value = lookup_promotion_score(rsc, node, name);
            pe_rsc_trace(rsc, "stripped promotion score for %s on %s = %s",
                         name, node->details->uname, crm_str(attr_value));
        }
        free(name);
    }

    if (attr_value != NULL) {
        score = char2score(attr_value);
    }

    return score;
}

void
apply_master_prefs(pe_resource_t *rsc)
{
    int score, new_score;
    GListPtr gIter = rsc->children;
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    if (clone_data->applied_master_prefs) {
        /* Make sure we only do this once */
        return;
    }

    clone_data->applied_master_prefs = TRUE;

    for (; gIter != NULL; gIter = gIter->next) {
        GHashTableIter iter;
        pe_node_t *node = NULL;
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if (can_run_resources(node) == FALSE) {
                /* This node will never be promoted to master,
                 * so don't apply the promotion score as that may
                 * lead to clone shuffling
                 */
                continue;
            }

            score = promotion_score(child_rsc, node, 0);
            if (score > 0) {
                new_score = pe__add_scores(node->weight, score);
                if (new_score != node->weight) {
                    pe_rsc_trace(rsc, "\t%s: Updating preference for %s (%d->%d)",
                                 child_rsc->id, node->details->uname,
                                 node->weight, new_score);
                    node->weight = new_score;
                }
            }

            new_score = QB_MAX(child_rsc->priority, score);
            if (new_score != child_rsc->priority) {
                pe_rsc_trace(rsc, "\t%s: Updating priority (%d->%d)",
                             child_rsc->id, child_rsc->priority, new_score);
                child_rsc->priority = new_score;
            }
        }
    }
}
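#if 0
/* Illustrative sketch only -- a hypothetical helper and resource name:
 * crm_master stores the promotion score as a node attribute named
 * "master-" plus the name the executor knows the resource by (e.g.
 * "master-galera:0" for an instance whose clone_name is "galera:0"),
 * which is exactly what lookup_promotion_score() above reads back.
 */
static int
example_promotion_score(pe_resource_t *rsc, const pe_node_t *node)
{
    const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
    const char *value = lookup_promotion_score(rsc, node, name);

    return (value != NULL)? char2score(value) : 0;
}
#endif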
static void
set_role_slave(pe_resource_t * rsc, gboolean current)
{
    GListPtr gIter = rsc->children;

    if (current) {
        if (rsc->role == RSC_ROLE_STARTED) {
            rsc->role = RSC_ROLE_SLAVE;
        }

    } else {
        GListPtr allocated = NULL;

        rsc->fns->location(rsc, &allocated, FALSE);
-
-        if (allocated) {
-            rsc->next_role = RSC_ROLE_SLAVE;
-
-        } else {
-            rsc->next_role = RSC_ROLE_STOPPED;
-        }
+        pe__set_next_role(rsc, (allocated? RSC_ROLE_SLAVE : RSC_ROLE_STOPPED),
+                          "unpromoted instance");
        g_list_free(allocated);
    }

    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        set_role_slave(child_rsc, current);
    }
}

static void
set_role_master(pe_resource_t * rsc)
{
    GListPtr gIter = rsc->children;

    if (rsc->next_role == RSC_ROLE_UNKNOWN) {
-        rsc->next_role = RSC_ROLE_MASTER;
+        pe__set_next_role(rsc, RSC_ROLE_MASTER, "promoted instance");
    }

    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        set_role_master(child_rsc);
    }
}

pe_node_t *
pcmk__set_instance_roles(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    int promoted = 0;
    GListPtr gIter = NULL;
    GListPtr gIter2 = NULL;
    GHashTableIter iter;
    pe_node_t *node = NULL;
    pe_node_t *chosen = NULL;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
    char score[33];
    size_t len = sizeof(score);
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    /* count now tracks the number of masters allocated */
    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        node->count = 0;
    }

    /*
     * assign priority
     */
    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        GListPtr list = NULL;
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        pe_rsc_trace(rsc, "Assigning priority for %s: %s", child_rsc->id,
                     role2text(child_rsc->next_role));

        if (child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) {
            set_role_slave(child_rsc, TRUE);
        }

        chosen = child_rsc->fns->location(child_rsc, &list, FALSE);
        if (pcmk__list_of_multiple(list)) {
            pcmk__config_err("Cannot promote non-colocated child %s",
                             child_rsc->id);
        }

        g_list_free(list);
        if (chosen == NULL) {
            continue;
        }

        next_role = child_rsc->fns->state(child_rsc, FALSE);
        switch (next_role) {
            case RSC_ROLE_STARTED:
            case RSC_ROLE_UNKNOWN:
                /*
                 * Default to -1 if no value is set
                 *
                 * This allows master locations to be specified
                 * based solely on rsc_location constraints,
                 * but prevents anyone from being promoted if
                 * neither a constraint nor a promotion score is present
                 */
                child_rsc->priority = promotion_score(child_rsc, chosen, -1);
                break;

            case RSC_ROLE_SLAVE:
            case RSC_ROLE_STOPPED:
                child_rsc->priority = -INFINITY;
                break;

            case RSC_ROLE_MASTER:
                /* We will arrive here if we're re-creating actions after a stonith */
                break;

            default:
                CRM_CHECK(FALSE /* unhandled */ ,
                          crm_err("Unknown resource role: %d for %s",
                                  next_role, child_rsc->id));
        }

        apply_master_location(child_rsc, child_rsc->rsc_location, chosen);
        apply_master_location(child_rsc, rsc->rsc_location, chosen);

        for (gIter2 = child_rsc->rsc_cons; gIter2 != NULL; gIter2 = gIter2->next) {
            pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter2->data;

            child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons,
                                               data_set);
        }

        child_rsc->sort_index = child_rsc->priority;
        pe_rsc_trace(rsc, "Assigning priority for %s: %d", child_rsc->id,
                     child_rsc->priority);

        if (next_role == RSC_ROLE_MASTER) {
            child_rsc->sort_index = INFINITY;
        }
    }

    pe__show_node_weights(true, rsc, "Pre merge", rsc->allowed_nodes);
    promotion_order(rsc, data_set);

    /* mark the first N as masters */
    for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        score2char_stack(child_rsc->sort_index, score, len);

        chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
        if (show_scores) {
            if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
                printf("%s promotion score on %s: %s\n",
                       child_rsc->id,
                       (chosen? chosen->details->uname : "none"), score);
            }

        } else {
            pe_rsc_trace(rsc, "%s promotion score on %s: %s", child_rsc->id,
                         (chosen? chosen->details->uname : "none"), score);
        }

        chosen = NULL;          /* nuke 'chosen' so that we don't promote more than the
                                 * required number of instances
                                 */

        if (child_rsc->sort_index < 0) {
            pe_rsc_trace(rsc, "Not supposed to promote child: %s", child_rsc->id);

        } else if ((promoted < clone_data->promoted_max)
                   || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            chosen = can_be_master(child_rsc);
        }

        pe_rsc_debug(rsc, "%s promotion score: %d",
                     child_rsc->id, child_rsc->priority);

        if (chosen == NULL) {
            set_role_slave(child_rsc, FALSE);
            continue;

        } else if (child_rsc->role < RSC_ROLE_MASTER
                   && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
                   && data_set->no_quorum_policy == no_quorum_freeze) {
            crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                       child_rsc->id, role2text(child_rsc->role),
                       role2text(child_rsc->next_role));
            set_role_slave(child_rsc, FALSE);
            continue;
        }

        chosen->count++;
        pe_rsc_info(rsc, "Promoting %s (%s %s)",
                    child_rsc->id, role2text(child_rsc->role),
                    chosen->details->uname);
        set_role_master(child_rsc);
        promoted++;
    }

    pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d to master",
                rsc->id, promoted, clone_data->promoted_max);

    return NULL;
}
void
create_promotable_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
    pe_action_t *action = NULL;
    GListPtr gIter = rsc->children;
    pe_action_t *action_complete = NULL;
    gboolean any_promoting = FALSE;
    gboolean any_demoting = FALSE;
    pe_resource_t *last_promote_rsc = NULL;
    pe_resource_t *last_demote_rsc = NULL;

    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    pe_rsc_debug(rsc, "Creating actions for %s", rsc->id);

    for (; gIter != NULL; gIter = gIter->next) {
        gboolean child_promoting = FALSE;
        gboolean child_demoting = FALSE;
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id);
        child_rsc->cmds->create_actions(child_rsc, data_set);
        check_promotable_actions(child_rsc, &child_demoting, &child_promoting);

        any_demoting = any_demoting || child_demoting;
        any_promoting = any_promoting || child_promoting;
        pe_rsc_trace(rsc, "Created actions for %s: %d %d", child_rsc->id,
                     child_promoting, child_demoting);
    }

    /* promote */
    action = create_pseudo_resource_op(rsc, RSC_PROMOTE, !any_promoting, TRUE,
                                       data_set);
    action_complete = create_pseudo_resource_op(rsc, RSC_PROMOTED,
                                                !any_promoting, TRUE, data_set);
    action_complete->priority = INFINITY;

    child_promoting_constraints(clone_data, pe_order_optional,
                                rsc, NULL, last_promote_rsc, data_set);

    if (clone_data->promote_notify == NULL) {
        clone_data->promote_notify = create_notification_boundaries(rsc, RSC_PROMOTE,
                                                                    action,
                                                                    action_complete,
                                                                    data_set);
    }

    /* demote */
    action = create_pseudo_resource_op(rsc, RSC_DEMOTE, !any_demoting, TRUE,
                                       data_set);
    action_complete = create_pseudo_resource_op(rsc, RSC_DEMOTED, !any_demoting,
                                                TRUE, data_set);
    action_complete->priority = INFINITY;

    child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL,
                               last_demote_rsc, data_set);

    if (clone_data->demote_notify == NULL) {
        clone_data->demote_notify = create_notification_boundaries(rsc, RSC_DEMOTE,
                                                                   action,
                                                                   action_complete,
                                                                   data_set);

        if (clone_data->promote_notify) {
            /* If we ever wanted groups to have notifications we'd need to move
             * this to native_internal_constraints() one day
             * Requires exposing *_notify
             */
            order_actions(clone_data->stop_notify->post_done,
                          clone_data->promote_notify->pre, pe_order_optional);
            order_actions(clone_data->start_notify->post_done,
                          clone_data->promote_notify->pre, pe_order_optional);
            order_actions(clone_data->demote_notify->post_done,
                          clone_data->promote_notify->pre, pe_order_optional);
            order_actions(clone_data->demote_notify->post_done,
                          clone_data->start_notify->pre, pe_order_optional);
            order_actions(clone_data->demote_notify->post_done,
                          clone_data->stop_notify->pre, pe_order_optional);
        }
    }

    /* restore the correct priority */
    gIter = rsc->children;
    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        child_rsc->priority = rsc->priority;
    }
}

void
promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    /* global stopped before start */
    new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set);

    /* global stopped before promote */
    new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set);

    /* global demoted before start */
    new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set);

    /* global started before promote */
    new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);

    /* global demoted before stop */
    new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set);

    /* global demote before demoted */
    new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set);

    /* global demoted before promote */
    new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
}

void
promotable_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
    GListPtr gIter = rsc->children;
    pe_resource_t *last_rsc = NULL;
    clone_variant_data_t *clone_data = NULL;

    get_clone_variant_data(clone_data, rsc);

    promote_demote_constraints(rsc, data_set);

    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

        /* child demote before promote */
        new_rsc_order(child_rsc, RSC_DEMOTE, child_rsc, RSC_PROMOTE,
                      pe_order_optional, data_set);

        child_promoting_constraints(clone_data, pe_order_optional,
                                    rsc, child_rsc, last_rsc, data_set);

        child_demoting_constraints(clone_data, pe_order_optional,
                                   rsc, child_rsc, last_rsc, data_set);

        last_rsc = child_rsc;
    }
}

static void
node_hash_update_one(GHashTable * hash, pe_node_t * other, const char *attr, int score)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;
    const char *value = NULL;

    if (other == NULL) {
        return;

    } else if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    value = pe_node_attribute_raw(other, attr);
    g_hash_table_iter_init(&iter, hash);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        const char *tmp = pe_node_attribute_raw(node, attr);

        if (pcmk__str_eq(value, tmp, pcmk__str_casei)) {
            crm_trace("%s: %d + %d", node->details->uname, node->weight,
                      other->weight);
            node->weight = pe__add_scores(node->weight, score);
        }
    }
}
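#if 0
/* Worked example (hypothetical constraint) for promotable_colocation_rh()
 * below:
 *
 *   <rsc_colocation id="vip-with-master" score="INFINITY"
 *                   rsc="vip" with-rsc="stateful-clone"
 *                   with-rsc-role="Master"/>
 *
 * collects the node where a child of stateful-clone will hold the Master
 * role, bumps vip's weight there via node_hash_update_one() (for finite
 * scores), and -- since the score is INFINITY and this is not a
 * master-with-master colocation -- bans vip from every other node with
 * node_list_exclude().
 */
#endif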
%s %d", child_rsc->id, role2text(next_role), chosen->details->uname, constraint->score); if (constraint->score < INFINITY) { node_hash_update_one(rsc_lh->allowed_nodes, chosen, constraint->node_attribute, constraint->score); } rhs = g_list_prepend(rhs, chosen); } } /* Only do this if it's not a master-master colocation * Doing this unconditionally would prevent the slaves from being started */ if (constraint->role_lh != RSC_ROLE_MASTER || constraint->role_rh != RSC_ROLE_MASTER) { if (constraint->score >= INFINITY) { node_list_exclude(rsc_lh->allowed_nodes, rhs, TRUE); } } g_list_free(rhs); } else if (constraint->role_lh == RSC_ROLE_MASTER) { pe_resource_t *rh_child = find_compatible_child(rsc_lh, rsc_rh, constraint->role_rh, FALSE, data_set); if (rh_child == NULL && constraint->score >= INFINITY) { pe_rsc_trace(rsc_lh, "%s can't be promoted %s", rsc_lh->id, constraint->id); rsc_lh->priority = -INFINITY; } else if (rh_child != NULL) { int new_priority = pe__add_scores(rsc_lh->priority, constraint->score); pe_rsc_debug(rsc_lh, "Applying %s to %s", constraint->id, rsc_lh->id); pe_rsc_debug(rsc_lh, "\t%s: %d->%d", rsc_lh->id, rsc_lh->priority, new_priority); rsc_lh->priority = new_priority; } } return; } diff --git a/lib/pacemaker/pcmk_sched_utils.c b/lib/pacemaker/pcmk_sched_utils.c index eaaf5261a3..177f43eb48 100644 --- a/lib/pacemaker/pcmk_sched_utils.c +++ b/lib/pacemaker/pcmk_sched_utils.c @@ -1,767 +1,767 @@ /* - * Copyright 2004-2020 the Pacemaker project contributors + * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include // lrmd_event_data_t #include #include pe__location_t * rsc2node_new(const char *id, pe_resource_t *rsc, int node_weight, const char *discover_mode, pe_node_t *foo_node, pe_working_set_t *data_set) { pe__location_t *new_con = NULL; if (rsc == NULL || id == NULL) { pe_err("Invalid constraint %s for rsc=%p", crm_str(id), rsc); return NULL; } else if (foo_node == NULL) { CRM_CHECK(node_weight == 0, return NULL); } new_con = calloc(1, sizeof(pe__location_t)); if (new_con != NULL) { new_con->id = strdup(id); new_con->rsc_lh = rsc; new_con->node_list_rh = NULL; new_con->role_filter = RSC_ROLE_UNKNOWN; if (pcmk__str_eq(discover_mode, "always", pcmk__str_null_matches | pcmk__str_casei)) { new_con->discover_mode = pe_discover_always; } else if (pcmk__str_eq(discover_mode, "never", pcmk__str_casei)) { new_con->discover_mode = pe_discover_never; } else if (pcmk__str_eq(discover_mode, "exclusive", pcmk__str_casei)) { new_con->discover_mode = pe_discover_exclusive; rsc->exclusive_discover = TRUE; } else { pe_err("Invalid %s value %s in location constraint", XML_LOCATION_ATTR_DISCOVERY, discover_mode); } if (foo_node != NULL) { pe_node_t *copy = pe__copy_node(foo_node); copy->weight = node_weight; new_con->node_list_rh = g_list_prepend(NULL, copy); } data_set->placement_constraints = g_list_prepend(data_set->placement_constraints, new_con); rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con); } return new_con; } gboolean can_run_resources(const pe_node_t * node) { if (node == NULL) { return FALSE; } #if 0 if (node->weight < 0) { return FALSE; } #endif if (node->details->online == FALSE || node->details->shutdown || node->details->unclean || node->details->standby || node->details->maintenance) { crm_trace("%s: online=%d, unclean=%d, standby=%d, 
maintenance=%d", node->details->uname, node->details->online, node->details->unclean, node->details->standby, node->details->maintenance); return FALSE; } return TRUE; } /*! * \internal * \brief Copy a hash table of node objects * * \param[in] nodes Hash table to copy * * \return New copy of nodes (or NULL if nodes is NULL) */ GHashTable * pcmk__copy_node_table(GHashTable *nodes) { GHashTable *new_table = NULL; GHashTableIter iter; pe_node_t *node = NULL; if (nodes == NULL) { return NULL; } new_table = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { pe_node_t *new_node = pe__copy_node(node); g_hash_table_insert(new_table, (gpointer) new_node->details->id, new_node); } return new_table; } /*! * \internal * \brief Copy a list of node objects * * \param[in] list List to copy * \param[in] reset Set copies' scores to 0 * * \return New list of shallow copies of nodes in original list */ GList * pcmk__copy_node_list(const GList *list, bool reset) { GList *result = NULL; for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *new_node = NULL; pe_node_t *this_node = (pe_node_t *) gIter->data; new_node = pe__copy_node(this_node); if (reset) { new_node->weight = 0; } result = g_list_prepend(result, new_node); } return result; } struct node_weight_s { pe_node_t *active; pe_working_set_t *data_set; }; /* return -1 if 'a' is more preferred * return 1 if 'b' is more preferred */ static gint sort_node_weight(gconstpointer a, gconstpointer b, gpointer data) { const pe_node_t *node1 = (const pe_node_t *)a; const pe_node_t *node2 = (const pe_node_t *)b; struct node_weight_s *nw = data; int node1_weight = 0; int node2_weight = 0; int result = 0; if (a == NULL) { return 1; } if (b == NULL) { return -1; } node1_weight = node1->weight; node2_weight = node2->weight; if (can_run_resources(node1) == FALSE) { node1_weight = -INFINITY; } if (can_run_resources(node2) == FALSE) { node2_weight = -INFINITY; } if (node1_weight > node2_weight) { crm_trace("%s (%d) > %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); return -1; } if (node1_weight < node2_weight) { crm_trace("%s (%d) < %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); return 1; } crm_trace("%s (%d) == %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); if (pcmk__str_eq(nw->data_set->placement_strategy, "minimal", pcmk__str_casei)) { goto equal; } if (pcmk__str_eq(nw->data_set->placement_strategy, "balanced", pcmk__str_casei)) { result = compare_capacity(node1, node2); if (result < 0) { crm_trace("%s > %s : capacity (%d)", node1->details->uname, node2->details->uname, result); return -1; } else if (result > 0) { crm_trace("%s < %s : capacity (%d)", node1->details->uname, node2->details->uname, result); return 1; } } /* now try to balance resources across the cluster */ if (node1->details->num_resources < node2->details->num_resources) { crm_trace("%s (%d) > %s (%d) : resources", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return -1; } else if (node1->details->num_resources > node2->details->num_resources) { crm_trace("%s (%d) < %s (%d) : resources", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return 1; } if (nw->active && nw->active->details == node1->details) { 
crm_trace("%s (%d) > %s (%d) : active", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return -1; } else if (nw->active && nw->active->details == node2->details) { crm_trace("%s (%d) < %s (%d) : active", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return 1; } equal: crm_trace("%s = %s", node1->details->uname, node2->details->uname); return strcmp(node1->details->uname, node2->details->uname); } GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set) { struct node_weight_s nw = { active_node, data_set }; return g_list_sort_with_data(nodes, sort_node_weight, &nw); } void native_deallocate(pe_resource_t * rsc) { if (rsc->allocated_to) { pe_node_t *old = rsc->allocated_to; crm_info("Deallocating %s from %s", rsc->id, old->details->uname); pe__set_resource_flags(rsc, pe_rsc_provisional); rsc->allocated_to = NULL; old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc); old->details->num_resources--; /* old->count--; */ calculate_utilization(old->details->utilization, rsc->utilization, TRUE); free(old); } } gboolean native_assign_node(pe_resource_t * rsc, GListPtr nodes, pe_node_t * chosen, gboolean force) { CRM_ASSERT(rsc->variant == pe_native); if (force == FALSE && chosen != NULL) { bool unset = FALSE; if(chosen->weight < 0) { unset = TRUE; // Allow the graph to assume that the remote resource will come up } else if (!can_run_resources(chosen) && !pe__is_guest_node(chosen)) { unset = TRUE; } if(unset) { crm_debug("All nodes for resource %s are unavailable" ", unclean or shutting down (%s: %d, %d)", rsc->id, chosen->details->uname, can_run_resources(chosen), chosen->weight); - rsc->next_role = RSC_ROLE_STOPPED; + pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); chosen = NULL; } } /* todo: update the old node for each resource to reflect its * new resource count */ native_deallocate(rsc); pe__clear_resource_flags(rsc, pe_rsc_provisional); if (chosen == NULL) { GListPtr gIter = NULL; char *rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); crm_debug("Could not allocate a node for %s", rsc->id); - rsc->next_role = RSC_ROLE_STOPPED; + pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate"); for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *op = (pe_action_t *) gIter->data; const char *interval_ms_s = g_hash_table_lookup(op->meta, XML_LRM_ATTR_INTERVAL_MS); crm_debug("Processing %s", op->uuid); if(pcmk__str_eq(RSC_STOP, op->task, pcmk__str_casei)) { update_action_flags(op, pe_action_optional | pe_action_clear, __func__, __LINE__); } else if(pcmk__str_eq(RSC_START, op->task, pcmk__str_casei)) { update_action_flags(op, pe_action_runnable | pe_action_clear, __func__, __LINE__); //pe__set_resource_flags(rsc, pe_rsc_block); } else if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { if(pcmk__str_eq(rc_inactive, g_hash_table_lookup(op->meta, XML_ATTR_TE_TARGET_RC), pcmk__str_casei)) { /* This is a recurring monitor for the stopped state, leave it alone */ } else { /* Normal monitor operation, cancel it */ update_action_flags(op, pe_action_runnable | pe_action_clear, __func__, __LINE__); } } } free(rc_inactive); return FALSE; } crm_debug("Assigning %s to %s", chosen->details->uname, rsc->id); rsc->allocated_to = pe__copy_node(chosen); chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, rsc); chosen->details->num_resources++; 
chosen->count++; calculate_utilization(chosen->details->utilization, rsc->utilization, FALSE); dump_rsc_utilization((show_utilization? LOG_STDOUT : LOG_TRACE), __func__, rsc, chosen); return TRUE; } void log_action(unsigned int log_level, const char *pre_text, pe_action_t * action, gboolean details) { const char *node_uname = NULL; const char *node_uuid = NULL; const char *desc = NULL; if (action == NULL) { crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } if (pcmk_is_set(action->flags, pe_action_pseudo)) { node_uname = NULL; node_uuid = NULL; } else if (action->node != NULL) { node_uname = action->node->details->uname; node_uuid = action->node->details->id; } else { node_uname = "<none>"; node_uuid = NULL; } switch (text2task(action->task)) { case stonith_node: case shutdown_crm: if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid? ")" : "")); break; default: if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (action->rsc? action->rsc->id : ""), (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid?
")" : "")); break; } if (details) { GListPtr gIter = NULL; crm_trace("\t\t====== Preceding Actions"); gIter = action->actions_before; for (; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) gIter->data; log_action(log_level + 1, "\t\t", other->action, FALSE); } crm_trace("\t\t====== Subsequent Actions"); gIter = action->actions_after; for (; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) gIter->data; log_action(log_level + 1, "\t\t", other->action, FALSE); } crm_trace("\t\t====== End"); } else { crm_trace("\t\t(before=%d, after=%d)", g_list_length(action->actions_before), g_list_length(action->actions_after)); } } gboolean can_run_any(GHashTable * nodes) { GHashTableIter iter; pe_node_t *node = NULL; if (nodes == NULL) { return FALSE; } g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (can_run_resources(node) && node->weight >= 0) { return TRUE; } } return FALSE; } pe_action_t * create_pseudo_resource_op(pe_resource_t * rsc, const char *task, bool optional, bool runnable, pe_working_set_t *data_set) { pe_action_t *action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL, optional, TRUE, data_set); update_action_flags(action, pe_action_pseudo, __func__, __LINE__); update_action_flags(action, pe_action_runnable, __func__, __LINE__); if(runnable) { update_action_flags(action, pe_action_runnable, __func__, __LINE__); } return action; } /*! * \internal * \brief Create an executor cancel op * * \param[in] rsc Resource of action to cancel * \param[in] task Name of action to cancel * \param[in] interval_ms Interval of action to cancel * \param[in] node Node of action to cancel * \param[in] data_set Working set of cluster * * \return Created op */ pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set) { pe_action_t *cancel_op; char *interval_ms_s = crm_strdup_printf("%u", interval_ms); // @TODO dangerous if possible to schedule another action with this key char *key = pcmk__op_key(rsc->id, task, interval_ms); cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); cancel_op->task = strdup(RSC_CANCEL); free(cancel_op->cancel_task); cancel_op->cancel_task = strdup(task); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s); free(interval_ms_s); return cancel_op; } /*! 
* \internal * \brief Create a shutdown op for a scheduler transition * * \param[in] node Node being shut down * \param[in] data_set Working set of cluster * * \return Created op */ pe_action_t * sched_shutdown_op(pe_node_t *node, pe_working_set_t *data_set) { char *shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname); pe_action_t *shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set); crm_notice("Scheduling shutdown of node %s", node->details->uname); shutdown_constraints(node, shutdown_op, data_set); add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); return shutdown_op; } static char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { CRM_CHECK(transition_key != NULL, return NULL); return crm_strdup_printf("%d:%d;%s", op_status, op_rc, transition_key); } static void append_digest(lrmd_event_data_t *op, xmlNode *update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); pcmk__filter_op_for_digest(args_xml); digest = calculate_operation_digest(args_xml, version); #if 0 if (level < get_crm_log_level() && op->interval_ms == 0 && pcmk__str_eq(op->op_type, CRMD_ACTION_START, pcmk__str_none)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). Source: %s\n", digest, ID(update), magic, digest_source); free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" /*! * \internal * \brief Create XML for resource operation history update * * \param[in,out] parent Parent XML node to add to * \param[in,out] op Operation event data * \param[in] caller_version DC feature set * \param[in] target_rc Expected result of operation * \param[in] node Name of node on which operation was performed * \param[in] origin Arbitrary description of update source * \param[in] level A log message will be logged at this level * * \return Newly created XML node for history update */ xmlNode * pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op, const char *caller_version, int target_rc, const char *node, const char *origin, int level) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *op_id_additional = NULL; char *local_user_data = NULL; const char *exit_reason = NULL; xmlNode *xml_op = NULL; const char *task = NULL; CRM_CHECK(op != NULL, return NULL); do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%u)", origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status), op->interval_ms); crm_trace("DC version: %s", caller_version); task = op->op_type; /* Record a successful reload as a start, and a failed reload as a monitor, * to make life easier for the scheduler when determining the current state. 
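 * For example (editorial sketch): a completed "reload" of resource "rsc1"
 * with interval 0 is recorded under the start key "rsc1_start_0", while a
 * failed reload is recorded as if it were a monitor, so the scheduler sees
 * a plausible current state either way.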
*/ if (pcmk__str_eq(task, "reload", pcmk__str_none)) { if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } key = pcmk__op_key(op->rsc_id, task, op->interval_ms); if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = pcmk__notify_key(op->rsc_id, n_type, n_task); if (op->op_status != PCMK_LRM_OP_PENDING) { /* Ignore notify errors. * * @TODO It might be better to keep the correct result here, and * ignore it in process_graph_event(). */ op->op_status = PCMK_LRM_OP_DONE; op->rc = 0; } } else if (did_rsc_op_fail(op, target_rc)) { op_id = pcmk__op_key(op->rsc_id, "last_failure", 0); if (op->interval_ms == 0) { // Ensure 'last' gets updated, in case record-pending is true op_id_additional = pcmk__op_key(op->rsc_id, "last", 0); } exit_reason = op->exit_reason; } else if (op->interval_ms > 0) { op_id = strdup(key); } else { op_id = pcmk__op_key(op->rsc_id, "last", 0); } again: xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for: " PCMK__OP_FMT " %d from %s", op->rsc_id, op->op_type, op->interval_ms, op->call_id, origin); local_user_data = pcmk__transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } if(magic == NULL) { magic = generate_transition_magic(op->user_data, op->op_status, op->rc); } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? "" : exit_reason); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */ crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (" PCMK__OP_FMT "): last=%u change=%u exec=%u queue=%u", op->rsc_id, op->op_type, op->interval_ms, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if (op->interval_ms == 0) { crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_run); // @COMPAT last-run is deprecated crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_RUN, (long long) op->t_run); } else if(op->t_rcchange) { /* last-run is not accurate for recurring ops */ crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_rcchange); } else { /* ...but is better than nothing otherwise */ crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { /* * Record migrate_source and migrate_target always for migrate ops. 
*/ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); if (op_id_additional) { free(op_id); op_id = op_id_additional; op_id_additional = NULL; goto again; } if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c index 5d7d628857..3b5f722c37 100644 --- a/lib/pengine/complex.c +++ b/lib/pengine/complex.c @@ -1,1111 +1,1130 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <crm/pengine/rules.h> #include <crm/pengine/internal.h> #include <crm/msg_xml.h> #include <crm/common/xml_internal.h> void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length); resource_object_functions_t resource_class_functions[] = { { native_unpack, native_find_rsc, native_parameter, native_print, native_active, native_resource_state, native_location, native_free, pe__count_common, pe__native_is_filtered, }, { group_unpack, native_find_rsc, native_parameter, group_print, group_active, group_resource_state, native_location, group_free, pe__count_common, pe__group_is_filtered, }, { clone_unpack, native_find_rsc, native_parameter, clone_print, clone_active, clone_resource_state, native_location, clone_free, pe__count_common, pe__clone_is_filtered, }, { pe__unpack_bundle, native_find_rsc, native_parameter, pe__print_bundle, pe__bundle_active, pe__bundle_resource_state, native_location, pe__free_bundle, pe__count_bundle, pe__bundle_is_filtered, } }; static enum pe_obj_types get_resource_type(const char *name) { if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) { return pe_native; } else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) { return pe_group; } else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) { return pe_clone; } else if (pcmk__str_eq(name, XML_CIB_TAG_MASTER, pcmk__str_casei)) { // @COMPAT deprecated since 2.0.0 return pe_clone; } else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) { return pe_container; } return pe_unknown; } static void dup_attr(gpointer key, gpointer value, gpointer user_data) { add_hash_param(user_data, key, value); } static void expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set) { GHashTable *parent_orig_meta = crm_str_table_new(); pe_resource_t *p = rsc->parent; if (p == NULL) { return ; } /* Walk up through all parent resources, and collect the "meta_attributes" values fixed in each one's original XML into the hash table. */ /* A value fixed by a closer parent takes precedence and is not overwritten by a more distant ancestor. */ while(p != NULL) { /* Build the hash table used for the comparison, following any id-ref indirection. */ pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS, rule_data, parent_orig_meta, NULL, FALSE, data_set); p = p->parent; } /* If the parents fixed any "meta_attributes" values, merge them into the child now.
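 * For example (editorial sketch): if a clone parent fixes
 * target-role="Stopped" in its own XML and the child sets no target-role
 * at all, the child inherits "Stopped" here; a value the child already
 * set for itself is left untouched.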
*/ if (parent_orig_meta != NULL) { GHashTableIter iter; char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, parent_orig_meta); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { /* Values fixed in a parent resource's original XML are applied to the child resource as well. */ /* Attributes that already exist in the child are not updated. */ dup_attr(key, value, meta_hash); } } if (parent_orig_meta != NULL) { g_hash_table_destroy(parent_orig_meta); } return ; } void get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { pe_rsc_eval_data_t rsc_rule_data = { .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS), .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE) }; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = &rsc_rule_data, .op_data = NULL }; if (node) { rule_data.node_hash = node->details->attrs; } for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) { const char *prop_name = (const char *) a->name; const char *prop_value = crm_element_value(rsc->xml, prop_name); add_hash_param(meta_hash, prop_name, prop_value); } pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data, meta_hash, NULL, FALSE, data_set); /* Merge the "meta_attributes" explicitly set on parent resources into the child resource's hash table. */ /* Anything already explicitly set on the child is not overwritten. */ if (rsc->parent != NULL) { expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set); } /* check the defaults */ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS, &rule_data, meta_hash, NULL, FALSE, data_set); /* Finally, inherit from the parent any "meta_attributes" the child still lacks, including values the parent resolved from rsc_defaults. */ /* Values already set up to this point are not overwritten. */ if (rsc->parent) { g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash); } } void get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; if (node) { rule_data.node_hash = node->details->attrs; } pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data, meta_hash, NULL, FALSE, data_set); /* set anything else based on the parent */ if (rsc->parent != NULL) { get_rsc_attributes(meta_hash, rsc->parent, node, data_set); } else { /* and finally check the defaults */ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS, &rule_data, meta_hash, NULL, FALSE, data_set); } } #if ENABLE_VERSIONED_ATTRS void pe_get_versioned_attributes(xmlNode * meta_hash, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { pe_rule_eval_data_t rule_data = { .node_hash = (node == NULL)?
NULL : node->details->attrs, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; pe_eval_versioned_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, &rule_data, meta_hash, NULL); /* set anything else based on the parent */ if (rsc->parent != NULL) { pe_get_versioned_attributes(meta_hash, rsc->parent, node, data_set); } else { /* and finally check the defaults */ pe_eval_versioned_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS, &rule_data, meta_hash, NULL); } } #endif static char * template_op_key(xmlNode * op) { const char *name = crm_element_value(op, "name"); const char *role = crm_element_value(op, "role"); char *key = NULL; if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_null_matches) || pcmk__str_eq(role, RSC_ROLE_SLAVE_S, pcmk__str_none)) { role = RSC_ROLE_UNKNOWN_S; } key = crm_strdup_printf("%s-%s", name, role); return key; } static gboolean unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { xmlNode *cib_resources = NULL; xmlNode *template = NULL; xmlNode *new_xml = NULL; xmlNode *child_xml = NULL; xmlNode *rsc_ops = NULL; xmlNode *template_ops = NULL; const char *template_ref = NULL; const char *clone = NULL; const char *id = NULL; if (xml_obj == NULL) { pe_err("No resource object for template unpacking"); return FALSE; } template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = ID(xml_obj); if (id == NULL) { pe_err("'%s' object must have an id", crm_element_name(xml_obj)); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pe_err("The resource object '%s' should not reference itself", id); return FALSE; } cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE); if (cib_resources == NULL) { pe_err("No resources configured"); return FALSE; } template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE, XML_ATTR_ID, template_ref); if (template == NULL) { pe_err("No template named '%s'", template_ref); return FALSE; } new_xml = copy_xml(template); xmlNodeSetName(new_xml, xml_obj->name); crm_xml_replace(new_xml, XML_ATTR_ID, id); clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION); if(clone) { crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone); } template_ops = find_xml_node(new_xml, "operations", FALSE); for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL; child_xml = pcmk__xe_next(child_xml)) { xmlNode *new_child = NULL; new_child = add_node_copy(new_xml, child_xml); if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) { rsc_ops = new_child; } } if (template_ops && rsc_ops) { xmlNode *op = NULL; GHashTable *rsc_ops_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL); for (op = pcmk__xe_first_child(rsc_ops); op != NULL; op = pcmk__xe_next(op)) { char *key = template_op_key(op); g_hash_table_insert(rsc_ops_hash, key, op); } for (op = pcmk__xe_first_child(template_ops); op != NULL; op = pcmk__xe_next(op)) { char *key = template_op_key(op); if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) { add_node_copy(rsc_ops, op); } free(key); } if (rsc_ops_hash) { g_hash_table_destroy(rsc_ops_hash); } free_xml(template_ops); } /*free_xml(*expanded_xml); */ *expanded_xml = new_xml; /* Disable multi-level templates for now */ /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) { free_xml(*expanded_xml); *expanded_xml = NULL; return FALSE; } */ return TRUE; } static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set) { const char *template_ref = NULL; const char *id = NULL; if (xml_obj == NULL) { pe_err("No resource object for processing resource list of template"); return FALSE; } template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = ID(xml_obj); if (id == NULL) { pe_err("'%s' object must have an id", crm_element_name(xml_obj)); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pe_err("The resource object '%s' should not reference itself", id); return FALSE; } if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) { return FALSE; } return TRUE; } static bool detect_promotable(pe_resource_t *rsc) { const char *promotable = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE); if (crm_is_true(promotable)) { return TRUE; } // @COMPAT deprecated since 2.0.0 if (pcmk__str_eq(crm_element_name(rsc->xml), XML_CIB_TAG_MASTER, pcmk__str_casei)) { /* @TODO in some future version, pe_warn_once() here, * then drop support in even later version */ g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE), strdup(XML_BOOLEAN_TRUE)); return TRUE; } return FALSE; } static void free_params_table(gpointer data) { g_hash_table_destroy((GHashTable *) data); } /*! * \brief Get a table of resource parameters * * \param[in] rsc Resource to query * \param[in] node Node for evaluating rules (NULL for defaults) * \param[in] data_set Cluster working set * * \return Hash table containing resource parameter names and values * (or NULL if \p rsc or \p data_set is NULL) * \note The returned table will be destroyed when the resource is freed, so * callers should not destroy it. */ GHashTable * pe_rsc_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { GHashTable *params_on_node = NULL; /* A NULL node is used to request the resource's default parameters * (not evaluated for node), but we always want something non-NULL * as a hash table key.
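 * A hypothetical caller, for illustration only:
 *
 *   GHashTable *params = pe_rsc_params(rsc, NULL, data_set);
 *   const char *port = g_hash_table_lookup(params, "port");
 *
 * Repeat calls for the same node return the same cached table, and the
 * table is freed with the resource, so callers must not destroy it.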
*/ const char *node_name = ""; // Sanity check if ((rsc == NULL) || (data_set == NULL)) { return NULL; } if ((node != NULL) && (node->details->uname != NULL)) { node_name = node->details->uname; } // Find the parameter table for the given node if (rsc->parameter_cache == NULL) { rsc->parameter_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, free, free_params_table); } else { params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name); } // If none exists yet, create one with parameters evaluated for node if (params_on_node == NULL) { params_on_node = crm_str_table_new(); get_rsc_attributes(params_on_node, rsc, node, data_set); g_hash_table_insert(rsc->parameter_cache, strdup(node_name), params_on_node); } return params_on_node; } gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent, pe_working_set_t * data_set) { bool isdefault = FALSE; xmlNode *expanded_xml = NULL; xmlNode *ops = NULL; const char *value = NULL; const char *rclass = NULL; /* Look for this after any templates have been expanded */ const char *id = crm_element_value(xml_obj, XML_ATTR_ID); bool guest_node = FALSE; bool remote_node = FALSE; bool has_versioned_params = FALSE; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; crm_log_xml_trace(xml_obj, "Processing resource input..."); if (id == NULL) { pe_err("Must specify id tag in <resource>"); return FALSE; } else if (rsc == NULL) { pe_err("Nowhere to unpack resource into"); return FALSE; } if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) { return FALSE; } *rsc = calloc(1, sizeof(pe_resource_t)); (*rsc)->cluster = data_set; if (expanded_xml) { crm_log_xml_trace(expanded_xml, "Expanded resource..."); (*rsc)->xml = expanded_xml; (*rsc)->orig_xml = xml_obj; } else { (*rsc)->xml = xml_obj; (*rsc)->orig_xml = NULL; } /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */ rclass = crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS); (*rsc)->parent = parent; ops = find_xml_node((*rsc)->xml, "operations", FALSE); (*rsc)->ops_xml = expand_idref(ops, data_set->input); (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml)); if ((*rsc)->variant == pe_unknown) { pe_err("Unknown resource type: %s", crm_element_name((*rsc)->xml)); free(*rsc); return FALSE; } #if ENABLE_VERSIONED_ATTRS (*rsc)->versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); #endif (*rsc)->meta = crm_str_table_new(); (*rsc)->allowed_nodes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); (*rsc)->known_on = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION); if (value) { (*rsc)->id = crm_strdup_printf("%s:%s", id, value); add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value); } else { (*rsc)->id = strdup(id); } (*rsc)->fns = &resource_class_functions[(*rsc)->variant]; pe_rsc_trace((*rsc), "Unpacking resource..."); get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set); (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated #if ENABLE_VERSIONED_ATTRS pe_get_versioned_attributes((*rsc)->versioned_parameters, *rsc, NULL, data_set); #endif (*rsc)->flags = 0; pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional); if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) { pe__set_resource_flags(*rsc, pe_rsc_managed); } (*rsc)->rsc_cons = NULL; (*rsc)->rsc_tickets = NULL;
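/* Editorial sketch (not part of this patch) of a hypothetical caller:
 *
 *   pe_resource_t *rsc = NULL;
 *
 *   if (common_unpack(xml_obj, &rsc, NULL, data_set)) {
 *       // rsc now has its meta-attributes resolved and the defaults below
 *   }
 */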
(*rsc)->actions = NULL; (*rsc)->role = RSC_ROLE_STOPPED; (*rsc)->next_role = RSC_ROLE_UNKNOWN; (*rsc)->recovery_type = recovery_stop_start; (*rsc)->stickiness = 0; (*rsc)->migration_threshold = INFINITY; (*rsc)->failure_timeout = 0; value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY); (*rsc)->priority = crm_parse_int(value, "0"); value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL); if ((value == NULL) || crm_is_true(value)) { pe__set_resource_flags(*rsc, pe_rsc_critical); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY); if (crm_is_true(value)) { pe__set_resource_flags(*rsc, pe_rsc_notify); } if (xml_contains_remote_node((*rsc)->xml)) { (*rsc)->is_remote_node = TRUE; if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) { guest_node = TRUE; } else { remote_node = TRUE; } } value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE); #if ENABLE_VERSIONED_ATTRS has_versioned_params = xml_has_children((*rsc)->versioned_parameters); #endif if (crm_is_true(value) && has_versioned_params) { pe_rsc_trace((*rsc), "Migration is disabled for resources with versioned parameters"); } else if (crm_is_true(value)) { pe__set_resource_flags(*rsc, pe_rsc_allow_migrate); } else if ((value == NULL) && remote_node && !has_versioned_params) { /* By default, we want remote nodes to be able * to float around the cluster without having to stop all the * resources within the remote-node before moving. Allowing * migration support enables this feature. If this ever causes * problems, migration support can be explicitly turned off with * allow-migrate=false. * We don't support migration for versioned resources, though. */ pe__set_resource_flags(*rsc, pe_rsc_allow_migrate); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED); if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) { if (crm_is_true(value)) { pe__set_resource_flags(*rsc, pe_rsc_managed); } else { pe__clear_resource_flags(*rsc, pe_rsc_managed); } } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE); if (crm_is_true(value)) { pe__clear_resource_flags(*rsc, pe_rsc_managed); pe__set_resource_flags(*rsc, pe_rsc_maintenance); } if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) { pe__clear_resource_flags(*rsc, pe_rsc_managed); pe__set_resource_flags(*rsc, pe_rsc_maintenance); } if (pe_rsc_is_clone(uber_parent(*rsc))) { value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE); if (crm_is_true(value)) { pe__set_resource_flags(*rsc, pe_rsc_unique); } if (detect_promotable(*rsc)) { pe__set_resource_flags(*rsc, pe_rsc_promotable); } } else { pe__set_resource_flags(*rsc, pe_rsc_unique); } pe_rsc_trace((*rsc), "Options for %s", (*rsc)->id); value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART); if (pcmk__str_eq(value, "restart", pcmk__str_casei)) { (*rsc)->restart_type = pe_restart_restart; pe_rsc_trace((*rsc), "\tDependency restart handling: restart"); pe_warn_once(pe_wo_restart_type, "Support for restart-type is deprecated and will be removed in a future release"); } else { (*rsc)->restart_type = pe_restart_ignore; pe_rsc_trace((*rsc), "\tDependency restart handling: ignore"); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE); if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) { (*rsc)->recovery_type = recovery_stop_only; pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop only"); } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) { (*rsc)->recovery_type = recovery_block; pe_rsc_trace((*rsc), 
"\tMultiple running resource recovery: block"); } else { (*rsc)->recovery_type = recovery_stop_start; pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start"); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS); if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) { (*rsc)->stickiness = char2score(value); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS); if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) { (*rsc)->migration_threshold = char2score(value); if ((*rsc)->migration_threshold < 0) { /* @TODO We use 1 here to preserve previous behavior, but this * should probably use the default (INFINITY) or 0 (to disable) * instead. */ pe_warn_once(pe_wo_neg_threshold, XML_RSC_ATTR_FAIL_STICKINESS " must be non-negative, using 1 instead"); (*rsc)->migration_threshold = 1; } } if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource); pe__set_resource_flags(*rsc, pe_rsc_fence_device); } value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES); handle_requires_pref: if (pcmk__str_eq(value, "nothing", pcmk__str_casei)) { } else if (pcmk__str_eq(value, "quorum", pcmk__str_casei)) { pe__set_resource_flags(*rsc, pe_rsc_needs_quorum); } else if (pcmk__str_eq(value, "unfencing", pcmk__str_casei)) { if (pcmk_is_set((*rsc)->flags, pe_rsc_fence_device)) { pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s " "to 'quorum' because fencing devices cannot " "require unfencing", (*rsc)->id); value = "quorum"; isdefault = TRUE; goto handle_requires_pref; } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s " "to 'quorum' because fencing is disabled", (*rsc)->id); value = "quorum"; isdefault = TRUE; goto handle_requires_pref; } else { pe__set_resource_flags(*rsc, pe_rsc_needs_fencing |pe_rsc_needs_unfencing); } } else if (pcmk__str_eq(value, "fencing", pcmk__str_casei)) { pe__set_resource_flags(*rsc, pe_rsc_needs_fencing); if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { pcmk__config_warn("%s requires fencing but fencing is disabled", (*rsc)->id); } } else { const char *orig_value = value; isdefault = TRUE; if (pcmk_is_set((*rsc)->flags, pe_rsc_fence_device)) { value = "quorum"; } else if (((*rsc)->variant == pe_native) && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS), PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei) && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_PROVIDER), "pacemaker", pcmk__str_casei) && pcmk__str_eq(crm_element_value((*rsc)->xml, XML_ATTR_TYPE), "remote", pcmk__str_casei) ) { value = "quorum"; } else if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { value = "unfencing"; } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fencing"; } else if (data_set->no_quorum_policy == no_quorum_ignore) { value = "nothing"; } else { value = "quorum"; } if (orig_value != NULL) { pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s " "to '%s' because '%s' is not valid", (*rsc)->id, value, orig_value); } goto handle_requires_pref; } pe_rsc_trace((*rsc), "\tRequired to start: %s%s", value, isdefault?" 
(default)":""); value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT); if (value != NULL) { // Stored as seconds (*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000); } if (remote_node) { GHashTable *params = pe_rsc_params(*rsc, NULL, data_set); /* Grabbing the value now means that any rules based on node attributes * will evaluate to false, so such rules should not be used with * reconnect_interval. * * @TODO Evaluate per node before using */ value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL); if (value) { /* reconnect delay works by setting failure_timeout and preventing the * connection from starting until the failure is cleared. */ (*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value); /* we want to override any default failure_timeout in use when remote * reconnect_interval is in use. */ (*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000; } } get_target_role(*rsc, &((*rsc)->next_role)); pe_rsc_trace((*rsc), "\tDesired next state: %s", (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default"); if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) { return FALSE; } if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) { // This tag must stay exactly the same because it is tested elsewhere resource_location(*rsc, NULL, 0, "symmetric_default", data_set); } else if (guest_node) { /* remote resources tied to a container resource must always be allowed * to opt-in to the cluster. Whether the connection resource is actually * allowed to be placed on a node is dependent on the container resource */ resource_location(*rsc, NULL, 0, "remote_connection_default", data_set); } pe_rsc_trace((*rsc), "\tAction notification: %s", pcmk_is_set((*rsc)->flags, pe_rsc_notify)? 
"required" : "not required"); (*rsc)->utilization = crm_str_table_new(); pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data, (*rsc)->utilization, NULL, FALSE, data_set); /* data_set->resources = g_list_append(data_set->resources, (*rsc)); */ if (expanded_xml) { if (add_template_rsc(xml_obj, data_set) == FALSE) { return FALSE; } } return TRUE; } void common_update_score(pe_resource_t * rsc, const char *id, int score) { pe_node_t *node = NULL; node = pe_hash_table_lookup(rsc->allowed_nodes, id); if (node != NULL) { pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score); node->weight = pe__add_scores(node->weight, score); } if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; common_update_score(child_rsc, id, score); } } } gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc) { pe_resource_t *parent = child; if (parent == NULL || rsc == NULL) { return FALSE; } while (parent->parent != NULL) { if (parent->parent == rsc) { return TRUE; } parent = parent->parent; } return FALSE; } pe_resource_t * uber_parent(pe_resource_t * rsc) { pe_resource_t *parent = rsc; if (parent == NULL) { return NULL; } while (parent->parent != NULL && parent->parent->variant != pe_container) { parent = parent->parent; } return parent; } void common_free(pe_resource_t * rsc) { if (rsc == NULL) { return; } pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant); g_list_free(rsc->rsc_cons); g_list_free(rsc->rsc_cons_lhs); g_list_free(rsc->rsc_tickets); g_list_free(rsc->dangling_migrations); if (rsc->parameter_cache != NULL) { g_hash_table_destroy(rsc->parameter_cache); } #if ENABLE_VERSIONED_ATTRS if (rsc->versioned_parameters != NULL) { free_xml(rsc->versioned_parameters); } #endif if (rsc->meta != NULL) { g_hash_table_destroy(rsc->meta); } if (rsc->utilization != NULL) { g_hash_table_destroy(rsc->utilization); } if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { free_xml(rsc->xml); rsc->xml = NULL; free_xml(rsc->orig_xml); rsc->orig_xml = NULL; /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */ } else if (rsc->orig_xml) { free_xml(rsc->xml); rsc->xml = NULL; } if (rsc->running_on) { g_list_free(rsc->running_on); rsc->running_on = NULL; } if (rsc->known_on) { g_hash_table_destroy(rsc->known_on); rsc->known_on = NULL; } if (rsc->actions) { g_list_free(rsc->actions); rsc->actions = NULL; } if (rsc->allowed_nodes) { g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = NULL; } g_list_free(rsc->fillers); g_list_free(rsc->rsc_location); pe_rsc_trace(rsc, "Resource freed"); free(rsc->id); free(rsc->clone_name); free(rsc->allocated_to); free(rsc->variant_opaque); free(rsc->pending_task); free(rsc); } /*! * \brief * \internal Find a node (and optionally count all) where resource is active * * \param[in] rsc Resource to check * \param[out] count_all If not NULL, will be set to count of active nodes * \param[out] count_clean If not NULL, will be set to count of clean nodes * * \return An active node (or NULL if resource is not active anywhere) * * \note The order of preference is: an active node that is the resource's * partial migration source; if the resource's "requires" is "quorum" or * "nothing", the first active node in the list that is clean and online; * the first active node in the list. 
*/ pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pe_node_t *active = NULL; pe_node_t *node = NULL; bool keep_looking = FALSE; bool is_happy = FALSE; if (count_all) { *count_all = 0; } if (count_clean) { *count_clean = 0; } if (rsc == NULL) { return NULL; } for (GList *node_iter = rsc->running_on; node_iter != NULL; node_iter = node_iter->next) { node = node_iter->data; keep_looking = FALSE; is_happy = node->details->online && !node->details->unclean; if (count_all) { ++*count_all; } if (count_clean && is_happy) { ++*count_clean; } if (count_all || count_clean) { // If we're counting, we need to go through entire list keep_looking = TRUE; } if (rsc->partial_migration_source != NULL) { if (node->details == rsc->partial_migration_source->details) { // This is the migration source active = node; } else { keep_looking = TRUE; } } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { if (is_happy && (!active || !active->details->online || active->details->unclean)) { // This is the first clean node active = node; } else { keep_looking = TRUE; } } if (active == NULL) { // This is first node in list active = node; } if (keep_looking == FALSE) { // Don't waste time iterating if we don't have to break; } } return active; } /*! * \internal * \brief Find and count active nodes according to "requires" * * \param[in] rsc Resource to check * \param[out] count If not NULL, will be set to count of active nodes * * \return An active node (or NULL if resource is not active anywhere) * * \note This is a convenience wrapper for pe__find_active_on() where the count * of all active nodes or only clean active nodes is desired according to * the "requires" meta-attribute. */ pe_node_t * pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count) { if (rsc && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { return pe__find_active_on(rsc, NULL, count); } return pe__find_active_on(rsc, count, NULL); } void pe__count_common(pe_resource_t *rsc) { if (rsc->children != NULL) { for (GList *item = rsc->children; item != NULL; item = item->next) { ((pe_resource_t *) item->data)->fns->count(item->data); } } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan) || (rsc->role > RSC_ROLE_STOPPED)) { rsc->cluster->ninstances++; if (pe__resource_is_disabled(rsc)) { rsc->cluster->disabled_resources++; } if (pcmk_is_set(rsc->flags, pe_rsc_block)) { rsc->cluster->blocked_resources++; } } } + +/*! + * \internal + * \brief Update a resource's next role + * + * \param[in,out] rsc Resource to be updated + * \param[in] role Resource's new next role + * \param[in] why Human-friendly reason why role is changing (for logs) + */ +void +pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why) +{ + CRM_ASSERT((rsc != NULL) && (why != NULL)); + if (rsc->next_role != role) { + pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)", + rsc->id, role2text(rsc->next_role), role2text(role), why); + rsc->next_role = role; + } +} diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 281bc88796..2d91abc49d 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,4008 +1,4099 @@ /* - * Copyright 2004-2020 the Pacemaker project contributors + * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than * use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the * flag is stringified more readably in log messages. */ #define set_config_flag(data_set, option, flag) do { \ const char *scf_value = pe_pref((data_set)->config_hash, (option)); \ if (scf_value != NULL) { \ if (crm_is_true(scf_value)) { \ (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Working set", \ crm_system_name, (data_set)->flags, \ (flag), #flag); \ } else { \ (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\ LOG_TRACE, "Working set", \ crm_system_name, (data_set)->flags, \ (flag), #flag); \ } \ } \ } while(0) static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *failed, pe_working_set_t *data_set); static void determine_remote_online_status(pe_working_set_t *data_set, pe_node_t *this_node); static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite, pe_working_set_t *data_set); static void determine_online_status(xmlNode *node_state, pe_node_t *this_node, pe_working_set_t *data_set); -static void unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_state, - pe_working_set_t *data_set); +static void unpack_node_lrm(pe_node_t *node, xmlNode *xml, + pe_working_set_t *data_set); // Bitmask for warnings we only want to print once uint32_t pe_wo = 0; static gboolean is_dangling_guest_node(pe_node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section. */ if (pe__is_guest_or_remote_node(node) && node->details->remote_rsc && node->details->remote_rsc->container == NULL && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_orphan_container_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] data_set Current working set of cluster * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider `priority-fencing-delay` */ void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (pe__is_guest_node(node)) { pe_resource_t *rsc = node->details->remote_rsc->container; if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", node->details->uname, reason, rsc->id); } else { crm_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", node->details->uname, rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. 
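 * For example (editorial sketch): a guest node "guest1" backed by
 * container resource "vm-guest1" is "fenced" by flagging vm-guest1 as
 * failed, so the scheduler recovers it by restarting the container
 * rather than by running a stonith device action.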
*/ node->details->remote_requires_reset = TRUE; pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", node->details->uname, reason); pe__set_resource_flags(node->details->remote_rsc, pe_rsc_failed|pe_rsc_stop); } else if (pe__is_remote_node(node)) { pe_resource_t *rsc = node->details->remote_rsc; if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", node->details->uname, reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; crm_warn("Remote node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply `priority-fencing-delay` for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", node->details->uname, pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean", reason); } else { crm_warn("Cluster node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'" \ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set) { xmlXPathObjectPtr result = NULL; if (!pcmk_is_set(data_set->flags, flag)) { result = xpath_search(data_set->input, xpath); if (result && (numXpathResults(result) > 0)) { pe__set_working_set_flags(data_set, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set) { const char *value = NULL; GHashTable *config_hash = crm_str_table_new(); pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; data_set->config_hash = config_hash; pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) { crm_info("Startup probes: disabled (dangerous)"); } value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_info("Watchdog-based self-fencing will be performed via SBD if " "fencing is required and stonith-watchdog-timeout is nonzero"); pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource); } /* Set certain flags via xpath here, so they can be used before the relevant * 
configuration sections are unpacked. */ set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = (int) crm_parse_interval_spec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); if (!strcmp(data_set->stonith_action, "poweroff")) { pe_warn_once(pe_wo_poweroff, "Support for stonith-action of 'poweroff' is deprecated " "and will be removed in a future release (use 'off' instead)"); data_set->stonith_action = "off"; } crm_trace("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing); crm_debug("Concurrent fencing is %s", pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled"); value = pe_pref(data_set->config_hash, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY); if (value) { data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000; crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay); } set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything); crm_debug("Stop all active resources: %s", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything))); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "no-quorum-policy"); if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) { data_set->no_quorum_policy = no_quorum_ignore; } else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) { data_set->no_quorum_policy = no_quorum_freeze; } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { data_set->no_quorum_policy = no_quorum_demote; } else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) { if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { int do_panic = 0; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { data_set->no_quorum_policy = no_quorum_suicide; } else { crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { pcmk__config_err("Resetting no-quorum-policy to 'stop' because " "fencing is disabled"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case no_quorum_demote: crm_debug("On loss of quorum: " "Demote promotable resources and stop other resources"); break; case no_quorum_suicide: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_trace("Orphan resources are %s", pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? 
"stopped" : "ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_trace("Orphan resource actions are %s", pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_trace("Stopped resources are removed from the status section: %s", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_remove_after_stop))); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_trace("Maintenance mode: %s", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode))); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_trace("Start failures are %s", pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount"); if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing); } if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes"); } pcmk__score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); pcmk__score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); pcmk__score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_trace("Placement strategy: %s", data_set->placement_strategy); set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock); crm_trace("Resources will%s be locked to cleanly shut down nodes", (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? 
"" : " not")); if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) { value = pe_pref(data_set->config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT); data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000; crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock); } return TRUE; } pe_node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set) { pe_node_t *new_node = NULL; if (pe_find_node(data_set->nodes, uname) != NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } new_node = calloc(1, sizeof(pe_node_t)); if (new_node == NULL) { return NULL; } new_node->weight = char2score(score); new_node->fixed = FALSE; new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->type = node_ping; if (pcmk__str_eq(type, "remote", pcmk__str_casei)) { new_node->details->type = node_remote; pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes); } else if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) { new_node->details->type = node_member; } new_node->details->attrs = crm_str_table_new(); if (pe__is_guest_or_remote_node(new_node)) { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("remote")); } else { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("cluster")); } new_node->details->utilization = crm_str_table_new(); new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, free, pe__free_digests); data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = ID(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) { if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS, pcmk__str_casei)) { continue; } for (attr = pcmk__xe_first_child(attr_set); attr != NULL; attr = pcmk__xe_next(attr)) { const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) { remote_name = value; } else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) { remote_server = value; } else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) { remote_port = value; } else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) { connect_timeout = value; } else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) { remote_allow_migrate=value; } else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (pe_find_resource(data->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, 
container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node) { if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. */ new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; pe_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE "> entry in configuration without id"); continue; } new_node = pe_create_node(id, uname, type, score, data_set); if (new_node == NULL) { return FALSE; } /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ handle_startup_fencing(data_set, new_node); add_node_attrs(xml_obj, new_node, FALSE, data_set); pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data, new_node->details->utilization, NULL, FALSE, data_set); crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) { crm_info("Creating a fake local node"); pe_create_node(data_set->localhost, data_set->localhost, NULL, 0, data_set); } return TRUE; } static void setup_container(pe_resource_t * rsc, pe_working_set_t * data_set) { const char *container_id = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; setup_container(child_rsc, data_set); } return; } container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER); if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) { pe_resource_t *container = pe_find_resource(data_set->resources, container_id); if (container) { rsc->container = container; pe__set_resource_flags(container, pe_rsc_is_container); container->fillers = g_list_append(container->fillers, rsc); pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean 
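As a reading aid only (not part of this patch), a minimal sketch of the linkage that setup_container() leaves behind, using a hypothetical resource ID:

/* Hedged sketch; "httpd" is a hypothetical filler resource ID. After
 * setup_container() has run for every resource:
 */
pe_resource_t *filler = pe_find_resource(data_set->resources, "httpd");

if ((filler != NULL) && (filler->container != NULL)) {
    // The linkage is bidirectional: the container lists the filler too
    CRM_ASSERT(g_list_find(filler->container->fillers, filler) != NULL);
    CRM_ASSERT(pcmk_is_set(filler->container->flags, pe_rsc_is_container));
}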
gboolean
unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;

    /* Create remote nodes and guest nodes from the resource configuration
     * before unpacking resources.
     */
    for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        const char *new_node_id = NULL;

        /* Check for remote nodes, which are defined by ocf:pacemaker:remote
         * primitives.
         */
        if (xml_contains_remote_node(xml_obj)) {
            new_node_id = ID(xml_obj);
            /* The "pe_find_node" check is here to make sure we don't iterate over
             * an expanded node that has already been added to the node list. */
            if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                crm_trace("Found remote node %s defined by resource %s",
                          new_node_id, ID(xml_obj));
                pe_create_node(new_node_id, new_node_id, "remote", NULL,
                               data_set);
            }
            continue;
        }

        /* Check for guest nodes, which are defined by special meta-attributes
         * of a primitive of any type (for example, VirtualDomain or Xen).
         */
        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
            /* This will add an ocf:pacemaker:remote primitive to the
             * configuration for the guest node's connection, to be unpacked
             * later.
             */
            new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
            if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                crm_trace("Found guest node %s in resource %s",
                          new_node_id, ID(xml_obj));
                pe_create_node(new_node_id, new_node_id, "remote", NULL,
                               data_set);
            }
            continue;
        }

        /* Check for guest nodes inside a group. Clones are currently not
         * supported as guest nodes.
         */
        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) {
            xmlNode *xml_obj2 = NULL;

            for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
                 xml_obj2 = pcmk__xe_next(xml_obj2)) {

                new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);

                if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                    crm_trace("Found guest node %s in resource %s inside group %s",
                              new_node_id, ID(xml_obj2), ID(xml_obj));
                    pe_create_node(new_node_id, new_node_id, "remote", NULL,
                                   data_set);
                }
            }
        }
    }
    return TRUE;
}

/* Call this after all the nodes and resources have been
 * unpacked, but before the status section is read.
 *
 * A remote node's online status is reflected by the state
 * of the remote node's connection resource. We need to link
 * the remote node to this connection resource so we can have
 * easy access to the connection resource during the scheduler calculations.
 */
static void
link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
{
    pe_node_t *remote_node = NULL;

    if (new_rsc->is_remote_node == FALSE) {
        return;
    }

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        /* remote_nodes and remote_resources are not linked in quick location calculations */
        return;
    }

    remote_node = pe_find_node(data_set->nodes, new_rsc->id);
    CRM_CHECK(remote_node != NULL, return;);

    pe_rsc_trace(new_rsc, "Linking remote connection resource %s to node %s",
                 new_rsc->id, remote_node->details->uname);
    remote_node->details->remote_rsc = new_rsc;

    if (new_rsc->container == NULL) {
        /* Handle start-up fencing for remote nodes (as opposed to guest nodes)
         * the same as is done for cluster nodes.
         */
        handle_startup_fencing(data_set, remote_node);

    } else {
        /* pe_create_node() marks the new node as "remote" or "cluster"; now
         * that we know the node is a guest node, update it correctly.
         */
        g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND),
                             strdup("container"));
    }
}

static void
destroy_tag(gpointer data)
{
    pe_tag_t *tag = data;

    if (tag) {
        free(tag->id);
        g_list_free_full(tag->refs, free);
        free(tag);
    }
}

/*!
 * \internal
 * \brief Parse configuration XML for resource information
 *
 * \param[in]     xml_resources  Top of resource configuration XML
 * \param[in,out] data_set       Where to put resource information
 *
 * \return TRUE
 *
 * \note unpack_remote_nodes() MUST be called before this, so that the nodes can
 *       be used when common_unpack() calls resource_location()
 */
gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;
    GListPtr gIter = NULL;

    data_set->template_rsc_sets = g_hash_table_new_full(crm_str_hash,
                                                        g_str_equal, free,
                                                        destroy_tag);

    for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        pe_resource_t *new_rsc = NULL;

        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, pcmk__str_none)) {
            const char *template_id = ID(xml_obj);

            if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets,
                                                            template_id, NULL, NULL) == FALSE) {
                /* Record the template's ID for the knowledge of its existence anyway. */
                g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL);
            }
            continue;
        }

        crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj));
        if (common_unpack(xml_obj, &new_rsc, NULL, data_set) && (new_rsc != NULL)) {
            data_set->resources = g_list_append(data_set->resources, new_rsc);
            pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);

        } else {
            pcmk__config_err("Ignoring <%s> resource '%s' "
                             "because configuration is invalid",
                             crm_element_name(xml_obj), crm_str(ID(xml_obj)));
            if (new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
        }
    }

    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *rsc = (pe_resource_t *) gIter->data;

        setup_container(rsc, data_set);
        link_rsc2remotenode(data_set, rsc);
    }

    data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);
    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        /* Ignore */

    } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
               && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {

        pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
        pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
        pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
    }

    return TRUE;
}
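A subtlety worth noting in unpack_resources(): template IDs are stored with a NULL value, so mere existence has to be tested with g_hash_table_lookup_extended(). A minimal sketch, with a hypothetical template ID:

// Hedged sketch; "web-template" is a hypothetical template ID
if (g_hash_table_lookup_extended(data_set->template_rsc_sets, "web-template",
                                 NULL, NULL)) {
    // The template is known, even though g_hash_table_lookup() would
    // return NULL here because the stored value is NULL
}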
gboolean
unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
{
    xmlNode *xml_tag = NULL;

    data_set->tags = g_hash_table_new_full(crm_str_hash, g_str_equal, free,
                                           destroy_tag);

    for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
         xml_tag = pcmk__xe_next(xml_tag)) {

        xmlNode *xml_obj_ref = NULL;
        const char *tag_id = ID(xml_tag);

        if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) {
            continue;
        }

        if (tag_id == NULL) {
            pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
                             crm_element_name(xml_tag));
            continue;
        }

        for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL;
             xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {

            const char *obj_ref = ID(xml_obj_ref);

            if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) {
                continue;
            }

            if (obj_ref == NULL) {
                pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
                                 crm_element_name(xml_obj_ref), tag_id);
                continue;
            }

            if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
                return FALSE;
            }
        }
    }

    return TRUE;
}

/* The ticket state section:
 * "/cib/status/tickets/ticket_state"
 */
static gboolean
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
{
    const char *ticket_id = NULL;
    const char *granted = NULL;
    const char *last_granted = NULL;
    const char *standby = NULL;
    xmlAttrPtr xIter = NULL;

    pe_ticket_t *ticket = NULL;

    ticket_id = ID(xml_ticket);
    if (pcmk__str_empty(ticket_id)) {
        return FALSE;
    }

    crm_trace("Processing ticket state for %s", ticket_id);

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {
        ticket = ticket_new(ticket_id, data_set);
        if (ticket == NULL) {
            return FALSE;
        }
    }

    for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
        const char *prop_name = (const char *)xIter->name;
        const char *prop_value = crm_element_value(xml_ticket, prop_name);

        if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
            continue;
        }
        g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
    }

    granted = g_hash_table_lookup(ticket->state, "granted");
    if (granted && crm_is_true(granted)) {
        ticket->granted = TRUE;
        crm_info("We have ticket '%s'", ticket->id);
    } else {
        ticket->granted = FALSE;
        crm_info("We do not have ticket '%s'", ticket->id);
    }

    last_granted = g_hash_table_lookup(ticket->state, "last-granted");
    if (last_granted) {
        ticket->last_granted = crm_parse_int(last_granted, 0);
    }

    standby = g_hash_table_lookup(ticket->state, "standby");
    if (standby && crm_is_true(standby)) {
        ticket->standby = TRUE;
        if (ticket->granted) {
            crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
        }
    } else {
        ticket->standby = FALSE;
    }

    crm_trace("Done with ticket state for %s", ticket_id);

    return TRUE;
}

static gboolean
unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;

    for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
            continue;
        }
        unpack_ticket_state(xml_obj, data_set);
    }

    return TRUE;
}
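A short usage sketch of what unpack_ticket_state() leaves behind, with a hypothetical ticket ID: every ticket_state XML property except id lands in ticket->state, and the booleans are derived from that hash table.

// Hedged sketch; "ticketA" is a hypothetical ticket ID
pe_ticket_t *t = g_hash_table_lookup(data_set->tickets, "ticketA");

if (t != NULL) {
    const char *granted = g_hash_table_lookup(t->state, "granted");

    // After unpacking, the derived flag and the raw state agree
    CRM_ASSERT(t->granted == (granted && crm_is_true(granted)));
}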
static void
unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_t * data_set)
{
    const char *resource_discovery_enabled = NULL;
    xmlNode *attrs = NULL;
    pe_resource_t *rsc = NULL;

    if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
        return;
    }

    if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) {
        return;
    }
    crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname);

    this_node->details->remote_maintenance =
        crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0");

    rsc = this_node->details->remote_rsc;
    if (this_node->details->remote_requires_reset == FALSE) {
        this_node->details->unclean = FALSE;
        this_node->details->unseen = FALSE;
    }
    attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
    add_node_attrs(attrs, this_node, TRUE, data_set);

    if (pe__shutdown_requested(this_node)) {
        crm_info("Node %s is shutting down", this_node->details->uname);
        this_node->details->shutdown = TRUE;
-        if (rsc) {
-            rsc->next_role = RSC_ROLE_STOPPED;
-        }
    }

    if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
        crm_info("Node %s is in standby-mode", this_node->details->uname);
        this_node->details->standby = TRUE;
    }

    if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
        ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
        crm_info("Node %s is in maintenance-mode", this_node->details->uname);
        this_node->details->maintenance = TRUE;
    }

    resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
    if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
        if (pe__is_remote_node(this_node)
            && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            crm_warn("Ignoring %s attribute on remote node %s because stonith is disabled",
                     XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
        } else {
            /* This is either a remote node with fencing enabled, or a guest
             * node. We don't care whether fencing is enabled when fencing guest
             * nodes, because they are "fenced" by recovering their containing
             * resource.
             */
            crm_info("Node %s has resource discovery disabled", this_node->details->uname);
            this_node->details->rsc_discovery_enabled = FALSE;
        }
    }
}

-static bool
-unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set)
+/*!
+ * \internal
+ * \brief Unpack a cluster node's transient attributes
+ *
+ * \param[in] state     CIB node state XML
+ * \param[in] node      Cluster node whose attributes are being unpacked
+ * \param[in] data_set  Cluster working set
+ */
+static void
+unpack_transient_attributes(xmlNode *state, pe_node_t *node,
+                            pe_working_set_t *data_set)
 {
-    bool changed = false;
-    xmlNode *lrm_rsc = NULL;
+    const char *discovery = NULL;
+    xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);

-    for (xmlNode *state = pcmk__xe_first_child(status); state != NULL;
-         state = pcmk__xe_next(state)) {
+    add_node_attrs(attrs, node, TRUE, data_set);
+
+    if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
+        crm_info("Node %s is in standby-mode", node->details->uname);
+        node->details->standby = TRUE;
+    }
+
+    if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) {
+        crm_info("Node %s is in maintenance-mode", node->details->uname);
+        node->details->maintenance = TRUE;
+    }
+
+    discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY);
+    if ((discovery != NULL) && !crm_is_true(discovery)) {
+        crm_warn("Ignoring %s attribute for node %s because disabling "
+                 "resource discovery is not allowed for cluster nodes",
+                 XML_NODE_ATTR_RSC_DISCOVERY, node->details->uname);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Unpack a node state entry (first pass)
+ *
+ * Unpack one node state entry from status. This unpacks information from the
+ * node_state element itself and node attributes inside it, but not the
+ * resource history inside it. Multiple passes through the status are needed to
+ * fully unpack everything.
+ *
+ * \param[in] state     CIB node state XML
+ * \param[in] data_set  Cluster working set
+ */
+static void
+unpack_node_state(xmlNode *state, pe_working_set_t *data_set)
+{
+    const char *id = NULL;
+    const char *uname = NULL;
+    pe_node_t *this_node = NULL;
+
+    id = crm_element_value(state, XML_ATTR_ID);
+    if (id == NULL) {
+        crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
+                 XML_ATTR_ID);
+        return;
+    }
+
+    uname = crm_element_value(state, XML_ATTR_UNAME);
+    if (uname == NULL) {
+        crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
+                 XML_ATTR_UNAME);
+        return;
+    }
+
+    this_node = pe_find_node_any(data_set->nodes, id, uname);
+    if (this_node == NULL) {
+        pcmk__config_warn("Ignoring recorded node state for '%s' because "
+                          "it is no longer in the configuration", uname);
+        return;
+    }

-        const char *id = NULL;
-        const char *uname = NULL;
+    if (pe__is_guest_or_remote_node(this_node)) {
+        /* We can't determine the online status of Pacemaker Remote nodes until
+         * after all resource history has been unpacked. In this first pass, we
+         * do need to mark whether the node has been fenced, as this plays a
+         * role during unpacking cluster node resource state.
+         */
+        const char *is_fenced = crm_element_value(state, XML_NODE_IS_FENCED);
+
+        this_node->details->remote_was_fenced = crm_atoi(is_fenced, "0");
+        return;
+    }
+
+    unpack_transient_attributes(state, this_node, data_set);
+
+    /* Provisionally mark this cluster node as clean. We have at least seen it
+     * in the current cluster's lifetime.
+     */
+    this_node->details->unclean = FALSE;
+    this_node->details->unseen = FALSE;
+
+    crm_trace("Determining online status of cluster node %s (id %s)",
+              this_node->details->uname, id);
+    determine_online_status(state, this_node, data_set);
+
+    if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
+        && this_node->details->online
+        && (data_set->no_quorum_policy == no_quorum_suicide)) {
+        /* Everything else should flow from this automatically
+         * (at least until the scheduler becomes able to migrate off
+         * healthy resources)
+         */
+        pe_fence_node(data_set, this_node, "cluster does not have quorum",
+                      FALSE);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Unpack nodes' resource history as much as possible
+ *
+ * Unpack as many nodes' resource history as possible in one pass through the
+ * status. We need to process Pacemaker Remote nodes' connections/containers
+ * before unpacking their history; the connection/container history will be
+ * in another node's history, so it might take multiple passes to unpack
+ * everything.
+ *
+ * \param[in] status    CIB XML status section
+ * \param[in] fence     If true, treat any not-yet-unpacked nodes as unseen
+ * \param[in] data_set  Cluster working set
+ *
+ * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
+ *         or EAGAIN if more unpacking remains to be done)
+ */
+static int
+unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set)
+{
+    int rc = pcmk_rc_ok;
+
+    // Loop through all node_state entries in CIB status
+    for (xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE);
+         state != NULL; state = crm_next_same_xml(state)) {
+
+        const char *id = ID(state);
+        const char *uname = crm_element_value(state, XML_ATTR_UNAME);
         pe_node_t *this_node = NULL;

-        bool process = FALSE;

-        if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
+        if ((id == NULL) || (uname == NULL)) {
+            // Warning already logged in first pass through status section
+            crm_trace("Not unpacking resource history from malformed "
+                      XML_CIB_TAG_STATE " without id and/or uname");
             continue;
         }

-        id = crm_element_value(state, XML_ATTR_ID);
-        uname = crm_element_value(state, XML_ATTR_UNAME);
         this_node = pe_find_node_any(data_set->nodes, id, uname);
-
         if (this_node == NULL) {
-            crm_info("Node %s is unknown", id);
+            // Warning already logged in first pass through status section
+            crm_trace("Not unpacking resource history for node %s because "
+                      "no longer in configuration", id);
             continue;
+        }

-        } else if (this_node->details->unpacked) {
-            crm_trace("Node %s was already processed", id);
+        if (this_node->details->unpacked) {
+            crm_trace("Not unpacking resource history for node %s because "
+                      "already unpacked", id);
             continue;
+        }

-        } else if (!pe__is_guest_or_remote_node(this_node)
-                   && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
-            // A redundant test, but preserves the order for regression tests
-            process = TRUE;
+        if (fence) {
+            // We're processing all remaining nodes

-        } else if (pe__is_guest_or_remote_node(this_node)) {
-            bool check = FALSE;
+        } else if (pe__is_guest_node(this_node)) {
+            /* We can unpack a guest node's history only after we've unpacked
+             * other resource history to the point that we know that the node's
+             * connection and containing resource are both up.
+             */
             pe_resource_t *rsc = this_node->details->remote_rsc;

-            if(fence) {
-                check = TRUE;
-
-            } else if(rsc == NULL) {
-                /* Not ready yet */
-
-            } else if (pe__is_guest_node(this_node)
-                       && rsc->role == RSC_ROLE_STARTED
-                       && rsc->container->role == RSC_ROLE_STARTED) {
-                /* Both the connection and its containing resource need to be
-                 * known to be up before we process resources running in it.
-                 */
-                check = TRUE;
-                crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED);
-
-            } else if (!pe__is_guest_node(this_node)
-                       && ((rsc->role == RSC_ROLE_STARTED)
-                           || pcmk_is_set(data_set->flags, pe_flag_shutdown_lock))) {
-                check = TRUE;
-                crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED);
+            if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
+                || (rsc->container->role != RSC_ROLE_STARTED)) {
+                crm_trace("Not unpacking resource history for guest node %s "
+                          "because container and connection are not known to "
+                          "be up", id);
+                continue;
             }

-            if (check) {
-                determine_remote_online_status(data_set, this_node);
-                unpack_handle_remote_attrs(this_node, state, data_set);
-                process = TRUE;
-            }
+        } else if (pe__is_remote_node(this_node)) {
+            /* We can unpack a remote node's history only after we've unpacked
+             * other resource history to the point that we know that the node's
+             * connection is up, with the exception of when shutdown locks are
+             * in use.
+             */
+            pe_resource_t *rsc = this_node->details->remote_rsc;

-        } else if (this_node->details->online) {
-            process = TRUE;
+            if ((rsc == NULL)
+                || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
+                    && (rsc->role != RSC_ROLE_STARTED))) {
+                crm_trace("Not unpacking resource history for remote node %s "
+                          "because connection is not known to be up", id);
+                continue;
+            }

-        } else if (fence) {
-            process = TRUE;
+        /* If fencing and shutdown locks are disabled and we're not processing
+         * unseen nodes, then we don't want to unpack offline nodes until online
+         * nodes have been unpacked. This allows us to number active clone
+         * instances first.
+         */
+        } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
+                                                        |pe_flag_shutdown_lock)
+                   && !this_node->details->online) {
+            crm_trace("Not unpacking resource history for offline "
+                      "cluster node %s", id);
+            continue;
+        }

-        } else if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
-            process = TRUE;
+        if (pe__is_guest_or_remote_node(this_node)) {
+            determine_remote_online_status(data_set, this_node);
+            unpack_handle_remote_attrs(this_node, state, data_set);
         }

-        if(process) {
-            crm_trace("Processing lrm resource entries on %shealthy%s node: %s",
-                      fence?"un":"",
-                      (pe__is_guest_or_remote_node(this_node)? " remote" : ""),
-                      this_node->details->uname);
-            changed = TRUE;
-            this_node->details->unpacked = TRUE;
+        crm_trace("Unpacking resource history for %snode %s",
+                  (fence? "unseen " : ""), id);

-            lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE);
-            lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
-            unpack_lrm_resources(this_node, lrm_rsc, data_set);
-        }
+        this_node->details->unpacked = TRUE;
+        unpack_node_lrm(this_node, state, data_set);
+
+        rc = EAGAIN; // Other node histories might depend on this one
     }
-    return changed;
+    return rc;
 }
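A sketch of how a caller is expected to drive unpack_node_history(); this mirrors the loop in unpack_status() below. The pass ordering in the comment is an illustrative scenario, not something the function guarantees:

/* Hedged illustration: each pass unpacks whatever is unblocked, e.g.
 *   pass 1: cluster nodes (their history contains the remote connections)
 *   pass 2: remote nodes (connections now known to be started)
 *   pass 3: guest nodes (containers and connections now both known)
 * EAGAIN means at least one node was unpacked, so another pass may help.
 */
while (unpack_node_history(status, false, data_set) == EAGAIN) {
    // keep going until a pass unpacks nothing new (pcmk_rc_ok)
}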
/* remove nodes that are down, stopping */
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else?
 */
gboolean
unpack_status(xmlNode * status, pe_working_set_t * data_set)
{
-    const char *id = NULL;
-    const char *uname = NULL;
-
     xmlNode *state = NULL;
-    pe_node_t *this_node = NULL;

     crm_trace("Beginning unpack");

     if (data_set->tickets == NULL) {
         data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                   free, destroy_ticket);
     }

     for (state = pcmk__xe_first_child(status); state != NULL;
          state = pcmk__xe_next(state)) {

         if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
             unpack_tickets_state((xmlNode *) state, data_set);

         } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
-            xmlNode *attrs = NULL;
-            const char *resource_discovery_enabled = NULL;
-
-            id = crm_element_value(state, XML_ATTR_ID);
-            uname = crm_element_value(state, XML_ATTR_UNAME);
-            this_node = pe_find_node_any(data_set->nodes, id, uname);
-
-            if (uname == NULL) {
-                /* error */
-                continue;
-
-            } else if (this_node == NULL) {
-                pcmk__config_warn("Ignoring recorded node status for '%s' "
-                                  "because no longer in configuration", uname);
-                continue;
-
-            } else if (pe__is_guest_or_remote_node(this_node)) {
-                /* online state for remote nodes is determined by the
-                 * rsc state after all the unpacking is done. we do however
-                 * need to mark whether or not the node has been fenced as this plays
-                 * a role during unpacking cluster node resource state */
-                this_node->details->remote_was_fenced =
-                    crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0");
-                continue;
-            }
-
-            crm_trace("Processing node id=%s, uname=%s", id, uname);
-
-            /* Mark the node as provisionally clean
-             * - at least we have seen it in the current cluster's lifetime
-             */
-            this_node->details->unclean = FALSE;
-            this_node->details->unseen = FALSE;
-            attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
-            add_node_attrs(attrs, this_node, TRUE, data_set);
-
-            if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
-                crm_info("Node %s is in standby-mode", this_node->details->uname);
-                this_node->details->standby = TRUE;
-            }
-
-            if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) {
-                crm_info("Node %s is in maintenance-mode", this_node->details->uname);
-                this_node->details->maintenance = TRUE;
-            }
-
-            resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
-            if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
-                crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes",
-                         XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
-            }
-
-            crm_trace("determining node state");
-            determine_online_status(state, this_node, data_set);
-
-            if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
-                && this_node->details->online
-                && (data_set->no_quorum_policy == no_quorum_suicide)) {
-                /* Everything else should flow from this automatically
-                 * (at least until the scheduler becomes able to migrate off
-                 * healthy resources)
-                 */
-                pe_fence_node(data_set, this_node, "cluster does not have quorum", FALSE);
-            }
+            unpack_node_state(state, data_set);
         }
     }

-    while(unpack_node_loop(status, FALSE, data_set)) {
-        crm_trace("Start another loop");
+    while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
+        crm_trace("Another pass through node resource histories is needed");
     }

     // Now catch any nodes we didn't see
-    unpack_node_loop(status,
-                     pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
-                     data_set);
+    unpack_node_history(status,
+                        pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
+                        data_set);

     /* Now that we know where resources are, we can schedule stops of containers
      * with failed bundle connections
      */
     if (data_set->stop_needed != NULL) {
         for (GList *item = data_set->stop_needed; item; item = item->next) {
             pe_resource_t *container = item->data;
             pe_node_t *node = pe__current_node(container);

             if (node) {
                 stop_action(container, node, FALSE);
             }
         }
         g_list_free(data_set->stop_needed);
         data_set->stop_needed = NULL;
     }

+    /* Now that we know status of all Pacemaker Remote connections and nodes,
+     * we can stop connections for node shutdowns, and check the online status
+     * of remote/guest nodes that didn't have any node history to unpack.
+     */
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *this_node = gIter->data;

-        if (this_node == NULL) {
-            continue;
-        } else if (!pe__is_guest_or_remote_node(this_node)) {
-            continue;
-        } else if(this_node->details->unpacked) {
+        if (!pe__is_guest_or_remote_node(this_node)) {
             continue;
         }
-        determine_remote_online_status(data_set, this_node);
+        if (this_node->details->shutdown
+            && (this_node->details->remote_rsc != NULL)) {
+            pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
+                              "remote shutdown");
+        }
+        if (!this_node->details->unpacked) {
+            determine_remote_online_status(data_set, this_node);
+        }
     }

     return TRUE;
}
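unpack_status() now records why a connection's next role changed via pe__set_next_role(), which this changeset introduces elsewhere; its exact definition is not shown here, but a plausible minimal shape, assuming it only logs the reason and assigns the role, would be:

// Hedged sketch only; the real pe__set_next_role() is defined elsewhere
// in this changeset and may differ.
void
pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
{
    CRM_ASSERT((rsc != NULL) && (why != NULL));
    if (rsc->next_role != role) {
        crm_trace("Resetting next role for %s to %s because %s",
                  rsc->id, role2text(role), why);
        rsc->next_role = role;
    }
}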
static gboolean
determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                   pe_node_t * this_node)
{
    gboolean online = FALSE;
    const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
    const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
    const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
    const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);

    if (!crm_is_true(in_cluster)) {
        crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster));

    } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
        if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
            online = TRUE;
        } else {
            crm_debug("Node is not ready to run resources: %s", join);
        }

    } else if (this_node->details->expected_up == FALSE) {
        crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster));
        crm_trace("\tis_peer=%s, join=%s, expected=%s",
                  crm_str(is_peer), crm_str(join), crm_str(exp_state));

    } else {
        /* mark it unclean */
        pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
        crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s",
                 crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state));
    }
    return online;
}

static gboolean
determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
                                pe_node_t * this_node)
{
    gboolean online = FALSE;
    gboolean do_terminate = FALSE;
    bool crmd_online = FALSE;
    const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
    const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
    const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
    const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
    const char *terminate = pe_node_attribute_raw(this_node, "terminate");

/*
  - XML_NODE_IN_CLUSTER    ::= true|false
  - XML_NODE_IS_PEER       ::= online|offline
  - XML_NODE_JOIN_STATE    ::= member|down|pending|banned
  - XML_NODE_EXPECTED      ::= member|down
*/

    if (crm_is_true(terminate)) {
        do_terminate = TRUE;

    } else if (terminate != NULL && strlen(terminate) > 0) {
        /* could be a time() value */
        char t = terminate[0];

        if (t != '0' && isdigit(t)) {
            do_terminate = TRUE;
        }
    }

    crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d",
              this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
              crm_str(join), crm_str(exp_state), do_terminate);

    online = crm_is_true(in_cluster);
    crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
    if (exp_state == NULL) {
        exp_state = CRMD_JOINSTATE_DOWN;
    }

    if (this_node->details->shutdown) {
        crm_debug("%s is shutting down", this_node->details->uname);

        /* Slightly different criteria since we can't shut down a dead peer */
        online = crmd_online;

    } else if (in_cluster == NULL) {
        pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);

    } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
        pe_fence_node(data_set, this_node,
                      "peer failed the pacemaker membership criteria", FALSE);

    } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {

        if (crm_is_true(in_cluster) || crmd_online) {
            crm_info("- Node %s is not ready to run resources", this_node->details->uname);
            this_node->details->standby = TRUE;
            this_node->details->pending = TRUE;

        } else {
            crm_trace("%s is down or still coming up", this_node->details->uname);
        }

    } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
               && crm_is_true(in_cluster) == FALSE && !crmd_online) {
        crm_info("Node %s was just shot", this_node->details->uname);
        online = FALSE;

    } else if (crm_is_true(in_cluster) == FALSE) {
        // Consider `priority-fencing-delay` for lost nodes
        pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);

    } else if (!crmd_online) {
        pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);

        /* Everything is running at this point, now check join state */
    } else if (do_terminate) {
        pe_fence_node(data_set, this_node, "termination was requested", FALSE);

    } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
        crm_info("Node %s is active", this_node->details->uname);

    } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
        crm_info("Node %s is not ready to run resources", this_node->details->uname);
        this_node->details->standby = TRUE;
        this_node->details->pending = TRUE;

    } else {
        pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
        crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d",
                 this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
                 crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown);
    }

    return online;
}

static void
determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
{
    pe_resource_t *rsc = this_node->details->remote_rsc;
    pe_resource_t *container = NULL;
    pe_node_t *host = NULL;

    /* If there is a node state entry for a (former) Pacemaker Remote node
     * but no resource creating that node, the node's connection resource will
     * be NULL. Consider it an offline remote node in that case.
     */
    if (rsc == NULL) {
        this_node->details->online = FALSE;
        goto remote_online_done;
    }

    container = rsc->container;

    if (container && pcmk__list_of_1(rsc->running_on)) {
        host = rsc->running_on->data;
    }

    /* If the resource is currently started, mark it online. */
    if (rsc->role == RSC_ROLE_STARTED) {
        crm_trace("%s node %s presumed ONLINE because connection resource is started",
                  (container? "Guest" : "Remote"), this_node->details->id);
        this_node->details->online = TRUE;
    }

    /* consider this node shutting down if transitioning start->stop */
    if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
        crm_trace("%s node %s shutting down because connection resource is stopping",
                  (container? "Guest" : "Remote"), this_node->details->id);
        this_node->details->shutdown = TRUE;
    }

    /* Now check all the failure conditions. */
    if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
        crm_trace("Guest node %s UNCLEAN because guest resource failed",
                  this_node->details->id);
        this_node->details->online = FALSE;
        this_node->details->remote_requires_reset = TRUE;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        crm_trace("%s node %s OFFLINE because connection resource failed",
                  (container? "Guest" : "Remote"), this_node->details->id);
        this_node->details->online = FALSE;

    } else if (rsc->role == RSC_ROLE_STOPPED
               || (container && container->role == RSC_ROLE_STOPPED)) {

        crm_trace("%s node %s OFFLINE because its resource is stopped",
                  (container? "Guest" : "Remote"), this_node->details->id);
        this_node->details->online = FALSE;
        this_node->details->remote_requires_reset = FALSE;

    } else if (host && (host->details->online == FALSE)
               && host->details->unclean) {
        crm_trace("Guest node %s UNCLEAN because host is unclean",
                  this_node->details->id);
        this_node->details->online = FALSE;
        this_node->details->remote_requires_reset = TRUE;
    }

remote_online_done:
    crm_trace("Remote node %s online=%s",
              this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
}

static void
determine_online_status(xmlNode * node_state, pe_node_t * this_node, pe_working_set_t * data_set)
{
    gboolean online = FALSE;
    const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);

    CRM_CHECK(this_node != NULL, return);

    this_node->details->shutdown = FALSE;
    this_node->details->expected_up = FALSE;

    if (pe__shutdown_requested(this_node)) {
        this_node->details->shutdown = TRUE;

    } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
        this_node->details->expected_up = TRUE;
    }

    if (this_node->details->type == node_ping) {
        this_node->details->unclean = FALSE;
        online = FALSE;     /* As far as resource management is concerned,
                             * the node is safely offline.
                             * Anyone caught abusing this logic will be shot
                             */

    } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
        online = determine_online_status_no_fencing(data_set, node_state, this_node);

    } else {
        online = determine_online_status_fencing(data_set, node_state, this_node);
    }

    if (online) {
        this_node->details->online = TRUE;

    } else {
        /* remove node from contention */
        this_node->fixed = TRUE;
        this_node->weight = -INFINITY;
    }

    if (online && this_node->details->shutdown) {
        /* don't run resources here */
        this_node->fixed = TRUE;
        this_node->weight = -INFINITY;
    }

    if (this_node->details->type == node_ping) {
        crm_info("Node %s is not a pacemaker node", this_node->details->uname);

    } else if (this_node->details->unclean) {
        pe_proc_warn("Node %s is unclean", this_node->details->uname);

    } else if (this_node->details->online) {
        crm_info("Node %s is %s", this_node->details->uname,
                 this_node->details->shutdown ? "shutting down" :
                 this_node->details->pending ? "pending" :
                 this_node->details->standby ? "standby" :
                 this_node->details->maintenance ? "maintenance" : "online");

    } else {
        crm_trace("Node %s is offline", this_node->details->uname);
    }
}
/*!
 * \internal
 * \brief Find the end of a resource's name, excluding any clone suffix
 *
 * \param[in] id  Resource ID to check
 *
 * \return Pointer to last character of resource's base name
 */
const char *
pe_base_name_end(const char *id)
{
    if (!pcmk__str_empty(id)) {
        const char *end = id + strlen(id) - 1;

        for (const char *s = end; s > id; --s) {
            switch (*s) {
                case '0':
                case '1':
                case '2':
                case '3':
                case '4':
                case '5':
                case '6':
                case '7':
                case '8':
                case '9':
                    break;
                case ':':
                    return (s == end)? s : (s - 1);
                default:
                    return end;
            }
        }
        return end;
    }
    return NULL;
}

/*!
 * \internal
 * \brief Get a resource name excluding any clone suffix
 *
 * \param[in] last_rsc_id  Resource ID to check
 *
 * \return Pointer to newly allocated string with resource's base name
 * \note It is the caller's responsibility to free() the result.
 *       This asserts on error, so callers can assume result is not NULL.
 */
char *
clone_strip(const char *last_rsc_id)
{
    const char *end = pe_base_name_end(last_rsc_id);
    char *basename = NULL;

    CRM_ASSERT(end);
    basename = strndup(last_rsc_id, end - last_rsc_id + 1);
    CRM_ASSERT(basename);
    return basename;
}

/*!
 * \internal
 * \brief Get the name of the first instance of a cloned resource
 *
 * \param[in] last_rsc_id  Resource ID to check
 *
 * \return Pointer to newly allocated string with resource's base name plus :0
 * \note It is the caller's responsibility to free() the result.
 *       This asserts on error, so callers can assume result is not NULL.
 */
char *
clone_zero(const char *last_rsc_id)
{
    const char *end = pe_base_name_end(last_rsc_id);
    size_t base_name_len = end - last_rsc_id + 1;
    char *zero = NULL;

    CRM_ASSERT(end);
    zero = calloc(base_name_len + 3, sizeof(char));
    CRM_ASSERT(zero);
    memcpy(zero, last_rsc_id, base_name_len);
    zero[base_name_len] = ':';
    zero[base_name_len + 1] = '0';
    return zero;
}

static pe_resource_t *
create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
    pe_resource_t *rsc = NULL;
    xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    copy_in_properties(xml_rsc, rsc_entry);
    crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
    crm_log_xml_debug(xml_rsc, "Orphan resource");

    if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) {
        return NULL;
    }

    if (xml_contains_remote_node(xml_rsc)) {
        pe_node_t *node;

        crm_debug("Detected orphaned remote node %s", rsc_id);
        node = pe_find_node(data_set->nodes, rsc_id);
        if (node == NULL) {
            node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
        }
        link_rsc2remotenode(data_set, rsc);

        if (node) {
            crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
            node->details->shutdown = TRUE;
        }
    }

    if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
        /* This orphaned rsc needs to be mapped to a container. */
        crm_trace("Detected orphaned container filler %s", rsc_id);
        pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
    }
    pe__set_resource_flags(rsc, pe_rsc_orphan);
    data_set->resources = g_list_append(data_set->resources, rsc);
    return rsc;
}

/*!
 * \internal
 * \brief Create orphan instance for anonymous clone resource history
 */
static pe_resource_t *
create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
                        pe_node_t *node, pe_working_set_t *data_set)
{
    pe_resource_t *top = pe__create_clone_child(parent, data_set);

    // find_rsc() because we might be a cloned group
    pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);

    pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
                 top->id, parent->id, rsc_id, node->details->uname);
    return orphan;
}
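Usage sketch for the helpers above, with a hypothetical history ID; both functions assert on allocation failure, so the results need no NULL checks:

// Hedged sketch; "galera:2" is a hypothetical clone instance ID
char *base = clone_strip("galera:2");   // yields "galera"
char *zero = clone_zero("galera:2");    // yields "galera:0"

free(base);
free(zero);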
/*!
 * \internal
 * \brief Check a node for an instance of an anonymous clone
 *
 * Return a child instance of the specified anonymous clone, in order of
 * preference: (1) the instance running on the specified node, if any;
 * (2) an inactive instance (i.e. within the total of clone-max instances);
 * (3) a newly created orphan (i.e. clone-max instances are already active).
 *
 * \param[in] data_set  Cluster information
 * \param[in] node      Node on which to check for instance
 * \param[in] parent    Clone to check
 * \param[in] rsc_id    Name of cloned resource in history (without instance)
 */
static pe_resource_t *
find_anonymous_clone(pe_working_set_t * data_set, pe_node_t * node, pe_resource_t * parent,
                     const char *rsc_id)
{
    GListPtr rIter = NULL;
    pe_resource_t *rsc = NULL;
    pe_resource_t *inactive_instance = NULL;
    gboolean skip_inactive = FALSE;

    CRM_ASSERT(parent != NULL);
    CRM_ASSERT(pe_rsc_is_clone(parent));
    CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));

    // Check for active (or partially active, for cloned groups) instance
    pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id);
    for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
        GListPtr locations = NULL;
        pe_resource_t *child = rIter->data;

        /* Check whether this instance is already known to be active or pending
         * anywhere, at this stage of unpacking. Because this function is called
         * for a resource before the resource's individual operation history
         * entries are unpacked, locations will generally not contain the
         * desired node.
         *
         * However, there are three exceptions:
         * (1) when child is a cloned group and we have already unpacked the
         *     history of another member of the group on the same node;
         * (2) when we've already unpacked the history of another numbered
         *     instance on the same node (which can happen if globally-unique
         *     was flipped from true to false); and
         * (3) when we re-run calculations on the same data set as part of a
         *     simulation.
         */
        child->fns->location(child, &locations, 2);
        if (locations) {
            /* We should never associate the same numbered anonymous clone
             * instance with multiple nodes, and clone instances can't migrate,
             * so there must be only one location, regardless of history.
             */
            CRM_LOG_ASSERT(locations->next == NULL);

            if (((pe_node_t *)locations->data)->details == node->details) {
                /* This child instance is active on the requested node, so check
                 * for a corresponding configured resource. We use find_rsc()
                 * instead of child because child may be a cloned group, and we
                 * need the particular member corresponding to rsc_id.
                 *
                 * If the history entry is orphaned, rsc will be NULL.
                 */
                rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
                if (rsc) {
                    /* If there are multiple instance history entries for an
                     * anonymous clone in a single node's history (which can
                     * happen if globally-unique is switched from true to
                     * false), we want to consider the instances beyond the
                     * first as orphans, even if there are inactive instance
                     * numbers available.
                     */
                    if (rsc->running_on) {
                        crm_notice("Active (now-)anonymous clone %s has "
                                   "multiple (orphan) instance histories on %s",
                                   parent->id, node->details->uname);
                        skip_inactive = TRUE;
                        rsc = NULL;
                    } else {
                        pe_rsc_trace(parent, "Resource %s, active", rsc->id);
                    }
                }
            }
            g_list_free(locations);

        } else {
            pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
            if (!skip_inactive && !inactive_instance
                && !pcmk_is_set(child->flags, pe_rsc_block)) {
                // Remember one inactive instance in case we don't find active
                inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
                                                          pe_find_clone);

                /* ... but don't use it if it was already associated with a
                 * pending action on another node
                 */
                if (inactive_instance && inactive_instance->pending_node
                    && (inactive_instance->pending_node->details != node->details)) {
                    inactive_instance = NULL;
                }
            }
        }
    }

    if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
        pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id);
        rsc = inactive_instance;
    }

    /* If the resource has "requires" set to "quorum" or "nothing", and we don't
     * have a clone instance for every node, we don't want to consume a valid
     * instance number for unclean nodes. Such instances may appear to be active
     * according to the history, but should be considered inactive, so we can
     * start an instance elsewhere. Treat such instances as orphans.
     *
     * An exception is instances running on guest nodes -- since guest node
     * "fencing" is actually just a resource stop, requires shouldn't apply.
     *
     * @TODO Ideally, we'd use an inactive instance number if it is not needed
     * for any clean instances. However, we don't know that at this point.
     */
    if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
        && (!node->details->online || node->details->unclean)
        && !pe__is_guest_node(node)
        && !pe__is_universal_clone(parent, data_set)) {

        rsc = NULL;
    }

    if (rsc == NULL) {
        rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
        pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
    }
    return rsc;
}
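A usage sketch of find_anonymous_clone() under its documented preference order, with hypothetical IDs: a history entry "galera:2" on a node resolves to the instance active there, else an inactive instance, else a freshly created orphan.

// Hedged sketch; "galera-clone" and "galera" are hypothetical IDs
pe_resource_t *parent = pe_find_resource(data_set->resources, "galera-clone");
pe_resource_t *inst = find_anonymous_clone(data_set, node, parent, "galera");

CRM_ASSERT(inst != NULL);   // worst case, an orphan was just created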
static pe_resource_t *
unpack_find_resource(pe_working_set_t * data_set, pe_node_t * node,
                     const char *rsc_id, xmlNode * rsc_entry)
{
    pe_resource_t *rsc = NULL;
    pe_resource_t *parent = NULL;

    crm_trace("looking for %s", rsc_id);
    rsc = pe_find_resource(data_set->resources, rsc_id);

    if (rsc == NULL) {
        /* If we didn't find the resource by its name in the operation history,
         * check it again as a clone instance. Even when clone-max=0, we create
         * a single :0 orphan to match against here.
         */
        char *clone0_id = clone_zero(rsc_id);
        pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);

        if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
            rsc = clone0;
            parent = uber_parent(clone0);
            crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
        } else {
            crm_trace("%s is not known as %s either (orphan)",
                      rsc_id, clone0_id);
        }
        free(clone0_id);

    } else if (rsc->variant > pe_native) {
        crm_trace("Resource history for %s is orphaned because it is no longer primitive",
                  rsc_id);
        return NULL;

    } else {
        parent = uber_parent(rsc);
    }

    if (pe_rsc_is_anon_clone(parent)) {

        if (pe_rsc_is_bundled(parent)) {
            rsc = pe__find_bundle_replica(parent->parent, node);
        } else {
            char *base = clone_strip(rsc_id);

            rsc = find_anonymous_clone(data_set, node, parent, base);
            free(base);
            CRM_ASSERT(rsc != NULL);
        }
    }

    if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei)
        && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) {

        free(rsc->clone_name);
        rsc->clone_name = strdup(rsc_id);
        pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
                     rsc_id, node->details->uname, rsc->id,
                     (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
    }
    return rsc;
}

static pe_resource_t *
process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t * data_set)
{
    pe_resource_t *rsc = NULL;
    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);

    crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname);
    rsc = create_fake_resource(rsc_id, rsc_entry, data_set);

    if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        pe__clear_resource_flags(rsc, pe_rsc_managed);

    } else {
        CRM_CHECK(rsc != NULL, return NULL);
        pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
        resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
    }
    return rsc;
}

static void
process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
                  enum action_fail_response on_fail,
                  xmlNode * migrate_op, pe_working_set_t * data_set)
{
    pe_node_t *tmpnode = NULL;
    char *reason = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
                 rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail));

    /* process current state */
    if (rsc->role != RSC_ROLE_UNKNOWN) {
        pe_resource_t *iter = rsc;

        while (iter) {
            if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
                pe_node_t *n = pe__copy_node(node);

                pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name,
                             n->details->uname);
                g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
            }
            if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
                break;
            }
            iter = iter->parent;
        }
    }

    /* If a managed resource is believed to be running, but node is down ... */
    if (rsc->role > RSC_ROLE_STOPPED
        && node->details->online == FALSE
        && node->details->maintenance == FALSE
        && pcmk_is_set(rsc->flags, pe_rsc_managed)) {

        gboolean should_fence = FALSE;

        /* If this is a guest node, fence it (regardless of whether fencing is
         * enabled, because guest node fencing is done by recovery of the
         * container resource rather than by the fencer). Mark the resource
         * we're processing as failed. When the guest comes back up, its
         * operation history in the CIB will be cleared, freeing the affected
         * resource to run again once we are sure we know its state.
         */
        if (pe__is_guest_node(node)) {
            pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
            should_fence = TRUE;

        } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            if (pe__is_remote_node(node) && node->details->remote_rsc
                && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {

                /* Setting unseen means that fencing of the remote node will
                 * occur only if the connection resource is not going to start
                 * somewhere. This allows connection resources on a failed
                 * cluster node to move to another node without requiring the
                 * remote nodes to be fenced as well.
                 */
                node->details->unseen = TRUE;
                reason = crm_strdup_printf("%s is active there (fencing will be"
                                           " revoked if remote connection can "
                                           "be re-established elsewhere)",
                                           rsc->id);
            }
            should_fence = TRUE;
        }

        if (should_fence) {
            if (reason == NULL) {
               reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
            }
            pe_fence_node(data_set, node, reason, FALSE);
        }
        free(reason);
    }

    if (node->details->unclean) {
        /* No extra processing needed
         * Also allows resources to be started again after a node is shot
         */
        on_fail = action_fail_ignore;
    }

    switch (on_fail) {
        case action_fail_ignore:
            /* nothing to do */
            break;

        case action_fail_demote:
            pe__set_resource_flags(rsc, pe_rsc_failed);
            demote_action(rsc, node, FALSE);
            break;

        case action_fail_fence:
            /* treat it as if it is still running
             * but also mark the node as unclean
             */
            reason = crm_strdup_printf("%s failed there", rsc->id);
            pe_fence_node(data_set, node, reason, FALSE);
            free(reason);
            break;

        case action_fail_standby:
            node->details->standby = TRUE;
            node->details->standby_onfail = TRUE;
            break;

        case action_fail_block:
            /* is_managed == FALSE will prevent any
             * actions being sent for the resource
             */
            pe__clear_resource_flags(rsc, pe_rsc_managed);
            pe__set_resource_flags(rsc, pe_rsc_block);
            break;

        case action_fail_migrate:
            /* make sure it comes up somewhere else
             * or not at all
             */
            resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
            break;

        case action_fail_stop:
-            rsc->next_role = RSC_ROLE_STOPPED;
+            pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
            break;

        case action_fail_recover:
            if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
                pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
                stop_action(rsc, node, FALSE);
            }
            break;

        case action_fail_restart_container:
            pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
            if (rsc->container && pe_rsc_is_bundled(rsc)) {
                /* A bundle's remote connection can run on a different node than
                 * the bundle's container. We don't necessarily know where the
                 * container is running yet, so remember it and add a stop
                 * action for it later.
                 */
                data_set->stop_needed = g_list_prepend(data_set->stop_needed,
                                                       rsc->container);
            } else if (rsc->container) {
                stop_action(rsc->container, node, FALSE);
            } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
                stop_action(rsc, node, FALSE);
            }
            break;

        case action_fail_reset_remote:
            pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
            if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                tmpnode = NULL;
                if (rsc->is_remote_node) {
                    tmpnode = pe_find_node(data_set->nodes, rsc->id);
                }
                if (tmpnode &&
                    pe__is_remote_node(tmpnode) &&
                    tmpnode->details->remote_was_fenced == 0) {

                    /* The remote connection resource failed in a way that
                     * should result in fencing the remote node.
                     */
                    pe_fence_node(data_set, tmpnode,
                                  "remote connection is unrecoverable", FALSE);
                }
            }

            /* require the stop action regardless if fencing is occurring or not.
             */
            if (rsc->role > RSC_ROLE_STOPPED) {
                stop_action(rsc, node, FALSE);
            }

            /* if reconnect delay is in use, prevent the connection from exiting the
             * "STOPPED" role until the failure is cleared by the delay timeout.
             */
            if (rsc->remote_reconnect_ms) {
-                rsc->next_role = RSC_ROLE_STOPPED;
+                pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
            }
            break;
    }

    /* ensure a remote-node connection failure forces an unclean remote-node
     * to be fenced. By setting unseen = FALSE, the remote-node failure will
     * result in a fencing operation regardless if we're going to attempt to
     * reconnect to the remote-node in this transition or not.
     */
    if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
        tmpnode = pe_find_node(data_set->nodes, rsc->id);
        if (tmpnode && tmpnode->details->unclean) {
            tmpnode->details->unseen = FALSE;
        }
    }

    if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
        if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
            if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                pcmk__config_warn("Detected active orphan %s running on %s",
                                  rsc->id, node->details->uname);
            } else {
                pcmk__config_warn("Resource '%s' must be stopped manually on "
                                  "%s because cluster is configured not to "
                                  "stop active orphans",
                                  rsc->id, node->details->uname);
            }
        }

        native_add_running(rsc, node, data_set);
        switch (on_fail) {
            case action_fail_ignore:
                break;
            case action_fail_demote:
            case action_fail_block:
                pe__set_resource_flags(rsc, pe_rsc_failed);
                break;
            default:
                pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
                break;
        }

    } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
        /* Only do this for older status sections that included instance numbers
         * Otherwise stopped instances will appear as orphans
         */
        pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
        free(rsc->clone_name);
        rsc->clone_name = NULL;

    } else {
        GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
                                                       FALSE);
        GListPtr gIter = possible_matches;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_action_t *stop = (pe_action_t *) gIter->data;

            pe__set_action_flags(stop, pe_action_optional);
        }

        g_list_free(possible_matches);
    }
}
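One consequence of the action_fail_reset_remote handling above, sketched for a hypothetical connection resource configured with reconnect_interval="30s": rsc->remote_reconnect_ms would be 30000, so after a failure the connection stays pinned to the Stopped role until the failure clears.

// Hedged sketch; reconnect_interval="30s" is a hypothetical configuration
if (rsc->remote_reconnect_ms > 0) {     // 30000 in this scenario
    pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
}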
"Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = pcmk__op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); } } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; *stop_index = -1; *start_index = -1; for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei) && pcmk__str_eq(status, "0", pcmk__str_casei)) { *stop_index = counter; } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if (pcmk__strcase_any_of(rc, "0", "8", NULL)) { implied_monitor_start = counter; } } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { time_t lock_time = 0; // When lock started (i.e. node shutdown time) if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK, &lock_time) == pcmk_ok) && (lock_time != 0)) { if ((data_set->shutdown_lock > 0) && (get_effective_time(data_set) > (lock_time + data_set->shutdown_lock))) { pe_rsc_info(rsc, "Shutdown lock for %s on %s expired", rsc->id, node->details->uname); pe__clear_resource_history(rsc, node, data_set); } else { rsc->lock_node = node; rsc->lock_time = lock_time; } } } +/*! 
+ * \internal
+ * \brief Unpack one lrm_resource entry from a node's CIB status
+ *
+ * \param[in] node          Node whose status is being unpacked
+ * \param[in] lrm_resource  lrm_resource XML being unpacked
+ * \param[in] data_set      Cluster working set
+ *
+ * \return Resource corresponding to the entry, or NULL if no operation history
+ */
 static pe_resource_t *
-unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set)
+unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
+                    pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     int stop_index = -1;
     int start_index = -1;
     enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;

     const char *task = NULL;
-    const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
+    const char *rsc_id = ID(lrm_resource);

     pe_resource_t *rsc = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;

     xmlNode *migrate_op = NULL;
     xmlNode *rsc_op = NULL;
     xmlNode *last_failure = NULL;

     enum action_fail_response on_fail = action_fail_ignore;
     enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;

-    crm_trace("[%s] Processing %s on %s",
-              crm_element_name(rsc_entry), rsc_id, node->details->uname);
-
-    /* extract operations */
-    op_list = NULL;
-    sorted_op_list = NULL;
+    if (rsc_id == NULL) {
+        crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
+                 " entry without id");
+        return NULL;
+    }
+    crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s",
+              rsc_id, node->details->uname);

-    for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
-         rsc_op = pcmk__xe_next(rsc_op)) {
+    // Build a list of individual lrm_rsc_op entries, so we can sort them
+    for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
+         rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {

-        if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
-                         pcmk__str_none)) {
-            op_list = g_list_prepend(op_list, rsc_op);
-        }
+        op_list = g_list_prepend(op_list, rsc_op);
     }

     if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         }
     }

     /* find the resource */
-    rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry);
+    rsc = unpack_find_resource(data_set, node, rsc_id, lrm_resource);
     if (rsc == NULL) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         } else {
-            rsc = process_orphan_resource(rsc_entry, node, data_set);
+            rsc = process_orphan_resource(lrm_resource, node, data_set);
         }
     }
     CRM_ASSERT(rsc != NULL);

     // Check whether the resource is "shutdown-locked" to this node
     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
-        unpack_shutdown_lock(rsc_entry, rsc, node, data_set);
+        unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
     }

     /* process operations */
     saved_role = rsc->role;
     rsc->role = RSC_ROLE_UNKNOWN;
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);

     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;

         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
             migrate_op = rsc_op;
         }

         unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
     }

     /* create active recurring operations as optional */
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
     process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);

     /* no need to free the contents */
     g_list_free(sorted_op_list);

     process_rsc_state(rsc, node, on_fail, migrate_op, data_set);

     if (get_target_role(rsc,
&req_role)) { if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { - pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s" - " with requested next role %s", - rsc->id, role2text(rsc->next_role), role2text(req_role)); - rsc->next_role = req_role; + pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE); } else if (req_role > rsc->next_role) { pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) { pe_resource_t *rsc; pe_resource_t *container; const char *rsc_id; const char *container_id; if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) { continue; } container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER); rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(data_set->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL || !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) || rsc->container != NULL) { continue; } pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } +/*! + * \internal + * \brief Unpack one node's lrm status section + * + * \param[in] node Node whose status is being unpacked + * \param[in] xml CIB node state XML + * \param[in] data_set Cluster working set + */ static void -unpack_lrm_resources(pe_node_t *node, xmlNode *lrm_rsc_list, - pe_working_set_t *data_set) +unpack_node_lrm(pe_node_t *node, xmlNode *xml, pe_working_set_t *data_set) { - xmlNode *rsc_entry = NULL; - gboolean found_orphaned_container_filler = FALSE; + bool found_orphaned_container_filler = false; - for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL; - rsc_entry = pcmk__xe_next(rsc_entry)) { + // Drill down to lrm_resources section + xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE); + if (xml == NULL) { + return; + } + xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE); + if (xml == NULL) { + return; + } - if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) { - pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); - if (!rsc) { - continue; - } - if (pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) { - found_orphaned_container_filler = TRUE; - } + // Unpack each lrm_resource entry + for (xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE); + rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { + + pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set); + + if ((rsc != NULL) + && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) { + found_orphaned_container_filler = true; } } - /* now that all the resource state has been unpacked for this node - * we have to go back and map any orphaned container fillers to their - * container resource */ + /* Now that all resource state has been unpacked for this node, map any + * orphaned container fillers to their container resource. 
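+ *
+ * (Illustrative example, not taken from a real cluster: an orphaned
+ * lrm_resource entry such as
+ *     <lrm_resource id="httpd" ... container="httpd-bundle-docker-0"/>
+ * would be linked back to the "httpd-bundle-docker-0" container resource
+ * here; the "httpd" and "httpd-bundle-docker-0" names are hypothetical.)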
+ */ if (found_orphaned_container_filler) { - handle_orphaned_container_fillers(lrm_rsc_list, data_set); + handle_orphaned_container_fillers(xml, data_set); } } static void set_active(pe_resource_t * rsc) { pe_resource_t *top = uber_parent(rsc); if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pe_node_t *node = value; int *score = user_data; node->weight = *score; } #define STATUS_PATH_MAX 1024 static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, bool success_only, pe_working_set_t *data_set) { int offset = 0; char xpath[STATUS_PATH_MAX]; xmlNode *xml = NULL; offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node); offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']", resource); /* Need to check against transition_magic too? */ if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATE, pcmk__str_casei)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op, source); } else if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATED, pcmk__str_casei)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op, source); } else { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op); } CRM_LOG_ASSERT(offset > 0); xml = get_xpath_object(xpath, data_set->input, LOG_DEBUG); if (xml && success_only) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_LRM_OP_ERROR; crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status); if ((rc != PCMK_OCF_OK) || (status != PCMK_LRM_OP_DONE)) { return NULL; } } return xml; } static int pe__call_id(xmlNode *op_xml) { int id = 0; if (op_xml) { crm_element_value_int(op_xml, XML_LRM_ATTR_CALLID, &id); } return id; } /*! * \brief Check whether a stop happened on the same node after some event * * \param[in] rsc Resource being checked * \param[in] node Node being checked * \param[in] xml_op Event that stop is being compared to * \param[in] data_set Cluster working set * * \return TRUE if stop happened after event, FALSE otherwise * * \note This is really unnecessary, but kept as a safety mechanism. We * currently don't save more than one successful event in history, so this * only matters when processing really old CIB files that we don't * technically support anymore, or as preparation for logging an extended * history in the future. */ static bool stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->uname, NULL, TRUE, data_set); return (stop_op && (pe__call_id(stop_op) > pe__call_id(xml_op))); } static void unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { /* A successful migration sequence is: * migrate_to on source node * migrate_from on target node * stop on source node * * If a migrate_to is followed by a stop, the entire migration (successful * or failed) is complete, and we don't care what happened on the target. * * If no migrate_from has happened, the migration is considered to be * "partial". 
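 *
 * (Illustrative example, with hypothetical names and call IDs: a fully
 * completed migration of rsc1 from node1 to node2 might be recorded as
 *     rsc1_migrate_to_0   on node1  (call-id 12)
 *     rsc1_migrate_from_0 on node2  (call-id 13)
 *     rsc1_stop_0         on node1  (call-id 14)
 * while a history containing only the migrate_to entry is "partial".)
 *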
If the migrate_from failed, make sure the resource gets * stopped on both source and target (if up). * * If the migrate_to and migrate_from both succeeded (which also implies the * resource is no longer running on the source), but there is no stop, the * migration is considered to be "dangling". Schedule a stop on the source * in this case. */ int from_rc = 0; int from_status = 0; pe_node_t *target_node = NULL; pe_node_t *source_node = NULL; xmlNode *migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); if (stop_happened_after(rsc, node, xml_op, data_set)) { return; } // Clones are not allowed to migrate, so role can't be master rsc->role = RSC_ROLE_STARTED; target_node = pe_find_node(data_set->nodes, target); source_node = pe_find_node(data_set->nodes, source); // Check whether there was a migrate_from action on the target migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, FALSE, data_set); if (migrate_from) { crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d", ID(migrate_from), target, from_status, from_rc); } if (migrate_from && from_rc == PCMK_OCF_OK && from_status == PCMK_LRM_OP_DONE) { /* The migrate_to and migrate_from both succeeded, so mark the migration * as "dangling". This will be used to schedule a stop action on the * source without affecting the target. */ pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op), source); rsc->role = RSC_ROLE_STOPPED; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); } } else { // Pending, or complete but erased if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); if (source_node && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ rsc->partial_migration_target = target_node; rsc->partial_migration_source = source_node; } } else { /* Consider it failed here - forces a restart, prevents migration */ pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop); pe__clear_resource_flags(rsc, pe_rsc_allow_migrate); } } } // Is there an action_name in node_name's rsc history newer than call_id? 
static bool newer_op(pe_resource_t *rsc, const char *action_name, const char *node_name, int call_id, pe_working_set_t *data_set) { xmlNode *action = find_lrm_op(rsc->id, action_name, node_name, NULL, TRUE, data_set); return pe__call_id(action) > call_id; } static void unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { int target_stop_id = 0; int target_migrate_from_id = 0; xmlNode *target_stop = NULL; xmlNode *target_migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. */ rsc->role = RSC_ROLE_STARTED; // Check for stop on the target target_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, target, NULL, TRUE, data_set); target_stop_id = pe__call_id(target_stop); // Check for migrate_from on the target target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, TRUE, data_set); target_migrate_from_id = pe__call_id(target_migrate_from); if ((target_stop == NULL) || (target_stop_id < target_migrate_from_id)) { /* There was no stop on the target, or a stop that happened before a * migrate_from, so assume the resource is still active on the target * (if it is up). */ pe_node_t *target_node = pe_find_node(data_set->nodes, target); pe_rsc_trace(rsc, "stop (%d) + migrate_from (%d)", target_stop_id, target_migrate_from_id); if (target_node && target_node->details->online) { native_add_running(rsc, target_node, data_set); } } else if (target_migrate_from == NULL) { /* We know there was a stop on the target, but there may not have been a * migrate_from (the stop could have happened before migrate_from was * scheduled or attempted). * * That means this could be a "dangling" migration. But first, check * whether there is a newer successful stop, start, or migrate_from on * the source node -- it's possible the failed migration was followed by * a successful stop, full restart, or migration in the reverse * direction, in which case we don't want to force a stop. */ int source_migrate_to_id = pe__call_id(xml_op); if (newer_op(rsc, CRMD_ACTION_MIGRATED, source, source_migrate_to_id, data_set) || newer_op(rsc, CRMD_ACTION_START, source, source_migrate_to_id, data_set) || newer_op(rsc, CRMD_ACTION_STOP, source, source_migrate_to_id, data_set)) { return; } // Mark node as having dangling migration so we can force a stop later rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } } static void unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *source_stop = NULL; xmlNode *source_migrate_to = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(target, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. 
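 *
 * (For example, with hypothetical call IDs: if the source node's history
 * shows this resource's migrate_to at call-id 18 but its most recent stop
 * at call-id 12, the stop predates the migration attempt, so the resource
 * is assumed below to still be active on the source.)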
*/ rsc->role = RSC_ROLE_STARTED; // Check for a stop on the source source_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, source, NULL, TRUE, data_set); // Check for a migrate_to on the source source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, source, target, TRUE, data_set); if ((source_stop == NULL) || (pe__call_id(source_stop) < pe__call_id(source_migrate_to))) { /* There was no stop on the source, or a stop that happened before * migrate_to, so assume the resource is still active on the source (if * it is up). */ pe_node_t *source_node = pe_find_node(data_set->nodes, source); if (source_node && source_node->details->online) { native_add_running(rsc, source_node, data_set); } } } static void record_failed_op(xmlNode *op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { xmlNode *xIter = NULL; const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY); if (node->details->online == FALSE) { return; } for (xIter = data_set->failed->children; xIter; xIter = xIter->next) { const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY); const char *uname = crm_element_value(xIter, XML_ATTR_UNAME); if(pcmk__str_eq(op_key, key, pcmk__str_casei) && pcmk__str_eq(uname, node->details->uname, pcmk__str_casei)) { crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname); return; } } crm_trace("Adding entry %s on %s", op_key, node->details->uname); crm_xml_add(op, XML_ATTR_UNAME, node->details->uname); crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id); add_node_copy(data_set->failed, op); } static const char *get_op_key(xmlNode *xml_op) { const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if(key == NULL) { key = ID(xml_op); } return key; } static const char * last_change_str(xmlNode *xml_op) { time_t when; const char *when_s = NULL; if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &when) == pcmk_ok) { when_s = pcmk__epoch2str(&when); if (when_s) { // Skip day of week to make message shorter when_s = strchr(when_s, ' '); if (when_s) { ++when_s; } } } return ((when_s && *when_s)? when_s : "unknown time"); } /*! * \internal * \brief Compare two on-fail values * * \param[in] first One on-fail value to compare * \param[in] second The other on-fail value to compare * * \return A negative number if second is more severe than first, zero if they * are equal, or a positive number if first is more severe than second. * \note This is only needed until the action_fail_response values can be * renumbered at the next API compatibility break. */ static int cmp_on_fail(enum action_fail_response first, enum action_fail_response second) { switch (first) { case action_fail_demote: switch (second) { case action_fail_ignore: return 1; case action_fail_demote: return 0; default: return -1; } break; case action_fail_reset_remote: switch (second) { case action_fail_ignore: case action_fail_demote: case action_fail_recover: return 1; case action_fail_reset_remote: return 0; default: return -1; } break; case action_fail_restart_container: switch (second) { case action_fail_ignore: case action_fail_demote: case action_fail_recover: case action_fail_reset_remote: return 1; case action_fail_restart_container: return 0; default: return -1; } break; default: break; } switch (second) { case action_fail_demote: return (first == action_fail_ignore)? 
-1 : 1; case action_fail_reset_remote: switch (first) { case action_fail_ignore: case action_fail_demote: case action_fail_recover: return -1; default: return 1; } break; case action_fail_restart_container: switch (first) { case action_fail_ignore: case action_fail_demote: case action_fail_recover: case action_fail_reset_remote: return -1; default: return 1; } break; default: break; } return first - second; } static void unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; pe_action_t *action = NULL; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return); *last_failure = xml_op; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; } if (exit_reason == NULL) { exit_reason = ""; } if (!pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) && (rc == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); } else { crm_warn("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); if (is_probe && (rc != PCMK_OCF_OK) && (rc != PCMK_OCF_NOT_RUNNING) && (rc != PCMK_OCF_RUNNING_MASTER)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. */ crm_notice("If it is not possible for %s to run on %s, see " "the resource-discovery option for location constraints", rsc->id, node->details->uname); } record_failed_op(xml_op, node, rsc, data_set); } action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); if (cmp_on_fail(*on_fail, action->on_fail) < 0) { pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), fail2text(action->on_fail), action->uuid, key); *on_fail = action->on_fail; } if (!strcmp(task, CRMD_ACTION_STOP)) { resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATE)) { unpack_migrate_to_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATED)) { unpack_migrate_from_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) { if (action->on_fail == action_fail_block) { rsc->role = RSC_ROLE_MASTER; - rsc->next_role = RSC_ROLE_STOPPED; + pe__set_next_role(rsc, RSC_ROLE_STOPPED, + "demote with on-fail=block"); } else if(rc == PCMK_OCF_NOT_RUNNING) { rsc->role = RSC_ROLE_STOPPED; } else { /* Staying in master role would put the scheduler and controller * into a loop. Setting slave role is not dangerous because the * resource will be stopped as part of recovery, and any master * promotion will be ordered after that stop. 
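 *
 * (In other words, if the role stayed master here, the scheduler would
 * keep scheduling a demote that presumably keeps failing, and the cycle
 * would repeat on every transition.)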
*/ rsc->role = RSC_ROLE_SLAVE; } } if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) { /* leave stopped */ pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id); rsc->role = RSC_ROLE_STOPPED; } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "Setting %s active", rsc->id); set_active(rsc); } pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), pcmk__btoa(node->details->unclean), fail2text(action->on_fail), role2text(action->fail_role)); if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { - rsc->next_role = action->fail_role; + pe__set_next_role(rsc, action->fail_role, "failure"); } if (action->fail_role == RSC_ROLE_STOPPED) { int score = -INFINITY; pe_resource_t *fail_rsc = rsc; if (fail_rsc->parent) { pe_resource_t *parent = uber_parent(fail_rsc); if (pe_rsc_is_clone(parent) && !pcmk_is_set(parent->flags, pe_rsc_unique)) { /* For clone resources, if a child fails on an operation * with on-fail = stop, all the resources fail. Do this by preventing * the parent from coming up again. */ fail_rsc = parent; } } crm_notice("%s will not be started under current conditions", fail_rsc->id); /* make sure it doesn't come up again */ if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = pe__node_list2table(data_set->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } pe_free_action(action); } /*! * \internal * \brief Remap operation status based on action result * * Given an action result, determine an appropriate operation status for the * purposes of responding to the action (the status provided by the executor is * not directly usable since the executor does not know what was expected). * * \param[in,out] rsc Resource that operation history entry is for * \param[in] rc Actual return code of operation * \param[in] target_rc Expected return code of operation * \param[in] node Node where operation was executed * \param[in] xml_op Operation history entry XML from CIB status * \param[in,out] on_fail What should be done about the result * \param[in] data_set Current cluster working set * * \return Operation status based on return code and action info * \note This may update the resource's current and next role. */ static int determine_op_status( pe_resource_t *rsc, int rc, int target_rc, pe_node_t * node, xmlNode * xml_op, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; int result = PCMK_LRM_OP_DONE; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return PCMK_LRM_OP_ERROR); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; task = "probe"; } if (target_rc < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * target_rc in the transition key, which (along with the similar case * of a corrupted transition key in the CIB) will be reported to this * function as -1. Pacemaker 2.0+ does not support rolling upgrades from * those versions or processing of saved CIB files from those versions, * so we do not need to care much about this case. 
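 *
 * (For reference, a well-formed transition key looks roughly like
 * "11:54:0:0d4c9f65-..." with the expected rc as its third field; the
 * values shown are hypothetical. A key missing that field decodes to a
 * target_rc of -1 here.)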
 */
        result = PCMK_LRM_OP_ERROR;
        crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)",
                 key, node->details->uname);

    } else if (target_rc != rc) {
        result = PCMK_LRM_OP_ERROR;
        pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)",
                     key, node->details->uname,
                     target_rc, services_ocf_exitcode_str(target_rc),
                     rc, services_ocf_exitcode_str(rc),
                     (*exit_reason? ": " : ""), exit_reason);
    }

    switch (rc) {
        case PCMK_OCF_OK:
            if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
                result = PCMK_LRM_OP_DONE;
                pe_rsc_info(rsc, "Probe found %s active on %s at %s",
                            rsc->id, node->details->uname,
                            last_change_str(xml_op));
            }
            break;

        case PCMK_OCF_NOT_RUNNING:
            if (is_probe || (target_rc == rc)
                || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                result = PCMK_LRM_OP_DONE;
                rsc->role = RSC_ROLE_STOPPED;

                /* clear any previous failure actions */
                *on_fail = action_fail_ignore;
-               rsc->next_role = RSC_ROLE_UNKNOWN;
+               pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "not running");
            }
            break;

        case PCMK_OCF_RUNNING_MASTER:
            if (is_probe && (rc != target_rc)) {
                result = PCMK_LRM_OP_DONE;
                pe_rsc_info(rsc, "Probe found %s active and promoted on %s at %s",
                            rsc->id, node->details->uname,
                            last_change_str(xml_op));
            }
            rsc->role = RSC_ROLE_MASTER;
            break;

        case PCMK_OCF_DEGRADED_MASTER:
        case PCMK_OCF_FAILED_MASTER:
            rsc->role = RSC_ROLE_MASTER;
            result = PCMK_LRM_OP_ERROR;
            break;

        case PCMK_OCF_NOT_CONFIGURED:
            result = PCMK_LRM_OP_ERROR_FATAL;
            break;

        case PCMK_OCF_UNIMPLEMENT_FEATURE:
            if (interval_ms > 0) {
                result = PCMK_LRM_OP_NOTSUPPORTED;
                break;
            }
            // fall through
        case PCMK_OCF_NOT_INSTALLED:
        case PCMK_OCF_INVALID_PARAM:
        case PCMK_OCF_INSUFFICIENT_PRIV:
            if (!pe_can_fence(data_set, node) && !strcmp(task, CRMD_ACTION_STOP)) {
                /* If a stop fails and we can't fence, there's nothing else we can do */
                pe_proc_err("No further recovery can be attempted for %s "
                            "because %s on %s failed (%s%s%s) at %s "
                            CRM_XS " rc=%d id=%s", rsc->id, task,
                            node->details->uname, services_ocf_exitcode_str(rc),
                            (*exit_reason? ": " : ""), exit_reason,
                            last_change_str(xml_op), rc, ID(xml_op));
                pe__clear_resource_flags(rsc, pe_rsc_managed);
                pe__set_resource_flags(rsc, pe_rsc_block);
            }
            result = PCMK_LRM_OP_ERROR_HARD;
            break;

        default:
            if (result == PCMK_LRM_OP_DONE) {
                crm_info("Treating unknown exit status %d from %s of %s "
                         "on %s at %s as failure",
                         rc, task, rsc->id, node->details->uname,
                         last_change_str(xml_op));
                result = PCMK_LRM_OP_ERROR;
            }
            break;
    }
    return result;
}

// Return TRUE if the last failure was for a start or monitor action and the
// resource's parameters have changed since that action ran
static bool
should_clear_for_param_change(xmlNode *xml_op, const char *task,
                              pe_resource_t *rsc, pe_node_t *node,
                              pe_working_set_t *data_set)
{
    if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
        if (pe__bundle_needs_remote_name(rsc, data_set)) {
            /* We haven't allocated resources yet, so we can't reliably
             * substitute addr parameters for the REMOTE_CONTAINER_HACK.
             * When that's needed, defer the check until later.
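             *
             * (In the normal path below, a parameter change is detected as a
             * digest mismatch: the digest recorded with the history entry no
             * longer matches the digest computed from the resource's current
             * configuration.)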
*/ pe__add_param_check(xml_op, rsc, node, pe_check_last_failure, data_set); } else { op_digest_cache_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, get_op_key(xml_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn, pe_working_set_t *data_set) { pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id); if (remote_node) { pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, data_set); order_actions(fence, action, pe_order_implies_then); } } static bool should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op, const char *task, guint interval_ms, bool is_last_failure, pe_working_set_t *data_set) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if (rsc->remote_reconnect_ms && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) { pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); if (remote_node && !remote_node->details->remote_was_fenced) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
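 *
 * (Illustrative example with made-up numbers: with failure-timeout=60s, a
 * failed monitor whose last-rc-change is 90 seconds old, for a resource
 * whose fail count has likewise expired, is considered expired here and
 * scheduled for clearing.)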
* * \param[in] rsc Resource that operation happened to * \param[in] node Node that operation happened on * \param[in] rc Actual result of operation * \param[in] xml_op Operation history entry XML * \param[in] data_set Current working set * * \return TRUE if operation history entry is expired, FALSE otherwise */ static bool check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc, xmlNode *xml_op, pe_working_set_t *data_set) { bool expired = FALSE; bool is_last_failure = pcmk__ends_with(ID(xml_op), "_last_failure_0"); time_t last_run = 0; guint interval_ms = 0; int unexpired_fail_count = 0; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *clear_reason = NULL; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((rsc->failure_timeout > 0) && (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_run) == 0)) { // Resource has a failure-timeout, and history entry has a timestamp time_t now = get_effective_time(data_set); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + rsc->failure_timeout)) && !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms, is_last_failure, data_set)) { expired = TRUE; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, xml_op, data_set); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds" " last-failure@%lld", ID(xml_op), (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, rsc->failure_timeout, (long long) last_failure); last_failure += rsc->failure_timeout + 1; if (unexpired_fail_count && (now < last_failure)) { pe__update_recheck_time(last_failure, data_set); } } if (expired) { if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ expired = FALSE; } } else if (is_last_failure && rsc->remote_reconnect_ms) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(xml_op, task, rsc, node, data_set)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { // Schedule clearing of the fail count pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason, data_set); if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_ms) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. * * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until - * after unpack_node_loop() is done). + * after unpack_node_history() is done). 
*/ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", task, rsc->id); order_after_remote_fencing(clear_op, rsc, data_set); } } if (expired && (interval_ms == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) { switch(rc) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Don't expire probes that return these values expired = FALSE; break; } } return expired; } int pe__target_rc_from_xml(xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } static enum action_fail_response get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) { enum action_fail_response result = action_fail_recover; pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); result = action->on_fail; pe_free_action(action); return result; } static void update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, const char * task, int rc, xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { gboolean clear_past_failure = FALSE; CRM_ASSERT(rsc); CRM_ASSERT(xml_op); if (rc == PCMK_OCF_NOT_RUNNING) { clear_past_failure = TRUE; } else if (rc == PCMK_OCF_NOT_INSTALLED) { rsc->role = RSC_ROLE_STOPPED; } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) { if (last_failure) { const char *op_key = get_op_key(xml_op); const char *last_failure_key = get_op_key(last_failure); if (pcmk__str_eq(op_key, last_failure_key, pcmk__str_casei)) { clear_past_failure = TRUE; } } if (rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) { rsc->role = RSC_ROLE_STOPPED; clear_past_failure = TRUE; } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { rsc->role = RSC_ROLE_MASTER; clear_past_failure = TRUE; } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) { if (*on_fail == action_fail_demote) { // Demote clears an error only if on-fail=demote clear_past_failure = TRUE; } rsc->role = RSC_ROLE_SLAVE; } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) { unpack_migrate_to_success(rsc, node, xml_op, data_set); } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname); set_active(rsc); } /* clear any previous failure actions */ if (clear_past_failure) { switch (*on_fail) { case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_block: case action_fail_ignore: case action_fail_demote: case action_fail_recover: case action_fail_restart_container: *on_fail = action_fail_ignore; - rsc->next_role = RSC_ROLE_UNKNOWN; + pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "clear past failures"); break; case action_fail_reset_remote: if (rsc->remote_reconnect_ms == 0) { /* With no reconnect interval, the connection is allowed to * start again after the remote 
node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = action_fail_ignore; - rsc->next_role = RSC_ROLE_UNKNOWN; + pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, + "clear past failures and reset remote"); } break; } } } /*! * \internal * \brief Remap informational monitor results to usual values * * Certain OCF result codes are for providing extended information to the * user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by pacemaker. * * \param[in] rc Actual result of a monitor action * \param[in] xml_op Operation history XML * \param[in] node Node that operation happened on * \param[in] rsc Resource that operation happened to * \param[in] data_set Cluster working set * * \return Result code that pacemaker should use * * \note If the result is remapped, and the node is not shutting down or failed, * the operation will be recorded in the data set's list of failed * operations, to highlight it for the user. */ static int remap_monitor_rc(int rc, xmlNode *xml_op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { int remapped_rc = pcmk__effective_rc(rc); if (rc != remapped_rc) { crm_trace("Remapping monitor result %d to %d", rc, remapped_rc); if (!node->details->shutdown || node->details->online) { record_failed_op(xml_op, node, rsc, data_set); } } return remapped_rc; } static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *on_fail, pe_working_set_t *data_set) { int rc = 0; int task_id = 0; int target_rc = 0; int status = PCMK_LRM_OP_UNKNOWN; guint interval_ms = 0; const char *task = NULL; const char *task_key = NULL; const char *exit_reason = NULL; bool expired = FALSE; pe_resource_t *parent = rsc; enum action_fail_response failure_strategy = action_fail_recover; CRM_CHECK(rsc && node && xml_op, return); target_rc = pe__target_rc_from_xml(xml_op); task_key = get_op_key(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); CRM_CHECK(task != NULL, return); CRM_CHECK(status <= PCMK_LRM_OP_INVALID, return); CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return); if (!strcmp(task, CRMD_ACTION_NOTIFY) || !strcmp(task, CRMD_ACTION_METADATA)) { /* safe to ignore these */ return; } if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { parent = uber_parent(rsc); } pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)", task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role)); if (node->details->unclean) { pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean." " Further action depends on the value of the stop's on-fail attribute", node->details->uname, rsc->id); } /* It should be possible to call remap_monitor_rc() first then call * check_operation_expiry() only if rc != target_rc, because there should * never be a fail count without at least one unexpected result in the * resource history. That would be more efficient by avoiding having to call * check_operation_expiry() for expected results. 
 *
 * However, we do have such configurations in the scheduler regression
 * tests, even if it shouldn't be possible with the current code. It's
 * probably a good idea anyway, but that would require updating the test
 * inputs to something currently possible.
 */

    if ((status != PCMK_LRM_OP_NOT_INSTALLED)
        && check_operation_expiry(rsc, node, rc, xml_op, data_set)) {
        expired = TRUE;
    }

    if (!strcmp(task, CRMD_ACTION_STATUS)) {
        rc = remap_monitor_rc(rc, xml_op, node, rsc, data_set);
    }

    if (expired && (rc != target_rc)) {
        const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);

        if (interval_ms == 0) {
            crm_notice("Ignoring expired %s failure on %s "
                       CRM_XS " actual=%d expected=%d magic=%s",
                       task_key, node->details->uname, rc, target_rc, magic);
            goto done;

        } else if(node->details->online && node->details->unclean == FALSE) {
            /* Reschedule the recurring monitor. CancelXmlOp() won't work at
             * this stage, so as a hacky workaround, forcibly change the restart
             * digest so check_action_definition() does what we want later.
             *
             * @TODO We should skip this if there is a newer successful monitor.
             *       Also, this causes rescheduling only if the history entry
             *       has an op-digest (which the expire-non-blocked-failure
             *       scheduler regression test doesn't, but that may not be a
             *       realistic scenario in production).
             */
            crm_notice("Rescheduling %s after failure expired on %s "
                       CRM_XS " actual=%d expected=%d magic=%s",
                       task_key, node->details->uname, rc, target_rc, magic);
            crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST,
                        "calculated-failure-timeout");
            goto done;
        }
    }

    /* If the executor reported an operation status of anything but done or
     * error, consider that final. But for done or error, we know better whether
     * it should be treated as a failure or not, because we know the expected
     * result.
     */
    if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) {
        status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set);
        pe_rsc_trace(rsc, "Remapped %s status to %d", task_key, status);
    }

    switch (status) {
        case PCMK_LRM_OP_CANCELLED:
            // Should never happen
            pe_err("Resource history contains cancellation '%s' "
                   "(%s of %s on %s at %s)",
                   ID(xml_op), task, rsc->id, node->details->uname,
                   last_change_str(xml_op));
            break;

        case PCMK_LRM_OP_PENDING:
            if (!strcmp(task, CRMD_ACTION_START)) {
                pe__set_resource_flags(rsc, pe_rsc_start_pending);
                set_active(rsc);

            } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
                rsc->role = RSC_ROLE_MASTER;

            } else if (!strcmp(task, CRMD_ACTION_MIGRATE) && node->details->unclean) {
                /* If a pending migrate_to action is out on an unclean node,
                 * we have to force the stop action on the target.
                 */
                const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
                pe_node_t *target = pe_find_node(data_set->nodes, migrate_target);

                if (target) {
                    stop_action(rsc, target, FALSE);
                }
            }

            if (rsc->pending_task == NULL) {
                if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) {
                    rsc->pending_task = strdup(task);
                    rsc->pending_node = node;
                } else {
                    /* Pending probes are not printed, even if pending
                     * operations are requested. If someone ever requests that
                     * behavior, enable the below and the corresponding part of
                     * native.c:native_pending_task().
*/ #if 0 rsc->pending_task = strdup("probe"); rsc->pending_node = node; #endif } } break; case PCMK_LRM_OP_DONE: pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s", task, rsc->id, node->details->uname, last_change_str(xml_op), ID(xml_op)); update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_INSTALLED: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if (failure_strategy == action_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " CRM_XS " status=%d rc=%d id=%s", task, rsc->id, node->details->uname, status, rc, ID(xml_op)); /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */ *on_fail = action_fail_migrate; } resource_location(parent, node, -INFINITY, "hard-error", data_set); unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_CONNECTED: if (pe__is_guest_or_remote_node(node) && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) { /* We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ pe__set_resource_flags(node->details->remote_rsc, pe_rsc_failed|pe_rsc_stop); } // fall through case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_ERROR_HARD: case PCMK_LRM_OP_ERROR_FATAL: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_INVALID: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if ((failure_strategy == action_fail_ignore) || (failure_strategy == action_fail_restart_container && !strcmp(task, CRMD_ACTION_STOP))) { crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s " "succeeded " CRM_XS " rc=%d id=%s", task, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); pe__set_resource_flags(rsc, pe_rsc_failure_ignored); record_failed_op(xml_op, node, rsc, data_set); if ((failure_strategy == action_fail_restart_container) && cmp_on_fail(*on_fail, action_fail_recover) <= 0) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); if(status == PCMK_LRM_OP_ERROR_HARD) { do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s", parent->id, node->details->uname, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, node, -INFINITY, "hard-error", data_set); } else if(status == PCMK_LRM_OP_ERROR_FATAL) { crm_err("Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s", parent->id, services_ocf_exitcode_str(rc), (*exit_reason? 
": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, NULL, -INFINITY, "fatal-error", data_set); } } break; } done: pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s", rsc->id, task, role2text(rsc->role), role2text(rsc->next_role)); } static void add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, pe_working_set_t *data_set) { const char *cluster_name = NULL; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID), strdup(node->details->id)); if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE)); } cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name"); if (cluster_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME), strdup(cluster_name)); } pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data, node->details->attrs, NULL, overwrite, data_set); if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { const char *site_name = pe_node_attribute_raw(node, "site-name"); if (site_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(site_name)); } else if (cluster_name) { /* Default to cluster-name if unset */ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(cluster_name)); } } } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GListPtr gIter = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", ID(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); pe_node_t *this_node = NULL; xmlNode *node_state = NULL; for (node_state = pcmk__xe_first_child(status); 
node_state != NULL; node_state = pcmk__xe_next(node_state)) { if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { const char *uname = crm_element_value(node_state, XML_ATTR_UNAME); if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) { continue; } this_node = pe_find_node(data_set->nodes, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pe__is_guest_or_remote_node(this_node)) { determine_remote_online_status(data_set, this_node); } else { determine_online_status(node_state, this_node, data_set); } if (this_node->details->online || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE); for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) { if (pcmk__str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) { const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID); if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } } } return output; } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index 831f890571..dbfe048b9b 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2511 +1,2512 @@ /* - * Copyright 2004-2020 the Pacemaker project contributors + * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/

#include <crm_internal.h>

#include <glib.h>
#include <stdbool.h>

#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>

#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>

#include "pe_status_private.h"

extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, pe_working_set_t * data_set, guint interval_ms);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled);

#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
    pe_rsc_action_details_t *details;

    CRM_CHECK(action != NULL, return NULL);
    if (action->action_details == NULL) {
        action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
        CRM_CHECK(action->action_details != NULL, return NULL);
    }
    details = (pe_rsc_action_details_t *) action->action_details;
    if (details->versioned_parameters == NULL) {
        details->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS);
    }
    if (details->versioned_meta == NULL) {
        details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
    }
    return details;
}

static void
pe_free_rsc_action_details(pe_action_t *action)
{
    pe_rsc_action_details_t *details;

    if ((action == NULL) || (action->action_details == NULL)) {
        return;
    }
    details = (pe_rsc_action_details_t *) action->action_details;
    if (details->versioned_parameters) {
        free_xml(details->versioned_parameters);
    }
    if (details->versioned_meta) {
        free_xml(details->versioned_meta);
    }
    action->action_details = NULL;
}
#endif

/*!
 * \internal
 * \brief Check whether we can fence a particular node
 *
 * \param[in] data_set  Working set for cluster
 * \param[in] node      Node to check
 *
 * \return true if node can be fenced, false otherwise
 */
bool
pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
{
    if (pe__is_guest_node(node)) {
        /* Guest nodes are fenced by stopping their container resource. We can
         * do that if the container's host is either online or fenceable.
         */
        pe_resource_t *rsc = node->details->remote_rsc->container;

        for (GList *n = rsc->running_on; n != NULL; n = n->next) {
            pe_node_t *container_node = n->data;

            if (!container_node->details->online
                && !pe_can_fence(data_set, container_node)) {
                return false;
            }
        }
        return true;

    } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
        return false; /* Turned off */

    } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
        return false; /* No devices */

    } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
        return true;

    } else if (data_set->no_quorum_policy == no_quorum_ignore) {
        return true;

    } else if(node == NULL) {
        return false;

    } else if(node->details->online) {
        crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
        return true;
    }

    crm_trace("Cannot fence %s", node->details->uname);
    return false;
}

/*!
 * \internal
 * \brief Copy a node object
 *
 * \param[in] this_node  Node object to copy
 *
 * \return Newly allocated shallow copy of this_node
 * \note This function asserts on errors and is guaranteed to return non-NULL.
*/ pe_node_t * pe__copy_node(const pe_node_t *this_node) { pe_node_t *new_node = NULL; CRM_ASSERT(this_node != NULL); new_node = calloc(1, sizeof(pe_node_t)); CRM_ASSERT(new_node != NULL); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; pe_node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = pe__add_scores(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { pe_node_t *new_node = pe__copy_node(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } /*! * \internal * \brief Create a node hash table from a node list * * \param[in] list Node list * * \return Hash table equivalent of node list */ GHashTable * pe__node_list2table(GList *list) { GHashTable *result = NULL; result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data); g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { return pcmk_numeric_strcasecmp(((const pe_node_t *) a)->details->uname, ((const pe_node_t *) b)->details->uname); } /*! * \internal * \brief Output node weights to stdout * * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__output_node_weights(pe_resource_t *rsc, const char *comment, GHashTable *nodes) { char score[128]; // Stack-allocated since this is called frequently // Sort the nodes so the output is consistent for regression tests GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; score2char_stack(node->weight, score, sizeof(score)); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } /*! 
* \internal * \brief Log node weights at trace level * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__log_node_weights(const char *file, const char *function, int line, pe_resource_t *rsc, const char *comment, GHashTable *nodes) { GHashTableIter iter; pe_node_t *node = NULL; char score[128]; // Stack-allocated since this is called frequently // Don't waste time if we're not tracing at this point pcmk__log_else(LOG_TRACE, return); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { score2char_stack(node->weight, score, sizeof(score)); if (rsc) { qb_log_from_external_source(function, file, "%s: %s allocation score on %s: %s", LOG_TRACE, line, 0, comment, rsc->id, node->details->uname, score); } else { qb_log_from_external_source(function, file, "%s: %s = %s", LOG_TRACE, line, 0, comment, node->details->uname, score); } } } /*! * \internal * \brief Log or output node weights * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] to_log Log if true, otherwise output * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ void pe__show_node_weights_as(const char *file, const char *function, int line, bool to_log, pe_resource_t *rsc, const char *comment, GHashTable *nodes) { if (rsc != NULL) { if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { // Don't show allocation scores for orphans return; } nodes = rsc->allowed_nodes; } if (nodes == NULL) { // Nothing to show return; } if (to_log) { pe__log_node_weights(file, function, line, rsc, comment, nodes); } else { pe__output_node_weights(rsc, comment, nodes); } // If this resource has children, repeat recursively for each if (rsc && rsc->children) { for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe__show_node_weights_as(file, function, line, to_log, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = crm_strdup_printf("%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, pe_node_t * node) { char *dump_text = crm_strdup_printf("%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == LOG_STDOUT) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node) { char *dump_text = crm_strdup_printf("%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); switch (level) { case LOG_STDOUT: fprintf(stdout, "%s\n", dump_text); break; case LOG_NEVER: break; default: crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const pe_resource_t *resource1 = (const pe_resource_t *)a; const pe_resource_t *resource2 = (const pe_resource_t *)b; if 
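/*
 * sort_rsc_index() and sort_rsc_priority() follow the GCompareFunc
 * convention expected by g_list_sort(): negative when a sorts first,
 * positive when b sorts first, with NULL entries deliberately sorting
 * last. A hedged usage sketch (sorting highest-priority resources to
 * the front of the list):
 *
 *     data_set->resources = g_list_sort(data_set->resources,
 *                                       sort_rsc_priority);
 */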
(a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const pe_resource_t *resource1 = (const pe_resource_t *)a; const pe_resource_t *resource2 = (const pe_resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } static enum pe_quorum_policy effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) { enum pe_quorum_policy policy = data_set->no_quorum_policy; if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { policy = no_quorum_ignore; } else if (data_set->no_quorum_policy == no_quorum_demote) { switch (rsc->role) { case RSC_ROLE_MASTER: case RSC_ROLE_SLAVE: if (rsc->next_role > RSC_ROLE_SLAVE) { - rsc->next_role = RSC_ROLE_SLAVE; + pe__set_next_role(rsc, RSC_ROLE_SLAVE, + "no-quorum-policy=demote"); } policy = no_quorum_ignore; break; default: policy = no_quorum_stop; break; } } return policy; } pe_action_t * custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { pe_action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (pcmk__list_of_multiple(possible_matches)) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found action %d: %s for %s (%s) on %s", action->id, task, (rsc? rsc->id : "no resource"), action->uuid, (on_node? on_node->details->uname : "no node")); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating action %d (%s): %s for %s (%s) on %s", data_set->action_id, (optional? "optional" : "required"), task, (rsc? rsc->id : "no resource"), key, (on_node? 
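/*
 * Note that custom_action() takes ownership of 'key' and frees it before
 * returning, so callers normally build the key inline with pcmk__op_key()
 * and never free it themselves. A sketch with a hypothetical resource and
 * node (optional=FALSE, save_action=TRUE):
 *
 *     pe_action_t *stop = custom_action(rsc,
 *                                       pcmk__op_key(rsc->id, RSC_STOP, 0),
 *                                       RSC_STOP, node, FALSE, TRUE,
 *                                       data_set);
 */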
on_node->details->uname : "no node")); } action = calloc(1, sizeof(pe_action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; action->task = strdup(task); if (on_node) { action->node = pe__copy_node(on_node); } action->uuid = strdup(key); if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { // Resource history deletion for a node can be done on the DC pe__set_action_flags(action, pe_action_dc); } pe__set_action_flags(action, pe_action_runnable); if (optional) { pe__set_action_flags(action, pe_action_optional); } else { pe__clear_action_flags(action, pe_action_optional); } action->extra = crm_str_table_new(); action->meta = crm_str_table_new(); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { guint interval_ms = 0; action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); parse_op_key(key, NULL, NULL, &interval_ms); unpack_operation(action, action->op_entry, rsc->container, data_set, interval_ms); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } } if (!optional && pcmk_is_set(action->flags, pe_action_optional)) { pe__clear_action_flags(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (!pcmk_is_set(action->flags, pe_action_have_node_attrs) && action->node != NULL && action->op_entry != NULL) { pe_rule_eval_data_t rule_data = { .node_hash = action->node->details->attrs, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; pe__set_action_flags(action, pe_action_have_node_attrs); pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, &rule_data, action->extra, NULL, FALSE, data_set); } if (pcmk_is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "%s is unrunnable (unallocated)", action->uuid); pe__clear_action_flags(action, pe_action_runnable); } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS) == NULL) { pe_rsc_debug(rsc, "%s on %s is optional (%s is unmanaged)", action->uuid, action->node->details->uname, rsc->id); pe__set_action_flags(action, pe_action_optional); //pe__clear_action_flags(action, pe_action_runnable); } else if (!pcmk_is_set(action->flags, pe_action_dc) && !(action->node->details->online) && (!pe__is_guest_node(action->node) || action->node->details->remote_requires_reset)) { pe__clear_action_flags(action, pe_action_runnable); do_crm_log(warn_level, "%s on %s is unrunnable (node is offline)", action->uuid, action->node->details->uname); if (pcmk_is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE); } } else if (!pcmk_is_set(action->flags, pe_action_dc) && action->node->details->pending) { pe__clear_action_flags(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (node is pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_action_set_reason(action, NULL, TRUE); if (pe__is_guest_node(action->node) && 
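/*
 * The branches above and below decide runnability solely by toggling
 * pe_action_runnable, so after custom_action() returns, a caller checks
 * the flag rather than any separate boolean, e.g.:
 *
 *     if (!pcmk_is_set(action->flags, pe_action_runnable)) {
 *         crm_debug("%s was deemed unrunnable", action->uuid);
 *     }
 */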
!pe_can_fence(data_set, action->node)) { /* An action that requires nothing usually does not require any * fencing in order to be runnable. However, there is an * exception: an action cannot be completed if it is on a guest * node whose host is unclean and cannot be fenced. */ pe_rsc_debug(rsc, "%s on %s is unrunnable " "(node's host cannot be fenced)", action->uuid, action->node->details->uname); pe__clear_action_flags(action, pe_action_runnable); } else { pe_rsc_trace(rsc, "%s on %s does not require fencing or quorum", action->uuid, action->node->details->uname); pe__set_action_flags(action, pe_action_runnable); } #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (quorum_policy == no_quorum_stop) { pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)", action->uuid, action->node->details->uname); pe_action_set_flag_reason(__func__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); } else if (quorum_policy == no_quorum_freeze) { if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_rsc_debug(rsc, "%s on %s is unrunnable (no quorum)", action->uuid, action->node->details->uname); pe_action_set_flag_reason(__func__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); } } else { //pe_action_set_reason(action, NULL, TRUE); pe__set_action_flags(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: pe__set_resource_flags(rsc, pe_rsc_stopping); break; case start_rsc: pe__clear_resource_flags(rsc, pe_rsc_starting); if (pcmk_is_set(action->flags, pe_action_runnable)) { pe__set_resource_flags(rsc, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static bool valid_stop_on_fail(const char *value) { return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL); } static const char * unpack_operation_on_fail(pe_action_t * action) { const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval_spec = NULL; const char *enabled = NULL; const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei) && !valid_stop_on_fail(value)) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " "action to default value because '%s' is not " "allowed for stop", action->rsc->id, value); return NULL; } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = pcmk__xe_first_child(action->rsc->ops_xml); (operation != NULL) && (value == NULL); operation = pcmk__xe_next(operation)) { if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei) || !pcmk__str_eq(role, "Master", pcmk__str_casei)) { continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; } else if 
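/*
 * In CIB terms, this loop lets a demote action with no explicit on-fail
 * inherit the value from a promoted-role monitor. A hypothetical
 * operations block where demote's on-fail then defaults to "block":
 *
 *     <operations>
 *       <op id="rsc-monitor-promoted" name="monitor" role="Master"
 *           interval="10s" on-fail="block"/>
 *     </operations>
 *
 * A monitor whose own on-fail is "demote" is deliberately not inherited,
 * as the check immediately below shows.
 */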
(pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) { continue; } value = on_fail; } } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { value = "ignore"; } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { name = crm_element_value(action->op_entry, "name"); role = crm_element_value(action->op_entry, "role"); interval_spec = crm_element_value(action->op_entry, XML_LRM_ATTR_INTERVAL); if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei) && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei) || !pcmk__str_eq(role, "Master", pcmk__str_casei) || (crm_parse_interval_spec(interval_spec) == 0))) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " "action to default value because 'demote' is not " "allowed for it", action->rsc->id, name); return NULL; } } return value; } static xmlNode * find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled) { guint interval_ms = 0; guint min_interval_ms = G_MAXUINT; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; operation = pcmk__xe_next(operation)) { if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms && (interval_ms < min_interval_ms)) { min_interval_ms = interval_ms; op = operation; } } } return op; } static int unpack_start_delay(const char *value, GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } // true if value contains valid, non-NULL interval origin for recurring op static bool unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms, crm_time_t *now, long long *start_delay) { long long result = 0; guint interval_sec = interval_ms / 1000; crm_time_t *origin = NULL; // Ignore unspecified values and non-recurring operations if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { return false; } // Parse interval origin from text origin = crm_time_new(value); if (origin == NULL) { pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation " "'%s' because '%s' is not valid", (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value); return false; } // Get seconds since origin (negative if origin is in the future) result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); crm_time_free(origin); // Calculate seconds from closest interval to now result = result % interval_sec; // Calculate seconds remaining until next interval result = ((result <= 0)? 0 : interval_sec) - result; crm_info("Calculated a start delay of %llds for operation '%s'", result, (ID(xml_obj)? 
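/*
 * A worked example of the calculation above, with hypothetical times:
 * interval-origin 02:00, interval 24h, and 'now' 14:00 the same day.
 * Seconds since origin = 43200; 43200 % 86400 = 43200; the delay is then
 * 86400 - 43200 = 43200s, i.e. the operation is held for 12 hours so
 * that it first fires at the next 02:00 boundary.
 */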
ID(xml_obj) : "(unspecified)")); if (start_delay != NULL) { *start_delay = result * 1000; // milliseconds } return true; } static int unpack_timeout(const char *value) { int timeout_ms = crm_get_msec(value); if (timeout_ms < 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout_ms; } int pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set) { xmlNode *child = NULL; GHashTable *action_meta = NULL; const char *timeout_spec = NULL; int timeout_ms = 0; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME), pcmk__str_casei)) { timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT); break; } } if (timeout_spec == NULL && data_set->op_defaults) { action_meta = crm_str_table_new(); pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, action_meta, NULL, FALSE, data_set); timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); } // @TODO check meta-attributes (including versioned meta-attributes) // @TODO maybe use min-interval monitor timeout as default for monitors timeout_ms = crm_get_msec(timeout_spec); if (timeout_ms < 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } if (action_meta != NULL) { g_hash_table_destroy(action_meta); } return timeout_ms; } #if ENABLE_VERSIONED_ATTRS static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = pcmk__xe_first_child(versioned_meta); attrs != NULL; attrs = pcmk__xe_next(attrs)) { for (attr = pcmk__xe_first_child(attrs); attr != NULL; attr = pcmk__xe_next(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) { long long start_delay = 0; if (unpack_interval_origin(value, xml_obj, interval_ms, now, &start_delay)) { crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } } else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) { int timeout_ms = unpack_timeout(value); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms); } } } } #endif /*! * \brief Unpack operation XML into an action structure * * Unpack an operation's meta-attributes (normalizing the interval, timeout, * and start delay values as integer milliseconds), requirements, and * failure policy. 
* * \param[in,out] action Action to unpack into * \param[in] xml_obj Operation XML (or NULL if all defaults) * \param[in] container Resource that contains affected resource, if any * \param[in] data_set Cluster state * \param[in] interval_ms How frequently to perform the operation */ static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, pe_working_set_t * data_set, guint interval_ms) { int timeout_ms = 0; const char *value = NULL; bool is_probe = pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei) && (interval_ms == 0); #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif pe_rsc_eval_data_t rsc_rule_data = { .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) }; pe_op_eval_data_t op_rule_data = { .op_name = action->task, .interval = interval_ms }; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = &rsc_rule_data, .op_data = &op_rule_data }; CRM_CHECK(action && action->rsc, return); // Cluster-wide pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, action->meta, NULL, FALSE, data_set); // Determine probe default timeout differently if (is_probe) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); if (value) { crm_trace("\t%s: Setting default timeout to minimum-interval " "monitor's timeout '%s'", action->uuid, value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), strdup(value)); } } } if (xml_obj) { xmlAttrPtr xIter = NULL; // take precedence over defaults pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, action->meta, NULL, TRUE, data_set); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); pe_eval_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, &rule_data, rsc_details->versioned_parameters, NULL); pe_eval_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, &rule_data, rsc_details->versioned_meta, NULL); #endif /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the tag. */ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds if (interval_ms > 0) { g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL), crm_strdup_printf("%u", interval_ms)); } else { g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL); } /* * Timeout order of precedence: * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params * and task is start or a probe; pcmk_monitor_timeout works * by default for a recurring monitor) * 2. explicit op timeout on the primitive * 3. default op timeout * a. if probe, then min-interval monitor's timeout * b. else, in XML_CIB_TAG_OPCONFIG * 4. CRM_DEFAULT_OP_TIMEOUT_S * * #1 overrides general rule of XML property having highest * precedence. 
*/ if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), pcmk_ra_cap_fence_params) && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) || is_probe)) { GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set); value = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (value) { crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', " "overriding default", action->uuid, value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), strdup(value)); } } // Normalize timeout to positive milliseconds value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); timeout_ms = unpack_timeout(value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout_ms)); if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) { action->needs = rsc_req_nothing; value = "nothing (not start or promote)"; } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing"; } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum"; } else { action->needs = rsc_req_nothing; value = "nothing"; } pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); value = "block"; // The above could destroy the original string } else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) { action->on_fail = action_fail_fence; value = "node fencing"; if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for " "operation '%s' to 'stop' because 'fence' is not " "valid when fencing is disabled", action->uuid); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) { action->on_fail = action_fail_standby; value = "node standby"; } else if (pcmk__strcase_any_of(value, "ignore", "nothing", NULL)) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { action->on_fail = action_fail_demote; value = "demote instance"; } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* For remote nodes, ensure that any failure that results in dropping an * active connection to the node results in fencing of the node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. 
start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed)) && pe__resource_is_remote_conn(action->rsc, data_set) && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei) && (interval_ms == 0)) && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) { if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged remote node (enforcing default)"; } else { if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence remote node (default)"; } else { value = "recover remote node connection (default)"; } if (action->rsc->remote_reconnect_ms) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) { if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "%s failure handling: %s", action->uuid, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); if (value) { pe_warn_once(pe_wo_role_after, "Support for role_after_failure is deprecated and will be removed in a future release"); } } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "%s failure results in: %s", action->uuid, role2text(action->fail_role)); value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { long long start_delay = 0; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, &start_delay)) { g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), crm_strdup_printf("%lld", start_delay)); } } #if ENABLE_VERSIONED_ATTRS unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, data_set->now); #endif } static xmlNode * find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled) { guint interval_ms = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; operation = pcmk__xe_next(operation)) { if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); match_key = pcmk__op_key(rsc->id, name, interval_ms); if 
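/*
 * The lookup key is a canonical operation key, so a hedged caller sketch
 * looks like:
 *
 *     char *key = pcmk__op_key(rsc->id, RSC_STATUS, 10000);
 *     xmlNode *op = find_rsc_op_entry(rsc, key);
 *
 *     free(key);
 *
 * Migration and notify keys that have no exact <op> match fall back to
 * the plain "migrate"/"notify" entries via the retry logic below.
 */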
(pcmk__str_eq(key, match_key, pcmk__str_casei)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = pcmk__op_key(rsc->clone_name, name, interval_ms); if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = pcmk__op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = pcmk__op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(pe_resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, pe_node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { int log_level = LOG_TRACE; char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending, &log_level); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? 
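/*
 * pe_free_action() below releases everything an action owns, including
 * both ordering wrapper lists, so those lists must never be freed
 * separately. A hedged teardown sketch for a whole working set:
 *
 *     g_list_free_full(data_set->actions,
 *                      (GDestroyNotify) pe_free_action);
 *     data_set->actions = NULL;
 */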
"" : ": ", (char *)key, (char *)value); } void pe_free_action(pe_action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } #if ENABLE_VERSIONED_ATTRS if (action->rsc) { pe_free_rsc_action_details(action); } #endif free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); if (value == NULL) { /* skip */ } else if (pcmk__str_eq(value, "0", pcmk__str_casei)) { /* skip */ } else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; default: break; } } return task; } pe_action_t * find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) { continue; } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = pe__copy_node(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, 
action->node->details->uname, on_node->details->uname);
        }
    }

    return result;
}

GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
    GList *result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    if (on_node == NULL) {
        crm_trace("Not searching for action %s because node not specified", key);
        return NULL;
    }

    for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (action->node == NULL) {
            crm_trace("Skipping comparison of %s vs action %s without node", key, action->uuid);

        } else if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
            crm_trace("Desired action %s doesn't match %s", key, action->uuid);

        } else if (!pcmk__str_eq(on_node->details->id, action->node->details->id, pcmk__str_casei)) {
            crm_trace("Action %s desired node ID %s doesn't match %s", key, on_node->details->id, action->node->details->id);

        } else {
            crm_trace("Action %s matches", key);
            result = g_list_prepend(result, action);
        }
    }

    return result;
}

/*!
 * \brief Find all actions of given type for a resource
 *
 * \param[in] rsc           Resource to search
 * \param[in] node          Find only actions scheduled on this node
 * \param[in] task          Action name to search for
 * \param[in] require_node  If TRUE, NULL node or action node will not match
 *
 * \return List of actions found (or NULL if none)
 * \note If node is not NULL and require_node is FALSE, matching actions
 *       without a node will be assigned to node.
 */
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                     const char *task, bool require_node)
{
    GList *result = NULL;
    char *key = pcmk__op_key(rsc->id, task, 0);

    if (require_node) {
        result = find_actions_exact(rsc->actions, key, node);
    } else {
        result = find_actions(rsc->actions, key, node);
    }
    free(key);
    return result;
}

static void
resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
{
    pe_node_t *match = NULL;

    if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
        && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
        /* This string comparison may be fragile, but exclusive resources and
         * exclusive nodes should not have the symmetric_default constraint
         * applied to them.
*/ return; } else if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = pe__copy_node(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = pe__add_scores(match->weight, score); } void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node_iter = (pe_node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; pe_node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); if (pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_casei)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unlikely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ time_t last_a = -1; time_t last_b = -1; crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %lld vs %lld", (long long) last_a, (long long) last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic a"); } if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (e.g. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei) || pcmk__str_eq("default", value, pcmk__str_casei)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' is not valid", rsc->id, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' only makes sense for promotable " "clones", rsc->id, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; pe_action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */
    CRM_ASSERT(lh_action != rh_action);

    /* Filter dups, otherwise update_action_states() has too much work to do */
    gIter = lh_action->actions_after;
    for (; gIter != NULL; gIter = gIter->next) {
        pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;

        if (after->action == rh_action && (after->type & order)) {
            return FALSE;
        }
    }

    wrapper = calloc(1, sizeof(pe_action_wrapper_t));
    wrapper->action = rh_action;
    wrapper->type = order;
    list = lh_action->actions_after;
    list = g_list_prepend(list, wrapper);
    lh_action->actions_after = list;

    wrapper = calloc(1, sizeof(pe_action_wrapper_t));
    wrapper->action = lh_action;
    wrapper->type = order;
    list = rh_action->actions_before;
    list = g_list_prepend(list, wrapper);
    rh_action->actions_before = list;
    return TRUE;
}

pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
    pe_action_t *op = NULL;

    if(data_set->singletons) {
        op = g_hash_table_lookup(data_set->singletons, name);
    }
    if (op == NULL) {
        op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
        pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
    }
    return op;
}

void
destroy_ticket(gpointer data)
{
    pe_ticket_t *ticket = data;

    if (ticket->state) {
        g_hash_table_destroy(ticket->state);
    }
    free(ticket->id);
    free(ticket);
}

pe_ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
    pe_ticket_t *ticket = NULL;

    if (pcmk__str_empty(ticket_id)) {
        return NULL;
    }

    if (data_set->tickets == NULL) {
        data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket);
    }

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {

        ticket = calloc(1, sizeof(pe_ticket_t));
        if (ticket == NULL) {
            crm_err("Cannot allocate ticket '%s'", ticket_id);
            return NULL;
        }

        crm_trace("Creating ticket entry for %s", ticket_id);

        ticket->id = strdup(ticket_id);
        ticket->granted = FALSE;
        ticket->last_granted = -1;
        ticket->standby = FALSE;
        ticket->state = crm_str_table_new();

        g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
    }

    return ticket;
}

const char *
rsc_printable_id(pe_resource_t *rsc)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
        return ID(rsc->xml);
    }
    return rsc->id;
}

void
pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
    pe__clear_resource_flags(rsc, flags);
    for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
    }
}

void
pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
{
    for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
        pe_resource_t *r = (pe_resource_t *) lpc->data;
        pe__clear_resource_flags_recursive(r, flag);
    }
}

void
pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
    pe__set_resource_flags(rsc, flags);
    for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
        pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
    }
}

static GListPtr
find_unfencing_devices(GListPtr candidates, GListPtr matches)
{
    for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *candidate = gIter->data;
        const char *provides = g_hash_table_lookup(candidate->meta, PCMK_STONITH_PROVIDES);
        const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);

        if(candidate->children) {
            matches = find_unfencing_devices(candidate->children, matches);
        } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
            continue;
        } else if (pcmk__str_eq(provides,
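/*
 * A device qualifies here when its meta-attributes advertise unfencing,
 * for example (hypothetical CIB fragment for an agent such as
 * fence_scsi):
 *
 *     <primitive id="fence-scsi" class="stonith" type="fence_scsi">
 *       <meta_attributes id="fence-scsi-meta">
 *         <nvpair id="fence-scsi-provides" name="provides"
 *                 value="unfencing"/>
 *       </meta_attributes>
 *     </primitive>
 */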
"unfencing", pcmk__str_casei) || pcmk__str_eq(requires, "unfencing", pcmk__str_casei)) { matches = g_list_prepend(matches, candidate); } } return matches; } static int node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set) { int member_count = 0; int online_count = 0; int top_priority = 0; int lowest_priority = 0; GListPtr gIter = NULL; // `priority-fencing-delay` is disabled if (data_set->priority_fencing_delay <= 0) { return 0; } /* No need to request a delay if the fencing target is not a normal cluster * member, for example if it's a remote node or a guest node. */ if (node->details->type != node_member) { return 0; } // No need to request a delay if the fencing target is in our partition if (node->details->online) { return 0; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *n = gIter->data; if (n->details->type != node_member) { continue; } member_count ++; if (n->details->online) { online_count++; } if (member_count == 1 || n->details->priority > top_priority) { top_priority = n->details->priority; } if (member_count == 1 || n->details->priority < lowest_priority) { lowest_priority = n->details->priority; } } // No need to delay if we have more than half of the cluster members if (online_count > member_count / 2) { return 0; } /* All the nodes have equal priority. * Any configured corresponding `pcmk_delay_base/max` will be applied. */ if (lowest_priority == top_priority) { return 0; } if (node->details->priority < top_priority) { return 0; } return data_set->priority_fencing_delay; } pe_action_t * pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set) { char *op_key = NULL; pe_action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, op_key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if (pe__is_guest_or_remote_node(node) && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the check_action_definition() based stuff works fine. 
*/ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = calloc(max, sizeof(char)); char *digests_secure = calloc(max, sizeof(char)); GListPtr matches = find_unfencing_devices(data_set->resources, NULL); for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) { pe_resource_t *match = gIter->data; const char *agent = g_hash_table_lookup(match->meta, XML_ATTR_TYPE); op_digest_cache_t *data = NULL; data = pe__compare_fencing_digest(match, agent, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (pcmk_is_set(data_set->flags, pe_flag_stdout)) { fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, agent, data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, agent, data->digest_secure_calc); } g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_ALL), digests_all); g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_SECURE), digests_secure); } } else { free(op_key); } if (data_set->priority_fencing_delay > 0 /* It's a suitable case where `priority-fencing-delay` applies. * At least add `priority-fencing-delay` field as an indicator. */ && (priority_delay /* Re-calculate priority delay for the suitable case when * pe_fence_op() is called again by stage6() after node priority has * been actually calculated with native_add_running() */ || g_hash_table_lookup(stonith_op->meta, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) { /* Add `priority-fencing-delay` to the fencing op even if it's 0 for * the targeting node. So that it takes precedence over any possible * `pcmk_delay_base/max`. 
void
trigger_unfencing(
    pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
        /* No resources require it */
        return;

    } else if ((rsc != NULL)
               && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* Wasn't a stonith device */
        return;

    } else if(node
              && node->details->online
              && node->details->unclean == FALSE
              && node->details->shutdown == FALSE) {
        pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);

        if(dependency) {
            order_actions(unfence, dependency, pe_order_optional);
        }

    } else if(rsc) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, rsc->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if(node->details->online
               && node->details->unclean == FALSE
               && node->details->shutdown == FALSE) {
                trigger_unfencing(rsc, node, reason, dependency, data_set);
            }
        }
    }
}

/*!
 * \internal
 * \brief Add an object reference to a tag, creating the tag if needed
 *
 * \param[in,out] tags      Table of tags, indexed by tag name
 * \param[in]     tag_name  Name of tag that should reference the object
 * \param[in]     obj_ref   ID of the object to reference
 *
 * \return TRUE on success (including when the reference already existed),
 *         FALSE on memory allocation failure
 */
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref)
{
    pe_tag_t *tag = NULL;
    GListPtr gIter = NULL;
    gboolean is_existing = FALSE;

    CRM_CHECK(tags && tag_name && obj_ref, return FALSE);

    tag = g_hash_table_lookup(tags, tag_name);
    if (tag == NULL) {
        tag = calloc(1, sizeof(pe_tag_t));
        if (tag == NULL) {
            return FALSE;
        }
        tag->id = strdup(tag_name);
        tag->refs = NULL;
        g_hash_table_insert(tags, strdup(tag_name), tag);
    }

    for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
        const char *existing_ref = (const char *) gIter->data;

        if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
            is_existing = TRUE;
            break;
        }
    }

    if (is_existing == FALSE) {
        tag->refs = g_list_append(tag->refs, strdup(obj_ref));
        crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
    }

    return TRUE;
}
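
/*!
 * \internal
 * \brief Set or clear action flags, recording the reason on the action
 *
 * \param[in]     function   Name of calling function (for logging)
 * \param[in]     line       Source line of caller (for logging)
 * \param[in,out] action     Action whose flags should change
 * \param[in]     reason     Action that is the reason for the change, if any
 * \param[in]     text       Explanatory text to record on \p action, if any
 * \param[in]     flags      Flag(s) to set or clear
 * \param[in]     overwrite  Whether to replace any reason already recorded
 */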
reason->uuid : "0")); } if(unset) { if (pcmk_is_set(action->flags, flags)) { pe__clear_action_flags_as(function, line, action, flags); update = TRUE; } } else { if (!pcmk_is_set(action->flags, flags)) { pe__set_action_flags_as(function, line, action, flags); update = TRUE; } } if((change && update) || text) { char *reason_text = NULL; if(reason == NULL) { pe_action_set_reason(action, text, overwrite); } else if(reason->rsc == NULL) { reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:""); } else { reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA"); } if(reason_text && action->rsc != reason->rsc) { pe_action_set_reason(action, reason_text, overwrite); } free(reason_text); } } void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) { if (action->reason != NULL && overwrite) { pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", action->uuid, action->reason, crm_str(reason)); free(action->reason); } else if (action->reason == NULL) { pe_rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, crm_str(reason)); } else { // crm_assert(action->reason != NULL && !overwrite); return; } if (reason != NULL) { action->reason = strdup(reason); } else { action->reason = NULL; } } /*! * \internal * \brief Check whether shutdown has been requested for a node * * \param[in] node Node to check * * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise * \note This differs from simply using node->details->shutdown in that it can * be used before that has been determined (and in fact to determine it), * and it can also be used to distinguish requested shutdown from implicit * shutdown of remote nodes by virtue of their connection stopping. */ bool pe__shutdown_requested(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches); } /*! * \internal * \brief Update a data set's "recheck by" time * * \param[in] recheck Epoch time when recheck should happen * \param[in,out] data_set Current working set */ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) { if ((recheck > get_effective_time(data_set)) && ((data_set->recheck_by == 0) || (data_set->recheck_by > recheck))) { data_set->recheck_by = recheck; } } /*! * \internal * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set */ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set) { crm_time_t *next_change = crm_time_new_undefined(); pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, always_first, overwrite, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, data_set); } crm_time_free(next_change); } bool pe__resource_is_disabled(pe_resource_t *rsc) { const char *target_role = NULL; CRM_CHECK(rsc != NULL, return false); target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); if ((target_role_e == RSC_ROLE_STOPPED) || ((target_role_e == RSC_ROLE_SLAVE) && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) { return true; } } return false; } /*! 
bool
pe__resource_is_disabled(pe_resource_t *rsc)
{
    const char *target_role = NULL;

    CRM_CHECK(rsc != NULL, return false);
    target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    if (target_role) {
        enum rsc_role_e target_role_e = text2role(target_role);

        if ((target_role_e == RSC_ROLE_STOPPED)
            || ((target_role_e == RSC_ROLE_SLAVE)
                && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Create an action to clear a resource's history from CIB
 *
 * \param[in] rsc       Resource to clear
 * \param[in] node      Node to clear history on
 * \param[in] data_set  Cluster working set
 *
 * \return New action to clear resource history
 */
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
                           pe_working_set_t *data_set)
{
    char *key = NULL;

    CRM_ASSERT(rsc && node);
    key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
    return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
                         data_set);
}

/*!
 * \internal
 * \brief Check whether a resource is active on any node in a list
 *
 * \param[in] rsc        Resource to check
 * \param[in] node_list  List of node names to check against
 *
 * \return true if \p rsc is running on any node whose name is in
 *         \p node_list, otherwise false
 */
bool
pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list)
{
    for (GListPtr ele = rsc->running_on; ele; ele = ele->next) {
        pe_node_t *node = (pe_node_t *) ele->data;

        if (pcmk__str_in_list(node_list, node->details->uname)) {
            return true;
        }
    }

    return false;
}

/*!
 * \internal
 * \brief Check whether a resource should be filtered from node-based output
 *
 * \param[in] rsc        Resource to check
 * \param[in] only_node  List of node names being shown
 *
 * \return true if \p rsc is active but not on any of the listed nodes,
 *         otherwise false
 */
bool
pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node)
{
    return (rsc->fns->active(rsc, FALSE)
            && !pe__rsc_running_on_any_node_in_list(rsc, only_node));
}

GListPtr
pe__filter_rsc_list(GListPtr rscs, GListPtr filter)
{
    GListPtr retval = NULL;

    for (GListPtr gIter = rscs; gIter; gIter = gIter->next) {
        pe_resource_t *rsc = (pe_resource_t *) gIter->data;

        /* I think the second condition is safe here for all callers of this
         * function.  If not, it needs to move into pe__node_text.
         */
        if (pcmk__str_in_list(filter, rsc_printable_id(rsc)) ||
            (rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent)))) {
            retval = g_list_prepend(retval, rsc);
        }
    }

    return retval;
}