diff --git a/cts/cts.in b/cts/cts.in index df0e5de2fa..8b5074991f 100755 --- a/cts/cts.in +++ b/cts/cts.in @@ -1,331 +1,328 @@ #!@BASH_PATH@ # # Copyright 2012-2018 Andrew Beekhof <andrew@beekhof.net> # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # if [ -e $PWD/cts/CTSlab.py ]; then cts_root=$PWD/cts elif [ -e $PWD/CTSlab.py ]; then cts_root=$PWD else cts_root=`dirname $0` fi logfile=0 summary=0 verbose=0 watch=0 saved=0 tests="" install=0 clean=0 build=0 kill=0 run=0 boot=0 setup=0 target=rhel-7 cmd="" trace="" custom_log="" patterns="-e CTS:" helpmsg=$(cat <<EOF Usage: %s [options] {[{init|local-init|setup} [TARGET]] | [OTHER-CMDS]} [--]help, -h show help screen and exit -x turn on debugging -a show relevant screen sessions and exit -c,-g CLUSTER_NAME set the cluster name -S show summary from the last CTS run -s show summary for the current log (see -l) -v increase verbosity -p (currently unused) -e PATTERN grep pattern to apply when 'summary' or 'watch' requested -l print the filename of the log that would be operated on -w continuous (filtered) monitoring of the log file -f,-sf FILE show summary for the provided log -t TEST, [0-9]* add a test to the working set [--]build [???] request building Pacemaker [--]kill request termination of cluster software [--]run request CTS run (passing remaining arguments through) [--]boot, start request CTS run (with --boot option) [--]clean request cleaning up after CTS run [--]install, --inst request installing packages to get ready to run CTS [--]setup request initialization to get ready to run CTS trace-ls, tls list traced functions trace-add, tadd FUNC add a function to the list of traced ones trace-rm, trm FUNC remove a function from the list of traced ones trace-set, tset FUNC set function(s) as the only ones to be traced (f|fedora|r|rhel).* specify target distro init, local-init [local] initialize CTS environment --wget [local] download up-to-date CTS helpers -- delimits tests that follow EOF ) +# Modify an uninstalled source checkout to allow running CTS there +local_init() { + local_root="$(dirname "$cts_root")" + if [ ! -r "$cts_root/CTSvars.py.in" ]; then + echo "$local_root does not appear to be a source code directory" + exit 1 + elif [ ! 
-r "$cts_root/CTSvars.py" ]; then + echo "You must run configure and make first" + exit 1 + fi + + # Update CTS_home and Fencing_Home in CTSvars.py + sed -e "s:@datadir@/@PACKAGE@/tests/cts:$local_root/cts:" \ + "$cts_root/CTSvars.py" > "$cts_root/CTSvars.py.$$" + mv -- "$cts_root/CTSvars.py.$$" "$cts_root/CTSvars.py" + + files="extra/cluster-init extra/cluster-helper extra/cluster-clean" + for f in $files; do + cp "$local_root/$f" "$cts_root/" + done + + # Update report_data in a local copy of crm_report + sed -e "s:@datadir@/@PACKAGE@:$local_root/tools:" \ + "$local_root/tools/crm_report" > "$cts_root/crm_report" + chmod +x "$cts_root/crm_report" + + # Install the necessary helpers to system locations (must be done as root) + "$cts_root/cts-support" install + + echo "Make sure you add $cts_root to your PATH and set a value for \$cluster_name in .bashrc" + exit 0 +} + while true; do case $1 in -h|--help|help) printf "${helpmsg}\n" "$0"; exit;; -x) set -x; shift;; -a) screen -ls | grep cts exit 0;; -c|-g) cluster_name=$2; shift; shift;; -S) summary=1; saved=1; shift;; -s) summary=1; shift;; -v) verbose=`expr $verbose + 1`; shift;; -p) shift;; -e) patterns="$patterns -e `echo $2 | sed 's/ /\\\W/g'`"; shift; shift;; -l) logfile=1; shift;; -w) watch=1; shift;; -f|-sf) summary=1; custom_log=$2; shift; shift;; -t) tests="$tests $2"; shift; shift;; [0-9]*) tests="$tests $1"; shift;; --build|build) build=1; shift;; --kill|kill) kill=1; shift; break;; --run|run) run=1; shift; break;; --boot|boot|start) boot=1; clean=1; shift; break;; --clean|clean) clean=1; shift;; --inst|--install|install) install=1; clean=1; shift;; --setup|setup) setup=1; shift;; trace-ls|tls) cmd=$1; shift;; trace-add|tadd|trace-rm|trm|trace-set|tset) cmd=$1; trace=$2; shift; shift;; f*|fedora*) target="fedora-`echo $1 | sed -e s/fedora// -e s/-// -e s/f//`" shift;; r|rhel) target="rhel-7"; shift;; r*|rhel*) target="rhel-`echo $1 | sed -e s/rhel// -e s/-// -e s/r//`" shift;; - init|local-init) - local_root= - case $cts_root in - /*) local_root=`dirname $cts_root`;; - *) local_root=`dirname $PWD/$cts_root`;; - esac - - cat << EOF > $cts_root/CTSvars.py -class CTSvars: - CTS_home="$local_root/cts" - Fencing_home="$local_root/daemons/fenced" - CRM_CONFIG_DIR="/var/lib/pacemaker/cib" - CRM_DAEMON_USER="hacluster" - CRM_DAEMON_DIR="/usr/libexec/pacemaker" - OCF_ROOT_DIR="/usr/lib/ocf" -EOF - - files="extra/cluster-init extra/cluster-helper extra/cluster-clean tools/crm_report.in" - for f in $files; do - cp $local_root/$f $cts_root/ - done - - cp $local_root/tools/report.common.in $local_root/tools/report.common - sed -i.sed s:@localstatedir@:/var: $local_root/tools/report.common - - cp $cts_root/crm_report.in $cts_root/crm_report - sed -i.sed s:@datadir@/@PACKAGE@:$local_root/tools: $cts_root/crm_report - chmod +x $cts_root/crm_report - - cp $cts_root/LSBDummy.in $cts_root/LSBDummy - chmod +x $local_root/daemons/fenced/fence_* - sed -i.sed s:@OCF_ROOT_DIR@:/usr/lib/ocf: $cts_root/LSBDummy - - echo "Make sure you add $cts_root to your PATH and set a value for \$cluster_name in .bashrc" - exit 0 - ;; - + init|local-init) local_init ;; --wget) files="cluster-helper cluster-init cluster-clean" for f in $files; do rm -f $cts_root/$f echo "Downloading helper script $f from GitHub" wget -O $cts_root/$f https://raw.github.com/ClusterLabs/pacemaker/master/extra/$f chmod +x $cts_root/$f done shift ;; --) shift; tests="$tests $*"; break;; "") break;; *) echo "Unknown argument: $1"; exit 1;; esac done # Add the location of this script export 
PATH="$PATH:$cts_root" which cluster-helper &>/dev/null if [ $? != 0 ]; then echo $0 needs the cluster-helper script to be in your path echo You can obtain it from: https://raw.github.com/ClusterLabs/pacemaker/master/extra/cluster-helper exit 1 fi which cluster-clean &>/dev/null if [ $? != 0 ]; then echo $0 needs the cluster-clean script to be in your path echo You can obtain it from: https://raw.github.com/ClusterLabs/pacemaker/master/extra/cluster-clean exit 1 fi if [ "x$cluster_name" = x -o "x$cluster_name" = xpick ]; then clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' ` echo "custom) interactively define a cluster" for i in $clusters; do echo "$i) `cluster-helper --list short -g $i`" done read -p "Choose a cluster [custom]: " cluster_name echo fi if [ -z $cluster_name ]; then cluster_name=custom fi case $cluster_name in custom) read -p "Cluster name: " cluster_name read -p "Cluster hosts: " cluster_hosts read -p "Cluster log file: " cluster_log cluster-helper add -g "$cluster_name" -w "$cluster_hosts" ;; *) cluster_hosts=`cluster-helper --list short -g $cluster_name` cluster_log=~/cluster-$cluster_name.log; ;; esac if [ x$cmd != x ]; then config=/etc/sysconfig/pacemaker case $cmd in trace-ls|tls) cluster-helper -g $cluster_name -- grep PCMK_trace_functions $config ;; trace-add|tadd) echo "Adding $trace to PCMK_trace_functions" cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=/PCMK_trace_functions=$trace,/" $config ;; trace-rm|trm) echo "Removing $trace from PCMK_trace_functions" cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=\\\\\\(.*\\\\\\)$trace,\\\\\\(.*\\\\\\)/PCMK_trace_functions=\\\\\\1\\\\\\2/" $config ;; trace-set|tset) echo "Setting PCMK_trace_functions to '$trace'" cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions.*/PCMK_trace_functions=$trace/" $config ;; esac exit 0 fi if [ $build = 1 -a $run = 1 ]; then install=1 clean=1 fi if [ $build = 1 ]; then which build-pcmk if [ $? != 0 ]; then echo "You'll need to write/obtain build-pcmk in order to build pacemaker from here. Skipping" else build-pcmk r7 rc=$? if [ $rc != 0 ]; then echo "Build failed: $rc" exit $rc fi fi fi if [ $clean = 1 ]; then rm -f $cluster_log; cluster-clean -g $cluster_name --kill elif [ $kill = 1 ]; then cluster-clean -g $cluster_name --kill-only exit 0 fi if [ $install = 1 ]; then cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo fi if [ $setup = 1 ]; then cluster-init -g $cluster_name $target -u --test exit 0 elif [ $boot = 1 ]; then $cts_root/CTSlab.py -r -c -g $cluster_name --boot rc=$? if [ $rc = 0 ]; then echo "The cluster is ready..." fi exit $rc elif [ $run = 1 ]; then $cts_root/CTSlab.py -r -c -g $cluster_name 500 "$@" exit $? elif [ $clean = 1 ]; then exit 0 fi screen -ls | grep cts-$cluster_name &>/dev/null active=$? if [ ! 
-z $custom_log ]; then cluster_log=$custom_log fi if [ "x$tests" != x -a "x$tests" != "x " ]; then for t in $tests; do echo "crm_report --cts-log $cluster_log -d -T $t" crm_report --cts-log $cluster_log -d -T $t done elif [ $logfile = 1 ]; then echo $cluster_log elif [ $summary = 1 ]; then files=$cluster_log if [ $saved = 1 ]; then files=`ls -1tr ~/CTS-*/cluster-log.txt` fi for f in $files; do echo $f case $verbose in 0) cat -n $f | grep $patterns | grep -v "CTS: debug:" ;; 1) cat -n $f | grep $patterns | grep -v "CTS:.* cmd:" ;; *) cat -n $f | grep $patterns ;; esac echo "" done elif [ $watch = 1 ]; then case $verbose in 0) tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:" ;; 1) tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:" ;; *) tail -F $cluster_log | grep $patterns ;; esac elif [ $active = 0 ]; then screen -x cts-$cluster_name else touch $cluster_log # . ~/.bashrc export cluster_name cluster_hosts cluster_log screen -S cts-$cluster_name bash fi diff --git a/cts/scheduler/order-expired-failure.stderr b/cts/scheduler/order-expired-failure.stderr deleted file mode 100644 index a9e7a8967a..0000000000 --- a/cts/scheduler/order-expired-failure.stderr +++ /dev/null @@ -1 +0,0 @@ -Resources-operation: moving requires under meta_attributes as requires unless already defined there for matching start|promote diff --git a/cts/scheduler/order-expired-failure.summary b/cts/scheduler/order-expired-failure.summary index 07c5af53a0..d1edada584 100644 --- a/cts/scheduler/order-expired-failure.summary +++ b/cts/scheduler/order-expired-failure.summary @@ -1,131 +1,130 @@ -Resources-operation: moving requires under meta_attributes as requires unless already defined there for matching start|promote Using the original execution date of: 2018-04-09 07:55:35Z Current cluster status: RemoteNode overcloud-novacompute-1: UNCLEAN (offline) Online: [ controller-0 controller-1 controller-2 ] RemoteOnline: [ overcloud-novacompute-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] overcloud-novacompute-0 (ocf::pacemaker:remote): Started controller-0 overcloud-novacompute-1 (ocf::pacemaker:remote): FAILED controller-1 Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started controller-2 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started controller-0 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started controller-1 Docker container set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest] galera-bundle-0 (ocf::heartbeat:galera): Master controller-2 galera-bundle-1 (ocf::heartbeat:galera): Master controller-0 galera-bundle-2 (ocf::heartbeat:galera): Master controller-1 Docker container set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest] redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 ip-192.168.24.11 (ocf::heartbeat:IPaddr2): Started controller-2 ip-10.0.0.110 (ocf::heartbeat:IPaddr2): Stopped ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): 
Started controller-2 ip-172.17.3.11 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.17 (ocf::heartbeat:IPaddr2): Started controller-1 Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started controller-2 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started controller-0 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started controller-1 stonith-fence_compute-fence-nova (stonith:fence_compute): FAILED controller-2 Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] compute-unfence-trigger (ocf::pacemaker:Dummy): Started overcloud-novacompute-1 (UNCLEAN) Started: [ overcloud-novacompute-0 ] Stopped: [ controller-0 controller-1 controller-2 ] nova-evacuate (ocf::openstack:NovaEvacuate): Started controller-0 stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1 stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2 stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2 stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1 Docker container: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started controller-0 Transition Summary: * Fence (reboot) overcloud-novacompute-1 'remote connection is unrecoverable' * Stop overcloud-novacompute-1 ( controller-1 ) due to node availability * Start ip-10.0.0.110 ( controller-1 ) * Recover stonith-fence_compute-fence-nova ( controller-2 ) * Stop compute-unfence-trigger:1 ( overcloud-novacompute-1 ) due to node availability Executing cluster transition: * Resource action: rabbitmq-bundle-0 monitor=30000 on controller-2 * Resource action: rabbitmq-bundle-0 cancel=60000 on controller-2 * Resource action: rabbitmq-bundle-1 monitor=30000 on controller-0 * Resource action: rabbitmq-bundle-1 cancel=60000 on controller-0 * Resource action: rabbitmq-bundle-2 monitor=30000 on controller-1 * Resource action: rabbitmq-bundle-2 cancel=60000 on controller-1 * Resource action: galera-bundle-0 monitor=30000 on controller-2 * Resource action: galera-bundle-0 cancel=60000 on controller-2 * Resource action: galera-bundle-1 monitor=30000 on controller-0 * Resource action: galera-bundle-1 cancel=60000 on controller-0 * Resource action: galera-bundle-2 monitor=30000 on controller-1 * Resource action: galera-bundle-2 cancel=60000 on controller-1 * Resource action: redis-bundle-0 monitor=30000 on controller-2 * Resource action: redis-bundle-0 cancel=60000 on controller-2 * Resource action: redis-bundle-1 monitor=30000 on controller-0 * Resource action: redis-bundle-1 cancel=60000 on controller-0 * Resource action: redis-bundle-2 monitor=30000 on controller-1 * Resource action: redis-bundle-2 cancel=60000 on controller-1 * Resource action: stonith-fence_compute-fence-nova stop on controller-2 * Fencing overcloud-novacompute-1 (reboot) * Cluster action: clear_failcount for overcloud-novacompute-1 on controller-1 * Pseudo action: compute-unfence-trigger-clone_stop_0 * Pseudo action: stonith_complete * Resource action: ip-10.0.0.110 start on controller-1 * Resource action: stonith-fence_compute-fence-nova start on controller-2 * Resource action: stonith-fence_compute-fence-nova monitor=60000 on controller-2 * Pseudo action: compute-unfence-trigger_stop_0 * Pseudo action: 
compute-unfence-trigger-clone_stopped_0 * Resource action: overcloud-novacompute-1 stop on controller-1 * Resource action: ip-10.0.0.110 monitor=10000 on controller-1 * Pseudo action: all_stopped Using the original execution date of: 2018-04-09 07:55:35Z Revised cluster status: RemoteNode overcloud-novacompute-1: UNCLEAN (offline) Online: [ controller-0 controller-1 controller-2 ] RemoteOnline: [ overcloud-novacompute-0 ] Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ] overcloud-novacompute-0 (ocf::pacemaker:remote): Started controller-0 overcloud-novacompute-1 (ocf::pacemaker:remote): FAILED Docker container set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest] rabbitmq-bundle-0 (ocf::heartbeat:rabbitmq-cluster): Started controller-2 rabbitmq-bundle-1 (ocf::heartbeat:rabbitmq-cluster): Started controller-0 rabbitmq-bundle-2 (ocf::heartbeat:rabbitmq-cluster): Started controller-1 Docker container set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest] galera-bundle-0 (ocf::heartbeat:galera): Master controller-2 galera-bundle-1 (ocf::heartbeat:galera): Master controller-0 galera-bundle-2 (ocf::heartbeat:galera): Master controller-1 Docker container set: redis-bundle [192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest] redis-bundle-0 (ocf::heartbeat:redis): Master controller-2 redis-bundle-1 (ocf::heartbeat:redis): Slave controller-0 redis-bundle-2 (ocf::heartbeat:redis): Slave controller-1 ip-192.168.24.11 (ocf::heartbeat:IPaddr2): Started controller-2 ip-10.0.0.110 (ocf::heartbeat:IPaddr2): Started controller-1 ip-172.17.1.14 (ocf::heartbeat:IPaddr2): Started controller-1 ip-172.17.1.17 (ocf::heartbeat:IPaddr2): Started controller-2 ip-172.17.3.11 (ocf::heartbeat:IPaddr2): Started controller-0 ip-172.17.4.17 (ocf::heartbeat:IPaddr2): Started controller-1 Docker container set: haproxy-bundle [192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest] haproxy-bundle-docker-0 (ocf::heartbeat:docker): Started controller-2 haproxy-bundle-docker-1 (ocf::heartbeat:docker): Started controller-0 haproxy-bundle-docker-2 (ocf::heartbeat:docker): Started controller-1 stonith-fence_compute-fence-nova (stonith:fence_compute): Started controller-2 Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger] Started: [ overcloud-novacompute-0 ] Stopped: [ controller-0 controller-1 controller-2 overcloud-novacompute-1 ] nova-evacuate (ocf::openstack:NovaEvacuate): Started controller-0 stonith-fence_ipmilan-5254008be2cc (stonith:fence_ipmilan): Started controller-1 stonith-fence_ipmilan-525400803f9e (stonith:fence_ipmilan): Started controller-0 stonith-fence_ipmilan-525400fca120 (stonith:fence_ipmilan): Started controller-2 stonith-fence_ipmilan-525400953d48 (stonith:fence_ipmilan): Started controller-2 stonith-fence_ipmilan-525400b02b86 (stonith:fence_ipmilan): Started controller-1 Docker container: openstack-cinder-volume [192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest] openstack-cinder-volume-docker-0 (ocf::heartbeat:docker): Started controller-0 diff --git a/cts/scheduler/order-expired-failure.xml b/cts/scheduler/order-expired-failure.xml index bce8a14105..9e8426ed90 100644 --- a/cts/scheduler/order-expired-failure.xml 
+++ b/cts/scheduler/order-expired-failure.xml @@ -1,1191 +1,1194 @@ <cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="104" num_updates="12" admin_epoch="0" cib-last-written="Mon Apr 9 06:28:41 2018" update-origin="controller-0" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2" execution-date="1523260535"> <configuration> <crm_config> <cluster_property_set id="cib-bootstrap-options"> <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/> <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.18-11.el7-2b07d5c5a9"/> <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/> <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="tripleo_cluster"/> <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/> <nvpair id="cib-bootstrap-options-cluster-recheck-interval" name="cluster-recheck-interval" value="60s"/> </cluster_property_set> <cluster_property_set id="redis_replication"> <nvpair id="redis_replication-redis_REPL_INFO" name="redis_REPL_INFO" value="controller-2"/> </cluster_property_set> </crm_config> <nodes> <node id="1" uname="controller-0"> <instance_attributes id="nodes-1"> <nvpair id="nodes-1-rabbitmq-role" name="rabbitmq-role" value="true"/> <nvpair id="nodes-1-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-0"/> <nvpair id="nodes-1-galera-role" name="galera-role" value="true"/> <nvpair id="nodes-1-redis-role" name="redis-role" value="true"/> <nvpair id="nodes-1-haproxy-role" name="haproxy-role" value="true"/> <nvpair id="nodes-1-cinder-volume-role" name="cinder-volume-role" value="true"/> </instance_attributes> </node> <node id="2" uname="controller-1"> <instance_attributes id="nodes-2"> <nvpair id="nodes-2-rabbitmq-role" name="rabbitmq-role" value="true"/> <nvpair id="nodes-2-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-1"/> <nvpair id="nodes-2-galera-role" name="galera-role" value="true"/> <nvpair id="nodes-2-redis-role" name="redis-role" value="true"/> <nvpair id="nodes-2-haproxy-role" name="haproxy-role" value="true"/> <nvpair id="nodes-2-cinder-volume-role" name="cinder-volume-role" value="true"/> </instance_attributes> </node> <node id="3" uname="controller-2"> <instance_attributes id="nodes-3"> <nvpair id="nodes-3-rabbitmq-role" name="rabbitmq-role" value="true"/> <nvpair id="nodes-3-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@controller-2"/> <nvpair id="nodes-3-galera-role" name="galera-role" value="true"/> <nvpair id="nodes-3-redis-role" name="redis-role" value="true"/> <nvpair id="nodes-3-haproxy-role" name="haproxy-role" value="true"/> <nvpair id="nodes-3-cinder-volume-role" name="cinder-volume-role" value="true"/> </instance_attributes> </node> <node id="overcloud-novacompute-0" type="remote" uname="overcloud-novacompute-0"> <instance_attributes id="nodes-overcloud-novacompute-0"> <nvpair id="nodes-overcloud-novacompute-0-compute-instanceha-role" name="compute-instanceha-role" value="true"/> </instance_attributes> </node> <node id="overcloud-novacompute-1" type="remote" uname="overcloud-novacompute-1"> <instance_attributes id="nodes-overcloud-novacompute-1"> <nvpair id="nodes-overcloud-novacompute-1-compute-instanceha-role" name="compute-instanceha-role" value="true"/> </instance_attributes> </node> </nodes> <resources> <primitive 
class="ocf" id="overcloud-novacompute-0" provider="pacemaker" type="remote"> <instance_attributes id="overcloud-novacompute-0-instance_attributes"> <nvpair id="overcloud-novacompute-0-instance_attributes-reconnect_interval" name="reconnect_interval" value="60"/> </instance_attributes> <operations> <op id="overcloud-novacompute-0-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="60"/> <op id="overcloud-novacompute-0-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="60"/> <op id="overcloud-novacompute-0-monitor-interval-20" interval="20" name="monitor"/> <op id="overcloud-novacompute-0-reload-interval-0s" interval="0s" name="reload" timeout="60"/> <op id="overcloud-novacompute-0-start-interval-0s" interval="0s" name="start" timeout="60"/> <op id="overcloud-novacompute-0-stop-interval-0s" interval="0s" name="stop" timeout="60"/> </operations> </primitive> <primitive class="ocf" id="overcloud-novacompute-1" provider="pacemaker" type="remote"> <instance_attributes id="overcloud-novacompute-1-instance_attributes"> <nvpair id="overcloud-novacompute-1-instance_attributes-reconnect_interval" name="reconnect_interval" value="60"/> </instance_attributes> <operations> <op id="overcloud-novacompute-1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="60"/> <op id="overcloud-novacompute-1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="60"/> <op id="overcloud-novacompute-1-monitor-interval-20" interval="20" name="monitor"/> <op id="overcloud-novacompute-1-reload-interval-0s" interval="0s" name="reload" timeout="60"/> <op id="overcloud-novacompute-1-start-interval-0s" interval="0s" name="start" timeout="60"/> <op id="overcloud-novacompute-1-stop-interval-0s" interval="0s" name="stop" timeout="60"/> </operations> </primitive> <bundle id="rabbitmq-bundle"> <docker image="192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest" network="host" options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/> <network control-port="3122"/> <storage> <storage-mapping id="rabbitmq-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/rabbitmq.json" target-dir="/var/lib/kolla/config_files/config.json"/> <storage-mapping id="rabbitmq-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/rabbitmq/" target-dir="/var/lib/kolla/config_files/src"/> <storage-mapping id="rabbitmq-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/> <storage-mapping id="rabbitmq-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/> <storage-mapping id="rabbitmq-lib" options="rw" source-dir="/var/lib/rabbitmq" target-dir="/var/lib/rabbitmq"/> <storage-mapping id="rabbitmq-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/> <storage-mapping id="rabbitmq-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/> <storage-mapping id="rabbitmq-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/> <storage-mapping id="rabbitmq-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/> <storage-mapping id="rabbitmq-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/> </storage> <primitive class="ocf" id="rabbitmq" provider="heartbeat" type="rabbitmq-cluster"> 
<instance_attributes id="rabbitmq-instance_attributes"> <nvpair id="rabbitmq-instance_attributes-set_policy" name="set_policy" value="ha-all ^(?!amq\.).* {"ha-mode":"all"}"/> </instance_attributes> <meta_attributes id="rabbitmq-meta_attributes"> <nvpair id="rabbitmq-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/> <nvpair id="rabbitmq-meta_attributes-notify" name="notify" value="true"/> </meta_attributes> <operations> <op id="rabbitmq-monitor-interval-10" interval="10" name="monitor" timeout="40"/> <op id="rabbitmq-start-interval-0s" interval="0s" name="start" timeout="200s"/> <op id="rabbitmq-stop-interval-0s" interval="0s" name="stop" timeout="200s"/> </operations> </primitive> </bundle> <bundle id="galera-bundle"> <docker image="192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest" masters="3" network="host" options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/> <network control-port="3123"/> <storage> <storage-mapping id="mysql-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/mysql.json" target-dir="/var/lib/kolla/config_files/config.json"/> <storage-mapping id="mysql-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/mysql/" target-dir="/var/lib/kolla/config_files/src"/> <storage-mapping id="mysql-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/> <storage-mapping id="mysql-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/> <storage-mapping id="mysql-lib" options="rw" source-dir="/var/lib/mysql" target-dir="/var/lib/mysql"/> <storage-mapping id="mysql-log-mariadb" options="rw" source-dir="/var/log/mariadb" target-dir="/var/log/mariadb"/> <storage-mapping id="mysql-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/> </storage> <primitive class="ocf" id="galera" provider="heartbeat" type="galera"> <instance_attributes id="galera-instance_attributes"> <nvpair id="galera-instance_attributes-additional_parameters" name="additional_parameters" value="--open-files-limit=16384"/> <nvpair id="galera-instance_attributes-cluster_host_map" name="cluster_host_map" value="controller-0:controller-0.internalapi.localdomain;controller-1:controller-1.internalapi.localdomain;controller-2:controller-2.internalapi.localdomain"/> <nvpair id="galera-instance_attributes-enable_creation" name="enable_creation" value="true"/> <nvpair id="galera-instance_attributes-wsrep_cluster_address" name="wsrep_cluster_address" value="gcomm://controller-0.internalapi.localdomain,controller-1.internalapi.localdomain,controller-2.internalapi.localdomain"/> </instance_attributes> <meta_attributes id="galera-meta_attributes"> <nvpair id="galera-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/> <nvpair id="galera-meta_attributes-master-max" name="master-max" value="3"/> <nvpair id="galera-meta_attributes-ordered" name="ordered" value="true"/> </meta_attributes> <operations> <op id="galera-demote-interval-0s" interval="0s" name="demote" timeout="120"/> <op id="galera-monitor-interval-20" interval="20" name="monitor" timeout="30"/> <op id="galera-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="30"/> <op id="galera-monitor-interval-30" interval="30" name="monitor" role="Slave" timeout="30"/> <op id="galera-promote-interval-0s" interval="0s" name="promote" on-fail="block" timeout="300s"/> <op id="galera-start-interval-0s" 
interval="0s" name="start" timeout="120"/> <op id="galera-stop-interval-0s" interval="0s" name="stop" timeout="120"/> </operations> </primitive> </bundle> <bundle id="redis-bundle"> <docker image="192.168.24.1:8787/rhosp13/openstack-redis:pcmklatest" masters="1" network="host" options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/> <network control-port="3124"/> <storage> <storage-mapping id="redis-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/redis.json" target-dir="/var/lib/kolla/config_files/config.json"/> <storage-mapping id="redis-cfg-data-redis" options="ro" source-dir="/var/lib/config-data/puppet-generated/redis/" target-dir="/var/lib/kolla/config_files/src"/> <storage-mapping id="redis-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/> <storage-mapping id="redis-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/> <storage-mapping id="redis-lib" options="rw" source-dir="/var/lib/redis" target-dir="/var/lib/redis"/> <storage-mapping id="redis-log" options="rw" source-dir="/var/log/redis" target-dir="/var/log/redis"/> <storage-mapping id="redis-run" options="rw" source-dir="/var/run/redis" target-dir="/var/run/redis"/> <storage-mapping id="redis-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/> <storage-mapping id="redis-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/> <storage-mapping id="redis-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/> <storage-mapping id="redis-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/> <storage-mapping id="redis-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/> </storage> <primitive class="ocf" id="redis" provider="heartbeat" type="redis"> <instance_attributes id="redis-instance_attributes"> <nvpair id="redis-instance_attributes-wait_last_known_master" name="wait_last_known_master" value="true"/> </instance_attributes> <meta_attributes id="redis-meta_attributes"> <nvpair id="redis-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/> <nvpair id="redis-meta_attributes-interleave" name="interleave" value="true"/> <nvpair id="redis-meta_attributes-notify" name="notify" value="true"/> <nvpair id="redis-meta_attributes-ordered" name="ordered" value="true"/> </meta_attributes> <operations> <op id="redis-demote-interval-0s" interval="0s" name="demote" timeout="120"/> <op id="redis-monitor-interval-45" interval="45" name="monitor" timeout="60"/> <op id="redis-monitor-interval-20" interval="20" name="monitor" role="Master" timeout="60"/> <op id="redis-monitor-interval-60" interval="60" name="monitor" role="Slave" timeout="60"/> <op id="redis-notify-interval-0s" interval="0s" name="notify" timeout="90"/> <op id="redis-promote-interval-0s" interval="0s" name="promote" timeout="120"/> <op id="redis-start-interval-0s" interval="0s" name="start" timeout="200s"/> <op id="redis-stop-interval-0s" interval="0s" name="stop" timeout="200s"/> </operations> </primitive> </bundle> <primitive class="ocf" id="ip-192.168.24.11" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-192.168.24.11-instance_attributes"> <nvpair id="ip-192.168.24.11-instance_attributes-cidr_netmask" 
name="cidr_netmask" value="32"/> <nvpair id="ip-192.168.24.11-instance_attributes-ip" name="ip" value="192.168.24.11"/> </instance_attributes> <operations> <op id="ip-192.168.24.11-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-192.168.24.11-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op id="ip-192.168.24.11-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <primitive class="ocf" id="ip-10.0.0.110" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-10.0.0.110-instance_attributes"> <nvpair id="ip-10.0.0.110-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/> <nvpair id="ip-10.0.0.110-instance_attributes-ip" name="ip" value="10.0.0.110"/> </instance_attributes> <operations> <op id="ip-10.0.0.110-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-10.0.0.110-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op id="ip-10.0.0.110-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <primitive class="ocf" id="ip-172.17.1.14" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-172.17.1.14-instance_attributes"> <nvpair id="ip-172.17.1.14-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/> <nvpair id="ip-172.17.1.14-instance_attributes-ip" name="ip" value="172.17.1.14"/> </instance_attributes> <operations> <op id="ip-172.17.1.14-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-172.17.1.14-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op id="ip-172.17.1.14-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <primitive class="ocf" id="ip-172.17.1.17" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-172.17.1.17-instance_attributes"> <nvpair id="ip-172.17.1.17-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/> <nvpair id="ip-172.17.1.17-instance_attributes-ip" name="ip" value="172.17.1.17"/> </instance_attributes> <operations> <op id="ip-172.17.1.17-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-172.17.1.17-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op id="ip-172.17.1.17-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <primitive class="ocf" id="ip-172.17.3.11" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-172.17.3.11-instance_attributes"> <nvpair id="ip-172.17.3.11-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/> <nvpair id="ip-172.17.3.11-instance_attributes-ip" name="ip" value="172.17.3.11"/> </instance_attributes> <operations> <op id="ip-172.17.3.11-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-172.17.3.11-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op id="ip-172.17.3.11-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <primitive class="ocf" id="ip-172.17.4.17" provider="heartbeat" type="IPaddr2"> <instance_attributes id="ip-172.17.4.17-instance_attributes"> <nvpair id="ip-172.17.4.17-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/> <nvpair id="ip-172.17.4.17-instance_attributes-ip" name="ip" value="172.17.4.17"/> </instance_attributes> <operations> <op id="ip-172.17.4.17-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/> <op id="ip-172.17.4.17-start-interval-0s" interval="0s" name="start" timeout="20s"/> <op 
id="ip-172.17.4.17-stop-interval-0s" interval="0s" name="stop" timeout="20s"/> </operations> </primitive> <bundle id="haproxy-bundle"> <docker image="192.168.24.1:8787/rhosp13/openstack-haproxy:pcmklatest" network="host" options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3" run-command="/bin/bash /usr/local/bin/kolla_start"/> <storage> <storage-mapping id="haproxy-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/haproxy.json" target-dir="/var/lib/kolla/config_files/config.json"/> <storage-mapping id="haproxy-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/haproxy/" target-dir="/var/lib/kolla/config_files/src"/> <storage-mapping id="haproxy-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/> <storage-mapping id="haproxy-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/> <storage-mapping id="haproxy-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/> <storage-mapping id="haproxy-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/> <storage-mapping id="haproxy-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/> <storage-mapping id="haproxy-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/> <storage-mapping id="haproxy-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/> </storage> </bundle> <primitive class="stonith" id="stonith-fence_compute-fence-nova" type="fence_compute"> <instance_attributes id="stonith-fence_compute-fence-nova-instance_attributes"> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-auth_url" name="auth_url" value="http://10.0.0.110:5000"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-compute-domain" name="compute-domain" value="localdomain"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-passwd" name="passwd" value="C9UkFUsCJrbJgAM7GXcccaeCU"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-project-domain" name="project-domain" value="Default"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-record_only" name="record_only" value="1"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-tenant_name" name="tenant_name" value="admin"/> <nvpair id="stonith-fence_compute-fence-nova-instance_attributes-user-domain" name="user-domain" value="Default"/> </instance_attributes> <meta_attributes id="stonith-fence_compute-fence-nova-meta_attributes"> <nvpair id="stonith-fence_compute-fence-nova-meta_attributes-provides" name="provides" value="unfencing"/> </meta_attributes> <operations> <op id="stonith-fence_compute-fence-nova-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <clone id="compute-unfence-trigger-clone"> <primitive class="ocf" id="compute-unfence-trigger" provider="pacemaker" type="Dummy"> + <meta_attributes id="compute-unfence-trigger-instance_attributes"> + <nvpair id="computer-unfence-trigger-instance_attributes-requires" name="requires" value="unfencing"/> + </meta_attributes> <operations> <op id="compute-unfence-trigger-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20"/> <op 
id="compute-unfence-trigger-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20"/> <op id="compute-unfence-trigger-monitor-interval-10" interval="10" name="monitor" timeout="20"/> <op id="compute-unfence-trigger-reload-interval-0s" interval="0s" name="reload" timeout="20"/> - <op id="compute-unfence-trigger-start-interval-0s" interval="0s" name="start" requires="unfencing"/> + <op id="compute-unfence-trigger-start-interval-0s" interval="0s" name="start"/> <op id="compute-unfence-trigger-stop-interval-0s" interval="0s" name="stop" timeout="20"/> </operations> </primitive> </clone> <primitive class="ocf" id="nova-evacuate" provider="openstack" type="NovaEvacuate"> <instance_attributes id="nova-evacuate-instance_attributes"> <nvpair id="nova-evacuate-instance_attributes-auth_url" name="auth_url" value="http://10.0.0.110:5000"/> <nvpair id="nova-evacuate-instance_attributes-no_shared_storage" name="no_shared_storage" value="true"/> <nvpair id="nova-evacuate-instance_attributes-password" name="password" value="C9UkFUsCJrbJgAM7GXcccaeCU"/> <nvpair id="nova-evacuate-instance_attributes-project_domain" name="project_domain" value="Default"/> <nvpair id="nova-evacuate-instance_attributes-tenant_name" name="tenant_name" value="admin"/> <nvpair id="nova-evacuate-instance_attributes-user_domain" name="user_domain" value="Default"/> <nvpair id="nova-evacuate-instance_attributes-username" name="username" value="admin"/> </instance_attributes> <operations> <op id="nova-evacuate-monitor-interval-10" interval="10" name="monitor" timeout="600"/> <op id="nova-evacuate-start-interval-0s" interval="0s" name="start" timeout="20"/> <op id="nova-evacuate-stop-interval-0s" interval="0s" name="stop" timeout="20"/> </operations> </primitive> <primitive class="stonith" id="stonith-fence_ipmilan-5254008be2cc" type="fence_ipmilan"> <instance_attributes id="stonith-fence_ipmilan-5254008be2cc-instance_attributes"> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.1"/> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-ipport" name="ipport" value="6230"/> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-lanplus" name="lanplus" value="1"/> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-passwd" name="passwd" value="password"/> <nvpair id="stonith-fence_ipmilan-5254008be2cc-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="overcloud-novacompute-1"/> </instance_attributes> <operations> <op id="stonith-fence_ipmilan-5254008be2cc-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <primitive class="stonith" id="stonith-fence_ipmilan-525400803f9e" type="fence_ipmilan"> <instance_attributes id="stonith-fence_ipmilan-525400803f9e-instance_attributes"> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.1"/> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-ipport" name="ipport" value="6233"/> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-lanplus" name="lanplus" value="1"/> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-passwd" name="passwd" value="password"/> <nvpair id="stonith-fence_ipmilan-525400803f9e-instance_attributes-pcmk_host_list" 
name="pcmk_host_list" value="controller-2"/> </instance_attributes> <operations> <op id="stonith-fence_ipmilan-525400803f9e-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <primitive class="stonith" id="stonith-fence_ipmilan-525400fca120" type="fence_ipmilan"> <instance_attributes id="stonith-fence_ipmilan-525400fca120-instance_attributes"> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.1"/> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-ipport" name="ipport" value="6234"/> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-lanplus" name="lanplus" value="1"/> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-passwd" name="passwd" value="password"/> <nvpair id="stonith-fence_ipmilan-525400fca120-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="overcloud-novacompute-0"/> </instance_attributes> <operations> <op id="stonith-fence_ipmilan-525400fca120-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <primitive class="stonith" id="stonith-fence_ipmilan-525400953d48" type="fence_ipmilan"> <instance_attributes id="stonith-fence_ipmilan-525400953d48-instance_attributes"> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.1"/> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-ipport" name="ipport" value="6232"/> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-lanplus" name="lanplus" value="1"/> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-passwd" name="passwd" value="password"/> <nvpair id="stonith-fence_ipmilan-525400953d48-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="controller-1"/> </instance_attributes> <operations> <op id="stonith-fence_ipmilan-525400953d48-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <primitive class="stonith" id="stonith-fence_ipmilan-525400b02b86" type="fence_ipmilan"> <instance_attributes id="stonith-fence_ipmilan-525400b02b86-instance_attributes"> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-ipaddr" name="ipaddr" value="172.16.0.1"/> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-ipport" name="ipport" value="6231"/> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-lanplus" name="lanplus" value="1"/> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-login" name="login" value="admin"/> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-passwd" name="passwd" value="password"/> <nvpair id="stonith-fence_ipmilan-525400b02b86-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="controller-0"/> </instance_attributes> <operations> <op id="stonith-fence_ipmilan-525400b02b86-monitor-interval-60s" interval="60s" name="monitor"/> </operations> </primitive> <bundle id="openstack-cinder-volume"> <docker image="192.168.24.1:8787/rhosp13/openstack-cinder-volume:pcmklatest" network="host" options="--ipc=host --privileged=true --user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="1" run-command="/bin/bash /usr/local/bin/kolla_start"/> <storage> <storage-mapping id="cinder-volume-etc-hosts" options="ro" 
source-dir="/etc/hosts" target-dir="/etc/hosts"/> <storage-mapping id="cinder-volume-etc-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/> <storage-mapping id="cinder-volume-etc-pki-ca-trust-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/> <storage-mapping id="cinder-volume-etc-pki-tls-certs-ca-bundle.crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/> <storage-mapping id="cinder-volume-etc-pki-tls-certs-ca-bundle.trust.crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/> <storage-mapping id="cinder-volume-etc-pki-tls-cert.pem" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/> <storage-mapping id="cinder-volume-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/> <storage-mapping id="cinder-volume-etc-ssh-ssh_known_hosts" options="ro" source-dir="/etc/ssh/ssh_known_hosts" target-dir="/etc/ssh/ssh_known_hosts"/> <storage-mapping id="cinder-volume-etc-puppet" options="ro" source-dir="/etc/puppet" target-dir="/etc/puppet"/> <storage-mapping id="cinder-volume-var-lib-kolla-config_files-cinder_volume.json" options="ro" source-dir="/var/lib/kolla/config_files/cinder_volume.json" target-dir="/var/lib/kolla/config_files/config.json"/> <storage-mapping id="cinder-volume-var-lib-config-data-puppet-generated-cinder-" options="ro" source-dir="/var/lib/config-data/puppet-generated/cinder/" target-dir="/var/lib/kolla/config_files/src"/> <storage-mapping id="cinder-volume-etc-iscsi" options="ro" source-dir="/etc/iscsi" target-dir="/var/lib/kolla/config_files/src-iscsid"/> <storage-mapping id="cinder-volume-etc-ceph" options="ro" source-dir="/etc/ceph" target-dir="/var/lib/kolla/config_files/src-ceph"/> <storage-mapping id="cinder-volume-lib-modules" options="ro" source-dir="/lib/modules" target-dir="/lib/modules"/> <storage-mapping id="cinder-volume-dev-" options="rw" source-dir="/dev/" target-dir="/dev/"/> <storage-mapping id="cinder-volume-run-" options="rw" source-dir="/run/" target-dir="/run/"/> <storage-mapping id="cinder-volume-sys" options="rw" source-dir="/sys" target-dir="/sys"/> <storage-mapping id="cinder-volume-var-lib-cinder" options="rw" source-dir="/var/lib/cinder" target-dir="/var/lib/cinder"/> <storage-mapping id="cinder-volume-var-log-containers-cinder" options="rw" source-dir="/var/log/containers/cinder" target-dir="/var/log/cinder"/> </storage> </bundle> </resources> <constraints> <rsc_location id="location-rabbitmq-bundle" resource-discovery="exclusive" rsc="rabbitmq-bundle"> <rule id="location-rabbitmq-bundle-rule" score="0"> <expression attribute="rabbitmq-role" id="location-rabbitmq-bundle-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-galera-bundle" resource-discovery="exclusive" rsc="galera-bundle"> <rule id="location-galera-bundle-rule" score="0"> <expression attribute="galera-role" id="location-galera-bundle-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-redis-bundle" resource-discovery="exclusive" rsc="redis-bundle"> <rule id="location-redis-bundle-rule" score="0"> <expression attribute="redis-role" id="location-redis-bundle-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-192.168.24.11" resource-discovery="exclusive" rsc="ip-192.168.24.11"> <rule 
id="location-ip-192.168.24.11-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-192.168.24.11-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-10.0.0.110" resource-discovery="exclusive" rsc="ip-10.0.0.110"> <rule id="location-ip-10.0.0.110-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-10.0.0.110-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-172.17.1.14" resource-discovery="exclusive" rsc="ip-172.17.1.14"> <rule id="location-ip-172.17.1.14-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-172.17.1.14-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-172.17.1.17" resource-discovery="exclusive" rsc="ip-172.17.1.17"> <rule id="location-ip-172.17.1.17-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-172.17.1.17-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-172.17.3.11" resource-discovery="exclusive" rsc="ip-172.17.3.11"> <rule id="location-ip-172.17.3.11-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-172.17.3.11-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-ip-172.17.4.17" resource-discovery="exclusive" rsc="ip-172.17.4.17"> <rule id="location-ip-172.17.4.17-rule" score="0"> <expression attribute="haproxy-role" id="location-ip-172.17.4.17-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-haproxy-bundle" resource-discovery="exclusive" rsc="haproxy-bundle"> <rule id="location-haproxy-bundle-rule" score="0"> <expression attribute="haproxy-role" id="location-haproxy-bundle-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_order first="ip-192.168.24.11" first-action="start" id="order-ip-192.168.24.11-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation id="colocation-ip-192.168.24.11-haproxy-bundle-INFINITY" rsc="ip-192.168.24.11" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_order first="ip-10.0.0.110" first-action="start" id="order-ip-10.0.0.110-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation id="colocation-ip-10.0.0.110-haproxy-bundle-INFINITY" rsc="ip-10.0.0.110" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_order first="ip-172.17.1.14" first-action="start" id="order-ip-172.17.1.14-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation id="colocation-ip-172.17.1.14-haproxy-bundle-INFINITY" rsc="ip-172.17.1.14" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_order first="ip-172.17.1.17" first-action="start" id="order-ip-172.17.1.17-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation id="colocation-ip-172.17.1.17-haproxy-bundle-INFINITY" rsc="ip-172.17.1.17" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_order first="ip-172.17.3.11" first-action="start" id="order-ip-172.17.3.11-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation id="colocation-ip-172.17.3.11-haproxy-bundle-INFINITY" rsc="ip-172.17.3.11" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_order first="ip-172.17.4.17" first-action="start" id="order-ip-172.17.4.17-haproxy-bundle-Optional" kind="Optional" then="haproxy-bundle" then-action="start"/> <rsc_colocation 
id="colocation-ip-172.17.4.17-haproxy-bundle-INFINITY" rsc="ip-172.17.4.17" score="INFINITY" with-rsc="haproxy-bundle"/> <rsc_location id="location-compute-unfence-trigger-clone" resource-discovery="never" rsc="compute-unfence-trigger-clone"> <rule id="location-compute-unfence-trigger-clone-rule" score="-INFINITY"> <expression attribute="compute-instanceha-role" id="location-compute-unfence-trigger-clone-rule-expr" operation="ne" value="true"/> </rule> </rsc_location> <rsc_location id="location-nova-evacuate" resource-discovery="never" rsc="nova-evacuate"> <rule id="location-nova-evacuate-rule" score="-INFINITY"> <expression attribute="compute-instanceha-role" id="location-nova-evacuate-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> <rsc_location id="location-stonith-fence_ipmilan-5254008be2cc-overcloud-novacompute-1--INFINITY" node="overcloud-novacompute-1" rsc="stonith-fence_ipmilan-5254008be2cc" score="-INFINITY"/> <rsc_location id="location-stonith-fence_ipmilan-525400803f9e-controller-2--INFINITY" node="controller-2" rsc="stonith-fence_ipmilan-525400803f9e" score="-INFINITY"/> <rsc_location id="location-stonith-fence_ipmilan-525400fca120-overcloud-novacompute-0--INFINITY" node="overcloud-novacompute-0" rsc="stonith-fence_ipmilan-525400fca120" score="-INFINITY"/> <rsc_location id="location-stonith-fence_ipmilan-525400953d48-controller-1--INFINITY" node="controller-1" rsc="stonith-fence_ipmilan-525400953d48" score="-INFINITY"/> <rsc_location id="location-stonith-fence_ipmilan-525400b02b86-controller-0--INFINITY" node="controller-0" rsc="stonith-fence_ipmilan-525400b02b86" score="-INFINITY"/> <rsc_location id="location-openstack-cinder-volume" resource-discovery="exclusive" rsc="openstack-cinder-volume"> <rule id="location-openstack-cinder-volume-rule" score="0"> <expression attribute="cinder-volume-role" id="location-openstack-cinder-volume-rule-expr" operation="eq" value="true"/> </rule> </rsc_location> </constraints> <rsc_defaults> <meta_attributes id="rsc_defaults-options"> <nvpair id="rsc_defaults-options-requires" name="requires" value="fencing"/> </meta_attributes> </rsc_defaults> <fencing-topology> <fencing-level devices="stonith-fence_ipmilan-5254008be2cc,stonith-fence_compute-fence-nova" id="fl-overcloud-novacompute-1-1" index="1" target="overcloud-novacompute-1"/> <fencing-level devices="stonith-fence_ipmilan-525400fca120,stonith-fence_compute-fence-nova" id="fl-overcloud-novacompute-0-1" index="1" target="overcloud-novacompute-0"/> </fencing-topology> </configuration> <status> <node_state id="1" uname="controller-0" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"> <lrm id="1"> <lrm_resources> <lrm_resource id="overcloud-novacompute-0" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-0_last_0" operation_key="overcloud-novacompute-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:2:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;5:2:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="2" rc-code="0" op-status="0" interval="0" last-run="1523253844" last-rc-change="1523253844" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="overcloud-novacompute-0_monitor_20000" operation_key="overcloud-novacompute-0_monitor_20000" operation="monitor" 
crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:3:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;4:3:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="3" rc-code="0" op-status="0" interval="20000" last-rc-change="1523253845" exec-time="0" queue-time="0" op-digest="6e5bb737f46c381d8a46fb4162afd9e0"/> </lrm_resource> <lrm_resource id="overcloud-novacompute-1" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-1_last_0" operation_key="overcloud-novacompute-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:4:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;3:4:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="4" rc-code="7" op-status="0" interval="0" last-run="1523253850" last-rc-change="1523253850" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-0_last_0" operation_key="rabbitmq-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;4:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="7" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="66" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-1_last_0" operation_key="rabbitmq-bundle-docker-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;7:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="20" rc-code="0" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="476" queue-time="0" op-digest="b98bc83a9464eae737e61f982504f408"/> <lrm_rsc_op id="rabbitmq-bundle-docker-1_monitor_60000" operation_key="rabbitmq-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="2:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;2:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="22" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254239" exec-time="112" queue-time="0" op-digest="a92ffbf2574a3afb9f74015038198578"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-2_last_0" operation_key="rabbitmq-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;6:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="15" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="50" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource 
id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-0"> <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="10:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;10:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-1"> <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="30:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;30:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="7" rc-code="0" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="rabbitmq-bundle-1_monitor_60000" operation_key="rabbitmq-bundle-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="23:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;23:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="9" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254242" exec-time="0" queue-time="0" op-digest="cca9d25863986607e6440270f0640ff5"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-2"> <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;12:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="8" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-0_last_0" operation_key="galera-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="13:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;13:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="28" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="63" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-1_last_0" operation_key="galera-bundle-docker-1_start_0" operation="start" 
crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="16:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;16:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="41" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="483" queue-time="0" op-digest="f59bb51e6389f4d63dbcb06ac495ab54"/> <lrm_rsc_op id="galera-bundle-docker-1_monitor_60000" operation_key="galera-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;4:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="42" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254365" exec-time="125" queue-time="0" op-digest="da69a05244e0542f04b6d4082f952dd2"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-2_last_0" operation_key="galera-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="15:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;15:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="36" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="52" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-1"> <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="73:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;73:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="13" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="galera-bundle-1_monitor_60000" operation_key="galera-bundle-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="66:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;66:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="14" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254368" exec-time="0" queue-time="0" op-digest="1ef5cbd05a3687236c669af720a810bc"/> </lrm_resource> <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-0"> <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="19:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;19:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="11" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource 
id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-2"> <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="21:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;21:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="12" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="22:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;22:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="49" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="77" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="25:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;25:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="62" rc-code="0" op-status="0" interval="0" last-run="1523254512" last-rc-change="1523254512" exec-time="480" queue-time="0" op-digest="a56be011e18207563896aee9c274b919"/> <lrm_rsc_op id="redis-bundle-docker-1_monitor_60000" operation_key="redis-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;6:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="63" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254513" exec-time="118" queue-time="0" op-digest="d1455f1061820f1ac6025d510700ac87"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="24:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;24:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="57" rc-code="7" op-status="0" interval="0" last-run="1523254479" last-rc-change="1523254479" exec-time="52" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1"> <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="122:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;122:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" 
on_node="controller-0" call-id="16" rc-code="0" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="redis-bundle-1_monitor_60000" operation_key="redis-bundle-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="115:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;115:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254516" exec-time="0" queue-time="0" op-digest="e3ae7972e4989b9b928f3ff082c5ac23"/> </lrm_resource> <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2"> <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="30:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;30:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0"> <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="28:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;28:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="18" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="ip-192.168.24.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-192.168.24.11_last_0" operation_key="ip-192.168.24.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;33:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="70" rc-code="7" op-status="0" interval="0" last-run="1523254559" last-rc-change="1523254559" exec-time="48" queue-time="0" op-digest="d16224b1cc961154d457d259d944b5f9"/> </lrm_resource> <lrm_resource id="ip-10.0.0.110" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-10.0.0.110_last_0" operation_key="ip-10.0.0.110_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="183:199:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;183:199:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="153" rc-code="0" op-status="0" interval="0" last-run="1523260399" last-rc-change="1523260399" exec-time="82" queue-time="0" op-digest="95d16a18326229bbadba9e9540c77da8"/> <lrm_rsc_op id="ip-10.0.0.110_monitor_10000" operation_key="ip-10.0.0.110_monitor_10000" 
operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="167:52:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;167:52:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="76" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254584" exec-time="44" queue-time="0" op-digest="e41cc851c988a0fc4ea36f5d3d02ce43"/> </lrm_resource> <lrm_resource id="ip-172.17.1.14" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.14_last_0" operation_key="ip-172.17.1.14_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;35:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="80" rc-code="7" op-status="0" interval="0" last-run="1523254591" last-rc-change="1523254591" exec-time="45" queue-time="0" op-digest="1fab2783a8d283e33a945588908e98a4"/> </lrm_resource> <lrm_resource id="ip-172.17.1.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.17_last_0" operation_key="ip-172.17.1.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;36:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="84" rc-code="7" op-status="0" interval="0" last-run="1523254608" last-rc-change="1523254608" exec-time="46" queue-time="0" op-digest="edc8fdae4cc326c15a779f36f28eb3f8"/> </lrm_resource> <lrm_resource id="ip-172.17.3.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.3.11_last_0" operation_key="ip-172.17.3.11_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="175:61:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;175:61:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="89" rc-code="0" op-status="0" interval="0" last-run="1523254630" last-rc-change="1523254630" exec-time="4086" queue-time="0" op-digest="e88426fed0f2d870d791fdc3f7380de6"/> <lrm_rsc_op id="ip-172.17.3.11_monitor_10000" operation_key="ip-172.17.3.11_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="176:61:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;176:61:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="90" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254634" exec-time="44" queue-time="0" op-digest="6eab6d68a815450cbf38f3078bdfcf7a"/> </lrm_resource> <lrm_resource id="ip-172.17.4.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.4.17_last_0" operation_key="ip-172.17.4.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;38:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="94" rc-code="7" op-status="0" interval="0" last-run="1523254642" last-rc-change="1523254642" exec-time="51" queue-time="0" op-digest="c0f293651f2b055f54a8b5665a3d28e2"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-0_last_0" 
operation_key="haproxy-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;39:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="98" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="70" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-1_last_0" operation_key="haproxy-bundle-docker-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="183:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;183:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="107" rc-code="0" op-status="0" interval="0" last-run="1523254662" last-rc-change="1523254662" exec-time="505" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> <lrm_rsc_op id="haproxy-bundle-docker-1_monitor_60000" operation_key="haproxy-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="184:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;184:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254662" exec-time="121" queue-time="0" op-digest="0f463f8dd703ccc5d6db7e8257b7e651"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-2_last_0" operation_key="haproxy-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;41:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="106" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="53" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="stonith-fence_compute-fence-nova" type="fence_compute" class="stonith"> <lrm_rsc_op id="stonith-fence_compute-fence-nova_last_0" operation_key="stonith-fence_compute-fence-nova_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="42:86:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;42:86:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="112" rc-code="7" op-status="0" interval="0" last-run="1523255082" last-rc-change="1523255082" exec-time="3" queue-time="0" op-digest="ad312d85623cdb0a792e6fbd5e91a820" op-secure-params=" password passwd " op-secure-digest="caf046d0a0953a2e7d15bbbde1371880"/> </lrm_resource> <lrm_resource id="compute-unfence-trigger" type="Dummy" class="ocf" provider="pacemaker"> <lrm_rsc_op id="compute-unfence-trigger_last_0" operation_key="compute-unfence-trigger_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="43:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;43:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="117" rc-code="7" op-status="0" interval="0" last-run="1523255087" 
last-rc-change="1523255087" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="nova-evacuate" type="NovaEvacuate" class="ocf" provider="openstack"> <lrm_rsc_op id="nova-evacuate_last_0" operation_key="nova-evacuate_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="210:92:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;210:92:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="122" rc-code="0" op-status="0" interval="0" last-run="1523255098" last-rc-change="1523255098" exec-time="23" queue-time="0" op-digest="3b592b4abd94b571edcf74f5e4246f65" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> <lrm_rsc_op id="nova-evacuate_monitor_10000" operation_key="nova-evacuate_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="211:92:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;211:92:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="123" rc-code="0" op-status="0" interval="10000" last-rc-change="1523255098" exec-time="34" queue-time="0" op-digest="ff0011764ea46c541794367160ba3d8b" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400803f9e" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400803f9e_last_0" operation_key="stonith-fence_ipmilan-525400803f9e_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="226:110:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;226:110:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="136" rc-code="0" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="365" queue-time="0" op-digest="4998c836382aeb3820357da01e70b561" op-secure-params=" password passwd " op-secure-digest="00daf85972094b7cf1a1d4fec3e2ed54"/> <lrm_rsc_op id="stonith-fence_ipmilan-525400803f9e_monitor_60000" operation_key="stonith-fence_ipmilan-525400803f9e_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="220:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;220:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="149" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255310" exec-time="197" queue-time="0" op-digest="477745be04d3d31937853c306f8d724e" op-secure-params=" password passwd " op-secure-digest="00daf85972094b7cf1a1d4fec3e2ed54"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-5254008be2cc" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-5254008be2cc_last_0" operation_key="stonith-fence_ipmilan-5254008be2cc_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="46:95:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;46:95:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="131" rc-code="7" op-status="0" interval="0" last-run="1523255212" last-rc-change="1523255212" exec-time="0" 
queue-time="0" op-digest="4938a34bfc64185ad6e5f9441c84bf68" op-secure-params=" password passwd " op-secure-digest="c106093670a7862f33a04c6c920209b9"/> </lrm_resource> <lrm_resource id="openstack-cinder-volume-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="openstack-cinder-volume-docker-0_last_0" operation_key="openstack-cinder-volume-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="230:113:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;230:113:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="150" rc-code="0" op-status="0" interval="0" last-run="1523255321" last-rc-change="1523255321" exec-time="555" queue-time="0" op-digest="36b2791c98a366f2f5e7a1d5ef48b3c9"/> <lrm_rsc_op id="openstack-cinder-volume-docker-0_monitor_60000" operation_key="openstack-cinder-volume-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="231:113:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;231:113:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="151" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255321" exec-time="138" queue-time="0" op-digest="7f43b574f29809ca71e0fe1cf0494d33"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400fca120" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400fca120_last_0" operation_key="stonith-fence_ipmilan-525400fca120_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="47:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;47:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="140" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="0" queue-time="0" op-digest="7e9aea21dfd22b59b27b07578090d9d6" op-secure-params=" password passwd " op-secure-digest="562546912c5a20954742c65d069a4fb4"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400953d48" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400953d48_last_0" operation_key="stonith-fence_ipmilan-525400953d48_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="49:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;49:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="144" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="0" queue-time="0" op-digest="a2ec1560bddbd621ae7ff630da4c9f89" op-secure-params=" password passwd " op-secure-digest="1a1e4d964e3025ec522d4b4d96995bbb"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400b02b86" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400b02b86_last_0" operation_key="stonith-fence_ipmilan-525400b02b86_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="50:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;50:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="148" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="0" queue-time="0" op-digest="800b95e336cd972373e8e2f87f98171f" op-secure-params=" password passwd " 
op-secure-digest="d207be35798fa3d6ad04e543cad39795"/> </lrm_resource> </lrm_resources> </lrm> <transient_attributes id="1"> <instance_attributes id="status-1"> <nvpair id="status-1-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-0"/> <nvpair id="status-1-master-galera" name="master-galera" value="100"/> <nvpair id="status-1-master-redis" name="master-redis" value="1"/> <nvpair id="status-1-.node-unfenced" name="#node-unfenced" value="1523255309"/> </instance_attributes> </transient_attributes> </node_state> <node_state id="3" uname="controller-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"> <lrm id="3"> <lrm_resources> <lrm_resource id="overcloud-novacompute-0" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-0_last_0" operation_key="overcloud-novacompute-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="4:2:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;4:2:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="1" rc-code="7" op-status="0" interval="0" last-run="1523253844" last-rc-change="1523253844" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="overcloud-novacompute-1" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-1_last_0" operation_key="overcloud-novacompute-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:4:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;5:4:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="2" rc-code="7" op-status="0" interval="0" last-run="1523253850" last-rc-change="1523253850" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-0_last_0" operation_key="rabbitmq-bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;8:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="20" rc-code="0" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="464" queue-time="0" op-digest="c83e30d1be4c766081a315f8dca3b549"/> <lrm_rsc_op id="rabbitmq-bundle-docker-0_monitor_60000" operation_key="rabbitmq-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;3:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254239" exec-time="122" queue-time="0" op-digest="6808c0eadd739fdd40f1907e33d375e1"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-1_last_0" operation_key="rabbitmq-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" 
crm_feature_set="3.0.14" transition-key="11:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;11:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="11" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="59" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-2_last_0" operation_key="rabbitmq-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="12:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;12:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="15" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="60" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-0"> <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;27:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="4" rc-code="0" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="rabbitmq-bundle-0_monitor_60000" operation_key="rabbitmq-bundle-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="18:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;18:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="7" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254242" exec-time="0" queue-time="0" op-digest="19642b17998fd21ff264d776e5dcffb7"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-1"> <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="17:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;17:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-2"> <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="18:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;18:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="6" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" 
exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-0_last_0" operation_key="galera-bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="17:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;17:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="41" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="490" queue-time="0" op-digest="797df9912d2d65e7a966caa475ac29cb"/> <lrm_rsc_op id="galera-bundle-docker-0_monitor_60000" operation_key="galera-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;7:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="43" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254365" exec-time="129" queue-time="0" op-digest="1e9b629cf35f3aed274b0d3cdf2cce1d"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-1_last_0" operation_key="galera-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="20:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;20:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="32" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="53" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-2_last_0" operation_key="galera-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="21:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;21:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="36" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="54" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-1"> <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="26:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;26:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="8" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-0"> <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_start_0" operation="start" 
crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="70:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;70:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="11" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="galera-bundle-0_monitor_60000" operation_key="galera-bundle-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="61:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;61:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="12" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254368" exec-time="0" queue-time="0" op-digest="67d79251e7c202b367dded079a1a25eb"/> </lrm_resource> <lrm_resource id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-2"> <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;27:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="10" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="26:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;26:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="62" rc-code="0" op-status="0" interval="0" last-run="1523254512" last-rc-change="1523254512" exec-time="488" queue-time="0" op-digest="04919e02030710638e3b1bf69219c111"/> <lrm_rsc_op id="redis-bundle-docker-0_monitor_60000" operation_key="redis-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="11:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;11:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="65" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254513" exec-time="109" queue-time="0" op-digest="af571023226cf7a33ba8af96a01ecb45"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="29:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;29:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="53" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="72" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-2" 
type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="30:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;30:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="57" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="61" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1"> <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;35:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2"> <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;36:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="14" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0"> <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="119:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;119:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="16" rc-code="0" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="redis-bundle-0_monitor_60000" operation_key="redis-bundle-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="110:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;110:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="17" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254516" exec-time="0" queue-time="0" op-digest="1c97595acf862a2449ada08e9770bdf5"/> </lrm_resource> <lrm_resource id="ip-192.168.24.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-192.168.24.11_last_0" operation_key="ip-192.168.24.11_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" 
transition-key="163:49:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;163:49:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="71" rc-code="0" op-status="0" interval="0" last-run="1523254563" last-rc-change="1523254563" exec-time="4089" queue-time="0" op-digest="d16224b1cc961154d457d259d944b5f9"/> <lrm_rsc_op id="ip-192.168.24.11_monitor_10000" operation_key="ip-192.168.24.11_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="164:49:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;164:49:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="72" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254567" exec-time="41" queue-time="0" op-digest="3c7f50892f8841b4bb5e063452ca4db2"/> </lrm_resource> <lrm_resource id="ip-10.0.0.110" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-10.0.0.110_last_0" operation_key="ip-10.0.0.110_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;36:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="76" rc-code="7" op-status="0" interval="0" last-run="1523254574" last-rc-change="1523254574" exec-time="46" queue-time="0" op-digest="95d16a18326229bbadba9e9540c77da8"/> </lrm_resource> <lrm_resource id="ip-172.17.1.14" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.14_last_0" operation_key="ip-172.17.1.14_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="37:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;37:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="80" rc-code="7" op-status="0" interval="0" last-run="1523254591" last-rc-change="1523254591" exec-time="48" queue-time="0" op-digest="1fab2783a8d283e33a945588908e98a4"/> </lrm_resource> <lrm_resource id="ip-172.17.1.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.17_last_0" operation_key="ip-172.17.1.17_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="172:58:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;172:58:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="85" rc-code="0" op-status="0" interval="0" last-run="1523254613" last-rc-change="1523254613" exec-time="4082" queue-time="0" op-digest="edc8fdae4cc326c15a779f36f28eb3f8"/> <lrm_rsc_op id="ip-172.17.1.17_monitor_10000" operation_key="ip-172.17.1.17_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="173:58:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;173:58:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="86" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254617" exec-time="49" queue-time="0" op-digest="637be44014a8de2a8162cb2b062f6955"/> </lrm_resource> <lrm_resource id="ip-172.17.3.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.3.11_last_0" operation_key="ip-172.17.3.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" 
transition-magic="0:7;39:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="90" rc-code="7" op-status="0" interval="0" last-run="1523254624" last-rc-change="1523254624" exec-time="47" queue-time="1" op-digest="e88426fed0f2d870d791fdc3f7380de6"/> </lrm_resource> <lrm_resource id="ip-172.17.4.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.4.17_last_0" operation_key="ip-172.17.4.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="40:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;40:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="94" rc-code="7" op-status="0" interval="0" last-run="1523254641" last-rc-change="1523254641" exec-time="56" queue-time="0" op-digest="c0f293651f2b055f54a8b5665a3d28e2"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-0_last_0" operation_key="haproxy-bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="181:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;181:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="107" rc-code="0" op-status="0" interval="0" last-run="1523254662" last-rc-change="1523254662" exec-time="477" queue-time="1" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> <lrm_rsc_op id="haproxy-bundle-docker-0_monitor_60000" operation_key="haproxy-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="182:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;182:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254662" exec-time="117" queue-time="1" op-digest="0f463f8dd703ccc5d6db7e8257b7e651"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-1_last_0" operation_key="haproxy-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="46:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;46:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="102" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="71" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-2_last_0" operation_key="haproxy-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="47:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;47:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="106" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="64" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="stonith-fence_compute-fence-nova" type="fence_compute" class="stonith"> <lrm_rsc_op id="stonith-fence_compute-fence-nova_last_0" operation_key="stonith-fence_compute-fence-nova_start_0" 
operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="204:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;204:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="134" rc-code="0" op-status="0" interval="0" last-run="1523255234" last-rc-change="1523255234" exec-time="2221" queue-time="0" op-digest="ad312d85623cdb0a792e6fbd5e91a820" op-secure-params=" password passwd " op-secure-digest="caf046d0a0953a2e7d15bbbde1371880"/> <lrm_rsc_op id="stonith-fence_compute-fence-nova_monitor_60000" operation_key="stonith-fence_compute-fence-nova_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="209:104:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;209:104:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="143" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255236" exec-time="2157" queue-time="0" op-digest="097576694f77e42f68d21455cdd11169" op-secure-params=" password passwd " op-secure-digest="caf046d0a0953a2e7d15bbbde1371880"/> <lrm_rsc_op id="stonith-fence_compute-fence-nova_last_failure_0" operation_key="stonith-fence_compute-fence-nova_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="209:104:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="4:1;209:104:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="143" rc-code="1" op-status="4" interval="60000" last-rc-change="1523260448" exec-time="7330" queue-time="0" op-digest="097576694f77e42f68d21455cdd11169" op-secure-params=" password passwd " op-secure-digest="caf046d0a0953a2e7d15bbbde1371880"/> </lrm_resource> <lrm_resource id="compute-unfence-trigger" type="Dummy" class="ocf" provider="pacemaker"> <lrm_rsc_op id="compute-unfence-trigger_last_0" operation_key="compute-unfence-trigger_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="45:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;45:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="119" rc-code="7" op-status="0" interval="0" last-run="1523255087" last-rc-change="1523255087" exec-time="22" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="nova-evacuate" type="NovaEvacuate" class="ocf" provider="openstack"> <lrm_rsc_op id="nova-evacuate_last_0" operation_key="nova-evacuate_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="47:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;47:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="123" rc-code="7" op-status="0" interval="0" last-run="1523255094" last-rc-change="1523255094" exec-time="31" queue-time="0" op-digest="3b592b4abd94b571edcf74f5e4246f65" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400803f9e" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400803f9e_last_0" operation_key="stonith-fence_ipmilan-525400803f9e_monitor_0" 
operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="50:94:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;50:94:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="127" rc-code="7" op-status="0" interval="0" last-run="1523255211" last-rc-change="1523255211" exec-time="0" queue-time="0" op-digest="4998c836382aeb3820357da01e70b561" op-secure-params=" password passwd " op-secure-digest="00daf85972094b7cf1a1d4fec3e2ed54"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-5254008be2cc" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-5254008be2cc_last_0" operation_key="stonith-fence_ipmilan-5254008be2cc_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="50:95:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;50:95:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="131" rc-code="7" op-status="0" interval="0" last-run="1523255212" last-rc-change="1523255212" exec-time="0" queue-time="0" op-digest="4938a34bfc64185ad6e5f9441c84bf68" op-secure-params=" password passwd " op-secure-digest="c106093670a7862f33a04c6c920209b9"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400fca120" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400fca120_last_0" operation_key="stonith-fence_ipmilan-525400fca120_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="221:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;221:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="152" rc-code="0" op-status="0" interval="0" last-run="1523255310" last-rc-change="1523255310" exec-time="244" queue-time="0" op-digest="7e9aea21dfd22b59b27b07578090d9d6" op-secure-params=" password passwd " op-secure-digest="562546912c5a20954742c65d069a4fb4"/> <lrm_rsc_op id="stonith-fence_ipmilan-525400fca120_monitor_60000" operation_key="stonith-fence_ipmilan-525400fca120_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="222:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;222:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="154" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255310" exec-time="206" queue-time="0" op-digest="053570494221e4b2e9b2cde0413b19b8" op-secure-params=" password passwd " op-secure-digest="562546912c5a20954742c65d069a4fb4"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400953d48" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400953d48_last_0" operation_key="stonith-fence_ipmilan-525400953d48_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="223:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;223:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="153" rc-code="0" op-status="0" interval="0" last-run="1523255310" last-rc-change="1523255310" exec-time="371" queue-time="0" op-digest="a2ec1560bddbd621ae7ff630da4c9f89" op-secure-params=" password passwd " op-secure-digest="1a1e4d964e3025ec522d4b4d96995bbb"/> <lrm_rsc_op id="stonith-fence_ipmilan-525400953d48_monitor_60000" operation_key="stonith-fence_ipmilan-525400953d48_monitor_60000" operation="monitor" 
crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="224:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;224:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="155" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255310" exec-time="171" queue-time="0" op-digest="48dba31eb625f0bae1fbabc1bf7fb44c" op-secure-params=" password passwd " op-secure-digest="1a1e4d964e3025ec522d4b4d96995bbb"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400b02b86" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400b02b86_last_0" operation_key="stonith-fence_ipmilan-525400b02b86_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="53:104:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;53:104:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="147" rc-code="7" op-status="0" interval="0" last-run="1523255236" last-rc-change="1523255236" exec-time="0" queue-time="0" op-digest="800b95e336cd972373e8e2f87f98171f" op-secure-params=" password passwd " op-secure-digest="d207be35798fa3d6ad04e543cad39795"/> </lrm_resource> <lrm_resource id="openstack-cinder-volume-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="openstack-cinder-volume-docker-0_last_0" operation_key="openstack-cinder-volume-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="53:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;53:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="151" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="72" queue-time="0" op-digest="36b2791c98a366f2f5e7a1d5ef48b3c9"/> </lrm_resource> </lrm_resources> </lrm> <transient_attributes id="3"> <instance_attributes id="status-3"> <nvpair id="status-3-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-2"/> <nvpair id="status-3-master-galera" name="master-galera" value="100"/> <nvpair id="status-3-master-redis" name="master-redis" value="1"/> <nvpair id="status-3-.node-unfenced" name="#node-unfenced" value="1523255228"/> <nvpair id="status-3-fail-count-stonith-fence_compute-fence-nova.monitor_60000" name="fail-count-stonith-fence_compute-fence-nova#monitor_60000" value="1"/> <nvpair id="status-3-last-failure-stonith-fence_compute-fence-nova.monitor_60000" name="last-failure-stonith-fence_compute-fence-nova#monitor_60000" value="1523260455"/> </instance_attributes> </transient_attributes> </node_state> <node_state id="2" uname="controller-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"> <lrm id="2"> <lrm_resources> <lrm_resource id="overcloud-novacompute-0" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-0_last_0" operation_key="overcloud-novacompute-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="3:2:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;3:2:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="1" rc-code="7" op-status="0" interval="0" last-run="1523253844" last-rc-change="1523253844" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " 
op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="overcloud-novacompute-1" type="remote" class="ocf" provider="pacemaker"> <lrm_rsc_op id="overcloud-novacompute-1_last_0" operation_key="overcloud-novacompute-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:4:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;8:4:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="3" rc-code="0" op-status="0" interval="0" last-run="1523253850" last-rc-change="1523253850" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="overcloud-novacompute-1_monitor_20000" operation_key="overcloud-novacompute-1_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:5:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;7:5:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="4" rc-code="0" op-status="0" interval="20000" last-rc-change="1523253850" exec-time="0" queue-time="0" op-digest="6e5bb737f46c381d8a46fb4162afd9e0"/> <lrm_rsc_op id="overcloud-novacompute-1_last_failure_0" operation_key="overcloud-novacompute-1_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:5:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="4:1;7:5:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="4" rc-code="1" op-status="4" interval="20000" last-rc-change="1523260399" exec-time="0" queue-time="0" op-digest="6e5bb737f46c381d8a46fb4162afd9e0"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-0_last_0" operation_key="rabbitmq-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="7:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;7:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="7" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="73" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-1_last_0" operation_key="rabbitmq-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="8:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;8:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="11" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="68" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-2_last_0" operation_key="rabbitmq-bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="9:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;9:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="20" rc-code="0" op-status="0" 
interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="499" queue-time="0" op-digest="ca3da6f108527dc1b04b197fb932b1ca"/> <lrm_rsc_op id="rabbitmq-bundle-docker-2_monitor_60000" operation_key="rabbitmq-bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="5:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;5:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="23" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254239" exec-time="115" queue-time="0" op-digest="96961ca97770999cad87c224f4dd09d9"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-0" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-0"> <lrm_rsc_op id="rabbitmq-bundle-0_last_0" operation_key="rabbitmq-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="13:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;13:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="205c791aea96714f7aa1cb111c727e1b" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-1" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-1"> <lrm_rsc_op id="rabbitmq-bundle-1_last_0" operation_key="rabbitmq-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="14:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;14:16:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="6" rc-code="7" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="8dc3f9844e70d4458a7edcd7091ecf50" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-2" type="remote" class="ocf" provider="pacemaker" container="rabbitmq-bundle-docker-2"> <lrm_rsc_op id="rabbitmq-bundle-2_last_0" operation_key="rabbitmq-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;33:16:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="8" rc-code="0" op-status="0" interval="0" last-run="1523254239" last-rc-change="1523254239" exec-time="0" queue-time="0" op-digest="f1d68ab267df6867301fcff08041cd2f" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="rabbitmq-bundle-2_monitor_60000" operation_key="rabbitmq-bundle-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="28:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;28:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="9" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254242" exec-time="0" queue-time="0" op-digest="76f9ab23bc58402930f7f92ff2ab9d75"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-0_last_0" 
operation_key="galera-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="16:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;16:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="28" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="59" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-1_last_0" operation_key="galera-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="17:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;17:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="32" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="54" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-2_last_0" operation_key="galera-bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="18:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;18:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="41" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="499" queue-time="0" op-digest="e1e1621b9ba87ff090029601e7ed1605"/> <lrm_rsc_op id="galera-bundle-docker-2_monitor_60000" operation_key="galera-bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="11:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;11:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="44" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254365" exec-time="133" queue-time="0" op-digest="833a23d25a23c2b843bf752bf8e7afd0"/> </lrm_resource> <lrm_resource id="galera-bundle-1" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-1"> <lrm_rsc_op id="galera-bundle-1_last_0" operation_key="galera-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="23:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;23:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="10" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="37e9ab931fa7b5d2122c4cd44c56b7d2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="galera-bundle-0" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-0"> <lrm_rsc_op id="galera-bundle-0_last_0" operation_key="galera-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="22:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;22:26:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="11" rc-code="7" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" 
exec-time="0" queue-time="0" op-digest="f282b02648444d9d95065cede4784d97" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="galera-bundle-2" type="remote" class="ocf" provider="pacemaker" container="galera-bundle-docker-2"> <lrm_rsc_op id="galera-bundle-2_last_0" operation_key="galera-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="76:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;76:26:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="13" rc-code="0" op-status="0" interval="0" last-run="1523254365" last-rc-change="1523254365" exec-time="0" queue-time="0" op-digest="ed95b71467015049c875f75798f405fb" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="galera-bundle-2_monitor_60000" operation_key="galera-bundle-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="71:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;71:27:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="14" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254368" exec-time="0" queue-time="0" op-digest="cd5e49e967c83be8abedad4b1f3136b7"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="25:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;25:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="49" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="70" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="26:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;26:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="53" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="75" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;27:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="62" rc-code="0" op-status="0" interval="0" last-run="1523254512" last-rc-change="1523254512" exec-time="506" queue-time="0" op-digest="9be9af650e3a8c45e5b72129b8f14ebc"/> <lrm_rsc_op id="redis-bundle-docker-2_monitor_60000" operation_key="redis-bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="17:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" 
transition-magic="0:0;17:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="64" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254513" exec-time="119" queue-time="0" op-digest="223b38a92a94f152192668e126334d84"/> </lrm_resource> <lrm_resource id="redis-bundle-1" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-1"> <lrm_rsc_op id="redis-bundle-1_last_0" operation_key="redis-bundle-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;32:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="15" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="dd323696d6c8ed14cb71914c411664c9" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="redis-bundle-2" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-2"> <lrm_rsc_op id="redis-bundle-2_last_0" operation_key="redis-bundle-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="125:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;125:40:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="18" rc-code="0" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="cd98602d6f977947327050913302f861" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="redis-bundle-2_monitor_60000" operation_key="redis-bundle-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="120:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;120:41:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="19" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254516" exec-time="0" queue-time="0" op-digest="0491b76e934baa33a7f246cd2c3f54a7"/> </lrm_resource> <lrm_resource id="redis-bundle-0" type="remote" class="ocf" provider="pacemaker" container="redis-bundle-docker-0"> <lrm_rsc_op id="redis-bundle-0_last_0" operation_key="redis-bundle-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="31:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;31:40:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1523254513" last-rc-change="1523254513" exec-time="0" queue-time="0" op-digest="f325849fbffe2e62970eb201a50da9f3" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="ip-192.168.24.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-192.168.24.11_last_0" operation_key="ip-192.168.24.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="34:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;34:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="70" rc-code="7" op-status="0" interval="0" last-run="1523254558" last-rc-change="1523254558" exec-time="53" queue-time="0" op-digest="d16224b1cc961154d457d259d944b5f9"/> 
</lrm_resource> <lrm_resource id="ip-10.0.0.110" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-10.0.0.110_last_0" operation_key="ip-10.0.0.110_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;35:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="74" rc-code="7" op-status="0" interval="0" last-run="1523254574" last-rc-change="1523254574" exec-time="48" queue-time="0" op-digest="95d16a18326229bbadba9e9540c77da8"/> </lrm_resource> <lrm_resource id="ip-172.17.1.14" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.14_last_0" operation_key="ip-172.17.1.14_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="169:55:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;169:55:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="79" rc-code="0" op-status="0" interval="0" last-run="1523254596" last-rc-change="1523254596" exec-time="4083" queue-time="0" op-digest="1fab2783a8d283e33a945588908e98a4"/> <lrm_rsc_op id="ip-172.17.1.14_monitor_10000" operation_key="ip-172.17.1.14_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="170:55:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;170:55:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="80" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254600" exec-time="49" queue-time="0" op-digest="19c32490a75539eb9cf2ca18727e305e"/> </lrm_resource> <lrm_resource id="ip-172.17.1.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.17_last_0" operation_key="ip-172.17.1.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="37:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;37:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="84" rc-code="7" op-status="0" interval="0" last-run="1523254607" last-rc-change="1523254607" exec-time="47" queue-time="0" op-digest="edc8fdae4cc326c15a779f36f28eb3f8"/> </lrm_resource> <lrm_resource id="ip-172.17.3.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.3.11_last_0" operation_key="ip-172.17.3.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;38:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="88" rc-code="7" op-status="0" interval="0" last-run="1523254624" last-rc-change="1523254624" exec-time="49" queue-time="0" op-digest="e88426fed0f2d870d791fdc3f7380de6"/> </lrm_resource> <lrm_resource id="ip-172.17.4.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.4.17_last_0" operation_key="ip-172.17.4.17_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="178:64:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;178:64:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="93" rc-code="0" op-status="0" interval="0" last-run="1523254647" last-rc-change="1523254647" exec-time="4082" queue-time="0" op-digest="c0f293651f2b055f54a8b5665a3d28e2"/> 
<lrm_rsc_op id="ip-172.17.4.17_monitor_10000" operation_key="ip-172.17.4.17_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="179:64:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;179:64:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="94" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254651" exec-time="44" queue-time="0" op-digest="4f6fa28044077db6ec3b34abe979afcd"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-0_last_0" operation_key="haproxy-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="42:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;42:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="98" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="68" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-1_last_0" operation_key="haproxy-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="43:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;43:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="102" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="69" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-2_last_0" operation_key="haproxy-bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="185:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;185:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="107" rc-code="0" op-status="0" interval="0" last-run="1523254662" last-rc-change="1523254662" exec-time="508" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> <lrm_rsc_op id="haproxy-bundle-docker-2_monitor_60000" operation_key="haproxy-bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="186:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;186:67:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254662" exec-time="115" queue-time="0" op-digest="0f463f8dd703ccc5d6db7e8257b7e651"/> </lrm_resource> <lrm_resource id="stonith-fence_compute-fence-nova" type="fence_compute" class="stonith"> <lrm_rsc_op id="stonith-fence_compute-fence-nova_last_0" operation_key="stonith-fence_compute-fence-nova_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="44:86:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;44:86:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="112" rc-code="7" op-status="0" interval="0" last-run="1523255081" last-rc-change="1523255081" exec-time="2" queue-time="0" 
op-digest="ad312d85623cdb0a792e6fbd5e91a820" op-secure-params=" password passwd " op-secure-digest="caf046d0a0953a2e7d15bbbde1371880"/> </lrm_resource> <lrm_resource id="compute-unfence-trigger" type="Dummy" class="ocf" provider="pacemaker"> <lrm_rsc_op id="compute-unfence-trigger_last_0" operation_key="compute-unfence-trigger_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="44:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;44:87:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="117" rc-code="7" op-status="0" interval="0" last-run="1523255087" last-rc-change="1523255087" exec-time="24" queue-time="1" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="nova-evacuate" type="NovaEvacuate" class="ocf" provider="openstack"> <lrm_rsc_op id="nova-evacuate_last_0" operation_key="nova-evacuate_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="46:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;46:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="121" rc-code="7" op-status="0" interval="0" last-run="1523255094" last-rc-change="1523255094" exec-time="28" queue-time="0" op-digest="3b592b4abd94b571edcf74f5e4246f65" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400803f9e" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400803f9e_last_0" operation_key="stonith-fence_ipmilan-525400803f9e_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="218:100:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;218:100:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="137" rc-code="0" op-status="0" interval="0" last-run="1523255221" last-rc-change="1523255221" exec-time="0" queue-time="0" op-digest="4998c836382aeb3820357da01e70b561" op-secure-params=" password passwd " op-secure-digest="00daf85972094b7cf1a1d4fec3e2ed54"/> <lrm_rsc_op id="stonith-fence_ipmilan-525400803f9e_monitor_60000" operation_key="stonith-fence_ipmilan-525400803f9e_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="215:97:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;215:97:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="131" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255216" exec-time="102" queue-time="0" op-digest="477745be04d3d31937853c306f8d724e" op-secure-params=" password passwd " op-secure-digest="00daf85972094b7cf1a1d4fec3e2ed54"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-5254008be2cc" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-5254008be2cc_last_0" operation_key="stonith-fence_ipmilan-5254008be2cc_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="221:105:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;221:105:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="138" rc-code="0" op-status="0" 
interval="0" last-run="1523255238" last-rc-change="1523255238" exec-time="344" queue-time="0" op-digest="4938a34bfc64185ad6e5f9441c84bf68" op-secure-params=" password passwd " op-secure-digest="c106093670a7862f33a04c6c920209b9"/> <lrm_rsc_op id="stonith-fence_ipmilan-5254008be2cc_monitor_60000" operation_key="stonith-fence_ipmilan-5254008be2cc_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="222:105:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;222:105:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="151" rc-code="0" op-status="0" interval="60000" last-rc-change="1523255239" exec-time="204" queue-time="0" op-digest="23c4a5570f4f96a7f35427f9d088acb6" op-secure-params=" password passwd " op-secure-digest="c106093670a7862f33a04c6c920209b9"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400fca120" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400fca120_last_0" operation_key="stonith-fence_ipmilan-525400fca120_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="50:105:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;50:105:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="142" rc-code="7" op-status="0" interval="0" last-run="1523255238" last-rc-change="1523255238" exec-time="0" queue-time="1" op-digest="7e9aea21dfd22b59b27b07578090d9d6" op-secure-params=" password passwd " op-secure-digest="562546912c5a20954742c65d069a4fb4"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400953d48" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400953d48_last_0" operation_key="stonith-fence_ipmilan-525400953d48_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="52:105:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;52:105:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="146" rc-code="7" op-status="0" interval="0" last-run="1523255238" last-rc-change="1523255238" exec-time="0" queue-time="0" op-digest="a2ec1560bddbd621ae7ff630da4c9f89" op-secure-params=" password passwd " op-secure-digest="1a1e4d964e3025ec522d4b4d96995bbb"/> </lrm_resource> <lrm_resource id="stonith-fence_ipmilan-525400b02b86" type="fence_ipmilan" class="stonith"> <lrm_rsc_op id="stonith-fence_ipmilan-525400b02b86_last_0" operation_key="stonith-fence_ipmilan-525400b02b86_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="225:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;225:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="156" rc-code="0" op-status="0" interval="0" last-run="1523255310" last-rc-change="1523255310" exec-time="129" queue-time="0" op-digest="800b95e336cd972373e8e2f87f98171f" op-secure-params=" password passwd " op-secure-digest="d207be35798fa3d6ad04e543cad39795"/> <lrm_rsc_op id="stonith-fence_ipmilan-525400b02b86_monitor_60000" operation_key="stonith-fence_ipmilan-525400b02b86_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="226:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;226:111:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="157" rc-code="0" op-status="0" 
interval="60000" last-rc-change="1523255310" exec-time="193" queue-time="0" op-digest="15b553d8b93933f79c56207a7f3cc02f" op-secure-params=" password passwd " op-secure-digest="d207be35798fa3d6ad04e543cad39795"/> </lrm_resource> <lrm_resource id="openstack-cinder-volume-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="openstack-cinder-volume-docker-0_last_0" operation_key="openstack-cinder-volume-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="52:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;52:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="155" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="101" queue-time="0" op-digest="36b2791c98a366f2f5e7a1d5ef48b3c9"/> </lrm_resource> </lrm_resources> </lrm> <transient_attributes id="2"> <instance_attributes id="status-2"> <nvpair id="status-2-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@controller-1"/> <nvpair id="status-2-master-galera" name="master-galera" value="100"/> <nvpair id="status-2-master-redis" name="master-redis" value="1"/> <nvpair id="status-2-.node-unfenced" name="#node-unfenced" value="1523255236"/> <nvpair id="status-2-fail-count-overcloud-novacompute-1.monitor_20000" name="fail-count-overcloud-novacompute-1#monitor_20000" value="1"/> <nvpair id="status-2-last-failure-overcloud-novacompute-1.monitor_20000" name="last-failure-overcloud-novacompute-1#monitor_20000" value="1523260399"/> </instance_attributes> </transient_attributes> </node_state> <node_state remote_node="true" id="overcloud-novacompute-0" uname="overcloud-novacompute-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="overcloud-novacompute-0"> <lrm_resources> <lrm_resource id="rabbitmq-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-0_last_0" operation_key="rabbitmq-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="13:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;13:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="36" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="98" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-1_last_0" operation_key="rabbitmq-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="14:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;14:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="40" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="86" queue-time="1" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-2_last_0" operation_key="rabbitmq-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="15:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;15:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" 
exit-reason="" on_node="controller-0" call-id="44" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="49" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-0_last_0" operation_key="galera-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="22:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;22:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="54" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="83" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-1_last_0" operation_key="galera-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="23:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;23:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="58" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="84" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-2_last_0" operation_key="galera-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="24:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;24:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="62" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="49" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="31:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;31:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="73" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="84" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;32:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="77" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="86" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-2_last_0" 
operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;33:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="81" rc-code="7" op-status="0" interval="0" last-run="1523254479" last-rc-change="1523254479" exec-time="57" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="ip-192.168.24.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-192.168.24.11_last_0" operation_key="ip-192.168.24.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;36:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="89" rc-code="7" op-status="0" interval="0" last-run="1523254559" last-rc-change="1523254559" exec-time="47" queue-time="0" op-digest="d16224b1cc961154d457d259d944b5f9"/> </lrm_resource> <lrm_resource id="ip-10.0.0.110" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-10.0.0.110_last_0" operation_key="ip-10.0.0.110_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="37:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;37:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="94" rc-code="7" op-status="0" interval="0" last-run="1523254575" last-rc-change="1523254575" exec-time="41" queue-time="0" op-digest="95d16a18326229bbadba9e9540c77da8"/> </lrm_resource> <lrm_resource id="ip-172.17.1.14" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.14_last_0" operation_key="ip-172.17.1.14_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;38:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="99" rc-code="7" op-status="0" interval="0" last-run="1523254591" last-rc-change="1523254591" exec-time="42" queue-time="0" op-digest="1fab2783a8d283e33a945588908e98a4"/> </lrm_resource> <lrm_resource id="ip-172.17.1.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.17_last_0" operation_key="ip-172.17.1.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;39:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="104" rc-code="7" op-status="0" interval="0" last-run="1523254608" last-rc-change="1523254608" exec-time="44" queue-time="0" op-digest="edc8fdae4cc326c15a779f36f28eb3f8"/> </lrm_resource> <lrm_resource id="ip-172.17.3.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.3.11_last_0" operation_key="ip-172.17.3.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="40:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;40:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="108" rc-code="7" op-status="0" interval="0" last-run="1523254624" last-rc-change="1523254624" exec-time="42" queue-time="0" op-digest="e88426fed0f2d870d791fdc3f7380de6"/> </lrm_resource> 
<lrm_resource id="ip-172.17.4.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.4.17_last_0" operation_key="ip-172.17.4.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;41:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="113" rc-code="7" op-status="0" interval="0" last-run="1523254642" last-rc-change="1523254642" exec-time="35" queue-time="0" op-digest="c0f293651f2b055f54a8b5665a3d28e2"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-0_last_0" operation_key="haproxy-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="48:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;48:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="118" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="84" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-1_last_0" operation_key="haproxy-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="49:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;49:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="122" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="85" queue-time="1" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-2_last_0" operation_key="haproxy-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="50:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;50:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="126" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="52" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="compute-unfence-trigger" type="Dummy" class="ocf" provider="pacemaker"> <lrm_rsc_op id="compute-unfence-trigger_last_0" operation_key="compute-unfence-trigger_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="206:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;206:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="233" rc-code="0" op-status="0" interval="0" last-run="1523255234" last-rc-change="1523255234" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="compute-unfence-trigger_monitor_10000" operation_key="compute-unfence-trigger_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" 
transition-key="208:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;208:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="234" rc-code="0" op-status="0" interval="10000" last-rc-change="1523255234" exec-time="22" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="nova-evacuate" type="NovaEvacuate" class="ocf" provider="openstack"> <lrm_rsc_op id="nova-evacuate_last_0" operation_key="nova-evacuate_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="48:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;48:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1523255094" last-rc-change="1523255094" exec-time="28" queue-time="0" op-digest="3b592b4abd94b571edcf74f5e4246f65" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> </lrm_resource> <lrm_resource id="openstack-cinder-volume-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="openstack-cinder-volume-docker-0_last_0" operation_key="openstack-cinder-volume-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="54:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;54:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="242" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="67" queue-time="0" op-digest="36b2791c98a366f2f5e7a1d5ef48b3c9"/> </lrm_resource> </lrm_resources> </lrm> <transient_attributes id="overcloud-novacompute-0"> <instance_attributes id="status-overcloud-novacompute-0"> <nvpair id="status-overcloud-novacompute-0-.node-unfenced" name="#node-unfenced" value="1523255231"/> <nvpair id="status-overcloud-novacompute-0-.digests-all" name="#digests-all" value="stonith-fence_compute-fence-nova:fence_compute:ad312d85623cdb0a792e6fbd5e91a820,"/> <nvpair id="status-overcloud-novacompute-0-.digests-secure" name="#digests-secure" value="stonith-fence_compute-fence-nova:fence_compute:(null),"/> </instance_attributes> </transient_attributes> </node_state> <node_state remote_node="true" id="overcloud-novacompute-1" uname="overcloud-novacompute-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="overcloud-novacompute-1"> <lrm_resources> <lrm_resource id="rabbitmq-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-0_last_0" operation_key="rabbitmq-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="16:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;16:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="35" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="97" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-1_last_0" operation_key="rabbitmq-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" 
transition-key="17:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;17:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="39" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="82" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="rabbitmq-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq-bundle-docker-2_last_0" operation_key="rabbitmq-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="18:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;18:13:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="43" rc-code="7" op-status="0" interval="0" last-run="1523254206" last-rc-change="1523254206" exec-time="51" queue-time="0" op-digest="c96831c3be25c5a8a7f32edc8243ba0b"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-0_last_0" operation_key="galera-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="25:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;25:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="53" rc-code="7" op-status="0" interval="0" last-run="1523254330" last-rc-change="1523254330" exec-time="82" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-1_last_0" operation_key="galera-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="26:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;26:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="57" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="83" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="galera-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera-bundle-docker-2_last_0" operation_key="galera-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;27:23:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="61" rc-code="7" op-status="0" interval="0" last-run="1523254331" last-rc-change="1523254331" exec-time="51" queue-time="0" op-digest="81f254cd6c291dbbdacc42886fdace4e"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-0_last_0" operation_key="redis-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="34:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;34:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="73" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="82" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource 
id="redis-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-1_last_0" operation_key="redis-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;35:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="77" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="83" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="redis-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis-bundle-docker-2_last_0" operation_key="redis-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;36:37:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="81" rc-code="7" op-status="0" interval="0" last-run="1523254478" last-rc-change="1523254478" exec-time="55" queue-time="0" op-digest="b8d941a0452ff6b7d1254582ba8b48d5"/> </lrm_resource> <lrm_resource id="ip-192.168.24.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-192.168.24.11_last_0" operation_key="ip-192.168.24.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="37:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;37:47:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="89" rc-code="7" op-status="0" interval="0" last-run="1523254558" last-rc-change="1523254558" exec-time="43" queue-time="0" op-digest="d16224b1cc961154d457d259d944b5f9"/> </lrm_resource> <lrm_resource id="ip-10.0.0.110" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-10.0.0.110_last_0" operation_key="ip-10.0.0.110_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;38:50:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="94" rc-code="7" op-status="0" interval="0" last-run="1523254574" last-rc-change="1523254574" exec-time="37" queue-time="0" op-digest="95d16a18326229bbadba9e9540c77da8"/> </lrm_resource> <lrm_resource id="ip-172.17.1.14" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.14_last_0" operation_key="ip-172.17.1.14_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;39:53:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="98" rc-code="7" op-status="0" interval="0" last-run="1523254591" last-rc-change="1523254591" exec-time="40" queue-time="0" op-digest="1fab2783a8d283e33a945588908e98a4"/> </lrm_resource> <lrm_resource id="ip-172.17.1.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.1.17_last_0" operation_key="ip-172.17.1.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="40:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;40:56:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="103" rc-code="7" op-status="0" interval="0" 
last-run="1523254607" last-rc-change="1523254607" exec-time="37" queue-time="0" op-digest="edc8fdae4cc326c15a779f36f28eb3f8"/> </lrm_resource> <lrm_resource id="ip-172.17.3.11" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.3.11_last_0" operation_key="ip-172.17.3.11_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;41:59:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="108" rc-code="7" op-status="0" interval="0" last-run="1523254624" last-rc-change="1523254624" exec-time="39" queue-time="0" op-digest="e88426fed0f2d870d791fdc3f7380de6"/> </lrm_resource> <lrm_resource id="ip-172.17.4.17" type="IPaddr2" class="ocf" provider="heartbeat"> <lrm_rsc_op id="ip-172.17.4.17_last_0" operation_key="ip-172.17.4.17_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="42:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;42:62:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="113" rc-code="7" op-status="0" interval="0" last-run="1523254641" last-rc-change="1523254641" exec-time="35" queue-time="0" op-digest="c0f293651f2b055f54a8b5665a3d28e2"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-0_last_0" operation_key="haproxy-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="51:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;51:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="118" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="84" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-1" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-1_last_0" operation_key="haproxy-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="52:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;52:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="122" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="84" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="haproxy-bundle-docker-2" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="haproxy-bundle-docker-2_last_0" operation_key="haproxy-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="53:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;53:65:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="126" rc-code="7" op-status="0" interval="0" last-run="1523254656" last-rc-change="1523254656" exec-time="56" queue-time="0" op-digest="f75306f083a5fb44c4ccba3e258423d1"/> </lrm_resource> <lrm_resource id="compute-unfence-trigger" type="Dummy" class="ocf" provider="pacemaker"> <lrm_rsc_op id="compute-unfence-trigger_last_0" operation_key="compute-unfence-trigger_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" 
transition-key="209:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;209:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="233" rc-code="0" op-status="0" interval="0" last-run="1523255234" last-rc-change="1523255234" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> <lrm_rsc_op id="compute-unfence-trigger_monitor_10000" operation_key="compute-unfence-trigger_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="211:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;211:103:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="234" rc-code="0" op-status="0" interval="10000" last-rc-change="1523255234" exec-time="19" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/> </lrm_resource> <lrm_resource id="nova-evacuate" type="NovaEvacuate" class="ocf" provider="openstack"> <lrm_rsc_op id="nova-evacuate_last_0" operation_key="nova-evacuate_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="49:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;49:90:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="167" rc-code="7" op-status="0" interval="0" last-run="1523255094" last-rc-change="1523255094" exec-time="25" queue-time="0" op-digest="3b592b4abd94b571edcf74f5e4246f65" op-secure-params=" password " op-secure-digest="fb702c27ea4fa4baf1aa3ba9f874d9f3"/> </lrm_resource> <lrm_resource id="openstack-cinder-volume-docker-0" type="docker" class="ocf" provider="heartbeat"> <lrm_rsc_op id="openstack-cinder-volume-docker-0_last_0" operation_key="openstack-cinder-volume-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="55:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:7;55:110:7:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="241" rc-code="7" op-status="0" interval="0" last-run="1523255309" last-rc-change="1523255309" exec-time="70" queue-time="0" op-digest="36b2791c98a366f2f5e7a1d5ef48b3c9"/> </lrm_resource> </lrm_resources> </lrm> <transient_attributes id="overcloud-novacompute-1"> <instance_attributes id="status-overcloud-novacompute-1"> <nvpair id="status-overcloud-novacompute-1-.node-unfenced" name="#node-unfenced" value="1523255233"/> <nvpair id="status-overcloud-novacompute-1-.digests-all" name="#digests-all" value="stonith-fence_compute-fence-nova:fence_compute:ad312d85623cdb0a792e6fbd5e91a820,"/> <nvpair id="status-overcloud-novacompute-1-.digests-secure" name="#digests-secure" value="stonith-fence_compute-fence-nova:fence_compute:(null),"/> </instance_attributes> </transient_attributes> </node_state> <node_state remote_node="true" id="rabbitmq-bundle-0" uname="rabbitmq-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="rabbitmq-bundle-0"> <lrm_resources> <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" 
crm_feature_set="3.0.14" transition-key="33:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;33:17:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1523254244" last-rc-change="1523254244" exec-time="11989" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_pre_notify_start_0" operation_key="rabbitmq_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="51:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;51:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254280" last-rc-change="1523254280" exec-time="0" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_post_notify_start_0" operation_key="rabbitmq_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="52:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;52:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254306" last-rc-change="1523254306" exec-time="0" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;32:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="52" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254306" exec-time="4385" queue-time="528" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="rabbitmq-bundle-1" uname="rabbitmq-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="rabbitmq-bundle-1"> <lrm_resources> <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:18:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;33:18:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1523254256" last-rc-change="1523254256" exec-time="23713" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_pre_notify_start_0" operation_key="rabbitmq_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="53:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;53:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254280" last-rc-change="1523254280" exec-time="0" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_post_notify_start_0" operation_key="rabbitmq_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="54:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;54:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" 
call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254306" last-rc-change="1523254306" exec-time="0" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;35:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="55" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254306" exec-time="4637" queue-time="487" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="rabbitmq-bundle-2" uname="rabbitmq-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="rabbitmq-bundle-2"> <lrm_resources> <lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat"> <lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;36:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1523254280" last-rc-change="1523254280" exec-time="25411" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_post_notify_start_0" operation_key="rabbitmq_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="55:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;55:19:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254306" last-rc-change="1523254306" exec-time="0" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/> <lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;38:20:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="54" rc-code="0" op-status="0" interval="10000" last-rc-change="1523254306" exec-time="4624" queue-time="492" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="galera-bundle-1" uname="galera-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="galera-bundle-1"> <lrm_resources> <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="84:31:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;84:31:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="156" rc-code="0" op-status="0" interval="0" last-run="1523254410" last-rc-change="1523254410" exec-time="12244" queue-time="0" op-digest="a27a7301241d321527115de47ec94f52" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" 
crm_feature_set="3.0.14" transition-key="82:32:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:8;82:32:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="191" rc-code="8" op-status="0" interval="10000" last-rc-change="1523254423" exec-time="687" queue-time="0" op-digest="5a80d9ca7bb4f6ad0bdb3b4395b9f4ec" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="galera-bundle-0" uname="galera-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="galera-bundle-0"> <lrm_resources> <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="79:30:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;79:30:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="156" rc-code="0" op-status="0" interval="0" last-run="1523254399" last-rc-change="1523254399" exec-time="11070" queue-time="0" op-digest="a27a7301241d321527115de47ec94f52" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="81:31:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:8;81:31:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="261" rc-code="8" op-status="0" interval="10000" last-rc-change="1523254411" exec-time="758" queue-time="0" op-digest="5a80d9ca7bb4f6ad0bdb3b4395b9f4ec" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="galera-bundle-2" uname="galera-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="galera-bundle-2"> <lrm_resources> <lrm_resource id="galera" type="galera" class="ocf" provider="heartbeat"> <lrm_rsc_op id="galera_last_0" operation_key="galera_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="85:32:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;85:32:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="184" rc-code="0" op-status="0" interval="0" last-run="1523254423" last-rc-change="1523254423" exec-time="12190" queue-time="0" op-digest="a27a7301241d321527115de47ec94f52" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> <lrm_rsc_op id="galera_monitor_10000" operation_key="galera_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="87:33:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:8;87:33:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="220" rc-code="8" op-status="0" interval="10000" last-rc-change="1523254436" exec-time="659" queue-time="0" op-digest="5a80d9ca7bb4f6ad0bdb3b4395b9f4ec" op-secure-params=" user " op-secure-digest="a27a7301241d321527115de47ec94f52"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="redis-bundle-2" uname="redis-bundle-2" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm 
id="redis-bundle-2"> <lrm_resources> <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="130:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;130:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="8" rc-code="0" op-status="0" interval="0" last-run="1523254526" last-rc-change="1523254526" exec-time="4679" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_start_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="181:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;181:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254530" last-rc-change="1523254530" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_pre_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="193:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;193:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254531" last-rc-change="1523254531" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="194:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;194:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254533" last-rc-change="1523254533" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="134:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;134:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="50" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254538" exec-time="883" queue-time="4096" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="133:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;133:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-1" call-id="51" rc-code="0" op-status="0" interval="45000" last-rc-change="1523254538" exec-time="882" queue-time="4940" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " 
op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="redis-bundle-0" uname="redis-bundle-0" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="redis-bundle-0"> <lrm_resources> <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis_last_0" operation_key="redis_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="124:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;124:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="51" rc-code="0" op-status="0" interval="0" last-run="1523254532" last-rc-change="1523254532" exec-time="1514" queue-time="1147" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_pre_notify_start_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="177:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;177:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254526" last-rc-change="1523254526" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_start_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="178:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;178:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254530" last-rc-change="1523254530" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_pre_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="189:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;189:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254531" last-rc-change="1523254531" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="190:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;190:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254533" last-rc-change="1523254533" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_monitor_20000" operation_key="redis_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="126:45:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" 
transition-magic="0:8;126:45:8:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-2" call-id="68" rc-code="8" op-status="0" interval="20000" last-rc-change="1523254535" exec-time="1000" queue-time="904" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> </lrm_resource> </lrm_resources> </lrm> </node_state> <node_state remote_node="true" id="redis-bundle-1" uname="redis-bundle-1" in_ccm="true" crm-debug-origin="do_update_resource" node_fenced="0"> <lrm id="redis-bundle-1"> <lrm_resources> <lrm_resource id="redis" type="redis" class="ocf" provider="heartbeat"> <lrm_rsc_op id="redis_last_0" operation_key="redis_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="126:42:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;126:42:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="8" rc-code="0" op-status="0" interval="0" last-run="1523254521" last-rc-change="1523254521" exec-time="4533" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_pre_notify_start_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="179:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;179:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254526" last-rc-change="1523254526" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_start_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="180:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;180:43:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254530" last-rc-change="1523254530" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_pre_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="191:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;191:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254531" last-rc-change="1523254531" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_post_notify_promote_0" operation_key="redis_notify_0" operation="notify" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="192:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;192:44:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="-1" rc-code="0" op-status="0" interval="0" last-run="1523254533" last-rc-change="1523254533" exec-time="0" queue-time="0" op-digest="a5b2a4f5c557278af14d6cbffc5a229d" op-secure-params=" user " 
op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_monitor_60000" operation_key="redis_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="130:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;130:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="51" rc-code="0" op-status="0" interval="60000" last-rc-change="1523254537" exec-time="942" queue-time="4042" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> <lrm_rsc_op id="redis_monitor_45000" operation_key="redis_monitor_45000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="129:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" transition-magic="0:0;129:45:0:69d170fd-52b7-46b4-85cd-cd4a4d08c203" exit-reason="" on_node="controller-0" call-id="52" rc-code="0" op-status="0" interval="45000" last-rc-change="1523254538" exec-time="1012" queue-time="4944" op-digest="992feffd37882eb5ce9bfc847b2fa75e" op-secure-params=" user " op-secure-digest="a5b2a4f5c557278af14d6cbffc5a229d"/> </lrm_resource> </lrm_resources> </lrm> </node_state> </status> </cib> diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 6c767a0a59..c7714fd0f0 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,2675 +1,2676 @@ /* * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net> * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <sys/param.h> #include <sys/types.h> #include <sys/wait.h> #include <crm/crm.h> #include <crm/services.h> #include <crm/msg_xml.h> #include <crm/common/xml.h> #include <pacemaker-controld.h> #include <controld_fsa.h> #include <controld_messages.h> #include <controld_callbacks.h> #include <controld_lrm.h> #include <regex.h> #include <crm/pengine/rules.h> #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 #define s_if_plural(i) (((i) == 1)? 
"" : "s") struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request); void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("Connection to executor failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("Disconnected from executor"); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } /*! * \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval_ms == existing->interval_ms) && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) && safe_str_eq(op->op_type, existing->op_type)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! * \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! 
* \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.standard = strdup(rsc->standard); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval_ms > 0) { crm_trace("Removing cancelled recurring op: " CRM_OP_FMT, op->rsc_id, op->op_type, op->interval_ms); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping " CRM_OP_FMT " rc=%d, status=%d", op->rsc_id, op->op_type, op->interval_ms, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. */ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval_ms == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq("reload", op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = crm_str_table_new(); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval_ms > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: " CRM_OP_FMT, op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { crm_trace("Dropping %d recurring ops because of: " CRM_OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); history_free_recurring_ops(entry); } } /*! 
* \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); op->rc = PCMK_OCF_OK; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } void lrm_op_callback(lrmd_event_data_t * op) { const char *nodename = NULL; lrm_state_t *lrm_state = NULL; CRM_CHECK(op != NULL, return); /* determine the node name for this connection. */ nodename = op->remote_nodename ? op->remote_nodename : fsa_our_uname; if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) { /* If this is the local executor IPC connection, set the right bits in the * controller when the connection goes down. */ lrm_connection_destroy(); return; } else if (op->type != lrmd_event_exec_complete) { /* we only need to process execution results */ return; } lrm_state = lrm_state_find(nodename); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local executor connections. Remote connections are * handled as resources within the scheduler. Connecting and disconnecting * from remote executor instances is handled differently. 
*/ lrm_state_t *lrm_state = NULL; if(fsa_our_uname == NULL) { return; /* Nothing to do */ } lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the executor"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); crm_notice("Disconnected from the executor"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the executor"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the executor %d time%s (%d max)", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS); crm_timer_start(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to connect to the executor the max allowed %d time%s", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Connection to the executor established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; struct recurring_op_s *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown... 
waiting"; } if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) { guint removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->pending_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, s_if_plural(removed), when, nremaining); } } if (lrm_state->pending_ops) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval_ms == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending executor operation%s at %s", counter, s_if_plural(counter), when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->pending_ops) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval_ms == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? " was" : "s were", when); } return rc; } static char * build_parameter_list(const lrmd_event_data_t *op, const struct ra_metadata_s *metadata, xmlNode *result, enum ra_param_flags_e param_type, bool invert_for_xml) { int len = 0; int max = 0; char *list = NULL; GList *iter = NULL; /* Newer resource agents support the "private" parameter attribute to * indicate sensitive parameters. For backward compatibility with older * agents, this list is used if the agent doesn't specify any as "private". 
*/ const char *secure_terms[] = { "password", "passwd", "user", }; if (is_not_set(metadata->ra_flags, ra_uses_private) && (param_type == ra_param_private)) { max = DIMOF(secure_terms); } for (iter = metadata->ra_params; iter != NULL; iter = iter->next) { struct ra_param_s *param = (struct ra_param_s *) iter->data; bool accept = FALSE; if (is_set(param->rap_flags, param_type)) { accept = TRUE; } else if (max) { for (int lpc = 0; lpc < max; lpc++) { if (safe_str_eq(secure_terms[lpc], param->rap_name)) { accept = TRUE; break; } } } if (accept) { int start = len; crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); len += strlen(param->rap_name) + 2; // include spaces around list = realloc_safe(list, len + 1); // include null terminator // spaces before and after make parsing simpler sprintf(list + start, " %s ", param->rap_name); } else { crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type)); } if (result && (invert_for_xml? !accept : accept)) { const char *v = g_hash_table_lookup(op->params, param->rap_name); if (v != NULL) { crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v); crm_xml_add(result, param->rap_name, v); } } } return list; } static void append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *restart = NULL; CRM_LOG_ASSERT(op->params != NULL); if (op->interval_ms > 0) { /* monitors are not reloadable */ return; } if (is_set(metadata->ra_flags, ra_supports_reload)) { restart = create_xml_node(NULL, XML_TAG_PARAMS); /* Add any parameters with unique="1" to the "op-force-restart" list. * * (Currently, we abuse "unique=0" to indicate reloadability. This is * nonstandard and should eventually be replaced once the OCF standard * is updated with something better.) */ list = build_parameter_list(op, metadata, restart, ra_param_unique, FALSE); } else { /* Resource does not support reloads */ return; } digest = calculate_operation_digest(restart, version); /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, * no matter if it actually supports any parameters with unique="1"). */ crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? 
list: ""); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static void append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *secure = NULL; CRM_LOG_ASSERT(op->params != NULL); /* * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on * the insecure ones */ secure = create_xml_node(NULL, XML_TAG_PARAMS); list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE); if (list != NULL) { digest = calculate_operation_digest(secure, version); crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list); crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(secure, "secure digest source"); } else { crm_trace("%s: no secure parameters", op->rsc_id); } free_xml(secure); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *node_name, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; struct ra_metadata_s *metadata = NULL; const char *caller_version = NULL; lrm_state_t *lrm_state = NULL; if (op == NULL) { return FALSE; } target_rc = rsc_op_expected_rc(op); /* there is a small risk in formerly mixed clusters that it will * be sub-optimal. * * however with our upgrade policy, the update we send should * still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); CRM_LOG_ASSERT(caller_version != NULL); if(caller_version == NULL) { caller_version = CRM_FEATURE_SET; } crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version); xml_op = create_operation_update(parent, op, caller_version, target_rc, fsa_our_uname, src, LOG_DEBUG); if (xml_op == NULL) { return TRUE; } if ((rsc == NULL) || (op->params == NULL) || !crm_op_needs_metadata(rsc->standard, op->op_type)) { crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)", op->op_type, op->rsc_id, op->params, rsc); return TRUE; } lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { crm_warn("Cannot calculate digests for operation " CRM_OP_FMT " because we have no connection to executor for %s", op->rsc_id, op->op_type, op->interval_ms, node_name); return TRUE; } metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata == NULL) { /* For now, we always collect resource agent meta-data via a local, * synchronous, direct execution of the agent. This has multiple issues: * the executor should execute agents, not the controller; meta-data for * Pacemaker Remote nodes should be collected on those nodes, not * locally; and the meta-data call shouldn't eat into the timeout of the * real action being performed. * * These issues are planned to be addressed by having the scheduler * schedule a meta-data cache check at the beginning of each transition. * Once that is working, this block will only be a fallback in case the * initial collection fails. 
*/ char *metadata_str = NULL; int rc = lrm_state_get_metadata(lrm_state, rsc->standard, rsc->provider, rsc->type, &metadata_str, 0); if (rc != pcmk_ok) { crm_warn("Failed to get metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } metadata = metadata_cache_update(lrm_state->metadata_cache, rsc, metadata_str); free(metadata_str); if (metadata == NULL) { crm_warn("Failed to update metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } } #if ENABLE_VERSIONED_ATTRS crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version); #endif crm_trace("Including additional digests for %s::%s:%s", rsc->standard, rsc->provider, rsc->type); append_restart_list(op, metadata, xml_op, caller_version); append_secure_list(op, metadata, xml_op, caller_version); return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... * leave that to the PE */ return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if ((entry->last->interval_ms == 0) && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); } } build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name, __FUNCTION__); } } return FALSE; } static xmlNode * do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; crm_node_t *peer = NULL; peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, update_flags, NULL, __FUNCTION__); if (xml_state == NULL) { return NULL; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid); rsc_list = create_xml_node(xml_data, 
XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current executor state"); return xml_state; } xmlNode * do_lrm_query(gboolean is_replace, const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not find executor state for node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, node_update_cluster|node_update_peer); } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? "" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); if (rc == pcmk_ok) { op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; } else { op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the executor", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL, NULL); free(now_s); } } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (crm_str_eq(event->rsc, op->rsc, TRUE)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name) { char *rsc_xpath = NULL; int rc = pcmk_ok; CRM_CHECK(rsc_id != NULL, return -ENXIO); rsc_xpath = crm_strdup_printf(rsc_template, lrm_state->node_name, rsc_id); rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, NULL, NULL, call_options | cib_xpath, user_name); free(rsc_xpath); return rc; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) g_hash_table_iter_remove(rsc_gIter); else g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); crm_debug("sync: Sending delete op for %s", rsc_id_copy); delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { 
notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /*! * \internal * \brief Erase an LRM history entry from the CIB, given the operation data * * \param[in] lrm_state LRM state of the desired node * \param[in] op Operation whose history should be deleted */ static void erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op) { xmlNode *xml_top = NULL; CRM_CHECK(op != NULL, return); xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval_ms > 0) { char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval_ms); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("Erasing resource operation history for " CRM_OP_FMT " (call=%d)", op->rsc_id, op->op_type, op->interval_ms, op->call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \ "/" XML_LRM_TAG_RSC_OP /* ... and also by operation key */ #define XPATH_HISTORY_ID XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']" /* ... and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']" /*! * \internal * \brief Erase an LRM history entry from the CIB, given operation identifiers * * \param[in] lrm_state LRM state of the node to clear history for * \param[in] rsc_id Name of resource to clear history for * \param[in] key Operation key of operation to clear history for * \param[in] orig_op If specified, delete only if it has this original op * \param[in] call_id If specified, delete entry only if it has this call ID */ static void erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id, const char *key, const char *orig_op, int call_id) { char *op_xpath = NULL; CRM_CHECK((rsc_id != NULL) && (key != NULL), return); if (call_id > 0) { op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL, lrm_state->node_name, rsc_id, key, call_id); } else if (orig_op) { op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, lrm_state->node_name, rsc_id, key, orig_op); } else { op_xpath = crm_strdup_printf(XPATH_HISTORY_ID, lrm_state->node_name, rsc_id, key); } crm_debug("Erasing resource operation history for %s on %s (call=%d)", key, rsc_id, call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (safe_str_eq(op, entry->failed->op_type) && (interval_ms == entry->failed->interval_ms)); } /*! 
* \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. * * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval_ms If operation is specified, it has this interval */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, guint interval_ms) { char *op_key = NULL; char *orig_op_key = NULL; lrm_state_t *lrm_state = NULL; lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { return; } /* Erase from CIB */ op_key = generate_op_key(rsc_id, "last_failure", 0); if (operation) { orig_op_key = generate_op_key(rsc_id, operation, interval_ms); } erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0); free(op_key); free(orig_op_key); /* Remove from memory */ if (lrm_state->resource_history) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval_ms)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for removal", key); } if (pending->cancelled) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (crm_str_eq(op->op_key, data->key, TRUE)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = 
g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->pending_ops)); return data.done; } /*! * \internal * \brief Retrieve resource information from LRM * * \param[in] lrm_state LRM connection to use * \param[in] rsc_xml XML containing resource configuration * \param[in] do_create If true, register resource with LRM if not already * \param[out] rsc_info Where to store resource information obtained from LRM * * \retval pcmk_ok Success (and rsc_info holds newly allocated result) * \retval -EINVAL Required information is missing from arguments * \retval -ENOTCONN No active connection to LRM * \retval -ENODEV Resource not found * \retval -errno Error communicating with executor when registering resource * * \note Caller is responsible for freeing result on success. */ static int get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { const char *id = ID(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); if (lrm_state_is_connected(lrm_state) == FALSE) { return -ENOTCONN; } crm_trace("Retrieving resource information for %s from the executor", id); *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); } } if ((*rsc_info == NULL) && do_create) { const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Could not register resource %s with the executor on %s: %s " CRM_XS " rc=%d", id, lrm_state->node_name, pcmk_strerror(rc), rc); /* Register this as an internal error if this involves the local * executor. Otherwise, we're likely dealing with an unresponsive * remote node, which is not an FSA failure. */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } return rc; } *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); } return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *host, const char *user, ha_msg_input_t * request, gboolean unregister) { int rc = pcmk_ok; crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource '%s' deleted", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d", id, sys, user ? 
user : "internal", host, rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, enum ocf_exitcode op_exitcode) { op->call_id = get_fake_call_id(lrm_state, op->rsc_id); op->t_run = time(NULL); op->t_rcchange = op->t_run; op->op_status = op_status; op->rc = op_exitcode; } static void force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_info("Clearing resource history on node %s", lrm_state->node_name); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { /* only unregister the resource during a reprobe if it is not a remote connection * resource. otherwise unregistering the connection will terminate remote-node * membership */ gboolean unregister = TRUE; if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { lrm_state_t *remote_lrm_state = lrm_state_find(entry->id); if (remote_lrm_state) { /* when forcing a reprobe, make sure to clear remote node before * clearing the remote node's connection resource */ force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE); } unregister = FALSE; } delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, user_name, NULL, unregister); } /* Now delete the copy in the CIB */ erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); /* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE * would result in the scheduler sending us back here again */ update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); } static void synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action, int rc) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc_info = NULL; const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK); const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET); xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE); if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
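         * For now, we just log the problem and return without sending any ack.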
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", ID(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, ID(xml_rsc), operation); if (safe_str_eq(operation, RSC_NOTIFY)) { // Notifications can't fail fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK); } else { fake_op_status(lrm_state, op, PCMK_LRM_OP_ERROR, rc); } crm_info("Faking " CRM_OP_FMT " result (%d) on %s", op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node); /* Process the result as if it came from the LRM, if possible * (i.e. resource info can be obtained from the lrm_state). */ if (lrm_state) { rsc_info = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if (rsc_info) { + lrmd_free_rsc_info(rsc_info); process_lrm_event(lrm_state, op, NULL); } else { /* If we can't process the result normally, at least write it to the CIB * if possible, so the scheduler can act on it. */ const char *standard = crm_element_value(xml_rsc, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(xml_rsc, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(xml_rsc, XML_ATTR_TYPE); if (standard && type) { rsc_info = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); do_update_resource(target_node, rsc_info, op); lrmd_free_rsc_info(rsc_info); } else { // @TODO Should we direct ack? crm_info("Can't fake %s failure (%d) on %s without resource standard and type", crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, target_node); } } lrmd_free_event(op); } /*! * \internal * \brief Get target of an LRM operation * * \param[in] xml LRM operation data XML * * \return LRM operation target node name (local node or Pacemaker Remote node) */ static const char * lrm_op_target(xmlNode *xml) { const char *target = NULL; if (xml) { target = crm_element_value(xml, XML_LRM_ATTR_TARGET); } if (target == NULL) { target = fsa_our_uname; } return target; } static void fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* The executor simply executes operations and reports the results, without * any concept of success or failure, so to fail a resource, we must fake * what a failure looks like. * * To do this, we create a fake executor operation event for the resource, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. 
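     * construct_op() and fake_op_status() below build that fake event before it is handed to process_lrm_event().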
*/ op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon"); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; #if ENABLE_ACL if (user_name && is_privileged(user_name) == FALSE) { crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } #endif if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { crm_info("Failing resource %s...", rsc->id); process_lrm_event(lrm_state, op, NULL); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(xml, "bad input"); } send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); } static void handle_refresh_op(lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { int rc = pcmk_ok; xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); crm_info("Forced a local resource history refresh: call=%d", rc); if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } free_xml(fragment); } static void handle_query_op(xmlNode *msg, lrm_state_t *lrm_state) { xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all); xmlNode *reply = create_reply(msg, data); if (relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml_err(reply, "reply"); } free_xml(reply); free_xml(data); } static void handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { crm_notice("Forcing the status of all resources to be redetected"); force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node); if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys) && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } } static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; const char *interval_ms_s = NULL; gboolean in_progress = FALSE; xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); CRM_CHECK(params != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); interval_ms_s = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(interval_ms_s != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, meta_key); free(meta_key); op_key = generate_op_key(rsc->id, op_task, crm_parse_ms(interval_ms_s)); crm_debug("Scheduler 
requested op %s (call=%s) be cancelled", op_key, (call_id? call_id : "NA")); call = crm_parse_int(call_id, "0"); if (call == 0) { // Normal case when the scheduler cancels a recurring op in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { // Normal case when the scheduler cancels an orphan op in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } // Acknowledge cancellation operation if for a remote connection resource if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { char *op_id = make_stop_id(rsc->id, call); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { crm_info("Nothing known about operation %d for %s", call, op_key); } erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); /* needed at least for cancellation of a remote operation */ g_hash_table_remove(lrm_state->pending_ops, op_id); free(op_id); } else { /* No ack is needed since abcdaa8, but peers with older versions * in a rolling upgrade need one. We didn't bump the feature set * at that commit, so we can only compare against the previous * CRM version (3.0.8). If any peers have feature set 3.0.9 but * not abcdaa8, they will time out waiting for the ack (no * released versions of Pacemaker are affected). */ const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); if (compare_version(peer_version, "3.0.8") <= 0) { crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", op_key, from_host, peer_version); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); } } free(op_key); return TRUE; } static void do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, bool crm_rsc_delete, const char *user_name) { gboolean unregister = TRUE; #if ENABLE_ACL int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun|cib_sync_call, user_name); if (cib_rc != pcmk_ok) { lrmd_event_data_t *op = NULL; crm_err("Could not delete resource status of %s for %s (user %s) on %s: %s" CRM_XS " rc=%d", rsc->id, from_sys, (user_name? 
user_name : "unknown"), from_host, pcmk_strerror(cib_rc), cib_rc); op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); op->op_status = PCMK_LRM_OP_ERROR; if (cib_rc == -EACCES) { op->rc = PCMK_OCF_INSUFFICIENT_PRIV; } else { op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } #endif if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { unregister = FALSE; } delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input, unregister); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; bool crm_rsc_delete = FALSE; target_node = lrm_op_target(input->xml); is_remote_node = safe_str_neq(target_node, fsa_our_uname); lrm_state = lrm_state_find(target_node); if ((lrm_state == NULL) && is_remote_node) { crm_err("Failing action because local node has never had connection to remote node %s", target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_OCF_CONNECTION_DIED); return; } CRM_ASSERT(lrm_state != NULL); #if ENABLE_ACL user_name = crm_acl_get_set_user(input->msg, F_CRM_USER, NULL); crm_trace("Executor command from user '%s'", user_name); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } crm_trace("Executor %s command from %s", crm_op, from_sys); if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { crm_rsc_delete = TRUE; // Only crm_resource uses this op operation = CRMD_ACTION_DELETE; } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { fail_lrm_resource(input->xml, lrm_state, user_name, from_host, from_sys); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { handle_refresh_op(lrm_state, user_name, from_host, from_sys); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { handle_query_op(input->msg, lrm_state); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { handle_reprobe_op(lrm_state, from_sys, from_host, user_name, is_remote_node); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); gboolean create_rsc = safe_str_neq(operation, CRMD_ACTION_DELETE); int rc; // We can't return anything meaningful without a resource ID CRM_CHECK(xml_rsc && ID(xml_rsc), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_CONNECTION_DIED); return; } else if ((rc < 0) && !create_rsc) { /* Delete of malformed or nonexistent resource * (deleting something that does not exist is a success) */ crm_notice("Not registering resource '%s' for a %s event " CRM_XS " get-rc=%d (%s) transition-key=%s", ID(xml_rsc), operation, rc, pcmk_strerror(rc), 
ID(input->xml)); delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); send_task_ok_ack(lrm_state, input, ID(xml_rsc), NULL, operation, from_host, from_sys); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource crm_err("Invalid resource definition for %s", ID(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_NOT_CONFIGURED); // fatal error return; } else if (rc < 0) { // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " CRM_XS " rc=%d", ID(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_INVALID_PARAM); // hard error return; } if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } } else if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); } else { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); } lrmd_free_rsc_info(rsc); } else { crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *interval_ms_s = NULL; GHashTable *params = NULL; const char *transition = NULL; CRM_ASSERT(rsc_id && operation); op = calloc(1, sizeof(lrmd_event_data_t)); CRM_ASSERT(op != NULL); op->type = lrmd_event_exec_complete; op->op_type = strdup(operation); op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->rsc_id = strdup(rsc_id); op->interval_ms = 0; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. 
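         * (below, we advertise our own CRM_FEATURE_SET under XML_ATTR_CRM_VERSION)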
*/ op->params = crm_str_table_new(); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); interval_ms_s = crm_meta_value(params, XML_LRM_ATTR_INTERVAL_MS); op->interval_ms = crm_parse_ms(interval_ms_s); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); #if ENABLE_VERSIONED_ATTRS // Resolve any versioned parameters if (lrm_state && safe_str_neq(op->op_type, RSC_METADATA) && safe_str_neq(op->op_type, CRMD_ACTION_DELETE) && !is_remote_lrmd_ra(NULL, NULL, rsc_id)) { // Resource info *should* already be cached, so we don't get executor call lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0); struct ra_metadata_s *metadata; metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata) { xmlNode *versioned_attrs = NULL; GHashTable *hash = NULL; char *key = NULL; char *value = NULL; GHashTableIter iter; versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_replace(params, crm_meta_name(key), strdup(value)); if (safe_str_eq(key, XML_ATTR_TIMEOUT)) { op->timeout = crm_parse_int(value, "0"); } else if (safe_str_eq(key, XML_OP_ATTR_START_DELAY)) { op->start_delay = crm_parse_int(value, "0"); } } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); } lrmd_free_rsc_info(rsc); } #endif if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { rsc_history_t *entry = NULL; if (lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = crm_str_table_new(); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->timeout <= 0) { op->timeout = op->interval_ms; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = strdup(transition); if (op->interval_ms != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and 
Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; } } crm_trace("Constructed %s op for %s: interval=%u", operation, rsc_id, op->interval_ms); return op; } void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = create_node_state_update(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, fsa_our_uname, __FUNCTION__); reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "ACK Update"); crm_debug("ACK'ing resource op " CRM_OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if ((op->interval_ms != 0) && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval_ms != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (const char *) key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) { const char *record_pending = NULL; CRM_CHECK(node_name != NULL, return); CRM_CHECK(rsc != NULL, return); CRM_CHECK(op != NULL, return); if ((op->op_type == NULL) || (op->params == NULL) || safe_str_eq(op->op_type, CRMD_ACTION_CANCEL) || safe_str_eq(op->op_type, CRMD_ACTION_DELETE)) { return; } // defaults to true record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); if (record_pending && !crm_is_true(record_pending)) { return; } op->call_id = -1; op->op_status = PCMK_LRM_OP_PENDING; 
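    /* Use a placeholder call ID and an "unknown" result until the real outcome is reported */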
op->rc = PCMK_OCF_UNKNOWN; op->t_run = time(NULL); op->t_rcchange = op->t_run; /* write a "pending" entry to the CIB, inhibit notification */ crm_debug("Recording pending op " CRM_OP_FMT " on %s in the CIB", op->rsc_id, op->op_type, op->interval_ms, node_name); do_update_resource(node_name, rsc, op); } static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; gboolean stop_recurring = FALSE; bool send_nack = FALSE; CRM_CHECK(rsc != NULL, return); CRM_CHECK(operation != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) && (op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_MIGRATE) == 0) { /* pcmk remote connections are a special use case. * We never ever want to stop monitoring a connection resource until * the entire migration has completed. If the connection is unexpectedly * severed, even during a migration, this is an event we must detect.*/ stop_recurring = FALSE; } else if ((op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_STATUS) != 0 && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) { /* stop any previous monitor operations before changing the resource state */ stop_recurring = TRUE; } if (stop_recurring == TRUE) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for " CRM_OP_FMT, removed, s_if_plural(removed), rsc->id, operation, op->interval_ms); } } /* now do the op */ crm_info("Performing key=%s op=" CRM_OP_FMT, transition, rsc->id, operation, op->interval_ms); if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); send_nack = TRUE; } else if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE /* Recalculating */ && fsa_state != S_TRANSITION_ENGINE && safe_str_neq(operation, CRMD_ACTION_STOP)) { send_nack = TRUE; } if(send_nack) { crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)", operation, rsc->id, fsa_state2string(fsa_state), is_set(fsa_input_register, R_SHUTDOWN)?"true":"false"); op->rc = CRM_DIRECT_NACK_RC; op->op_status = PCMK_LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } record_pending_op(lrm_state->node_name, rsc, op); op_id = generate_op_key(rsc->id, op->op_type, op->interval_ms); if (op->interval_ms > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval_ms, op->timeout, op->start_delay, params); if (call_id <= 0 && lrm_state_is_local(lrm_state)) { crm_err("Operation %s on %s failed: %d", 
operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else if (call_id <= 0) { crm_err("Operation %s on resource %s failed to execute on remote node %s: %d", operation, rsc->id, lrm_state->node_name, call_id); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); process_lrm_event(lrm_state, op, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; pending = calloc(1, sizeof(struct recurring_op_s)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); pending->start_time = time(NULL); pending->user_data = strdup(op->user_data); g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if ((op->interval_ms > 0) && (op->start_delay > START_DELAY_THRESHOLD)) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { /* <status> <nodes_status id=uname> <lrm> <lrm_resources> <lrm_resource id=...> </...> */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = crmd_cib_smart_opt(); const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(node_name, fsa_our_uname)) { uuid = fsa_our_uuid; } else { /* remote nodes uuid and uname are equal */ uuid = node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } CRM_LOG_ASSERT(uuid != NULL); if(uuid == NULL) { rc = -EINVAL; goto done; } crm_xml_add(iter, XML_ATTR_UUID, uuid); crm_xml_add(iter, XML_ATTR_UNAME, node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, node_name, __FUNCTION__); if (rsc) { const char *container = NULL; crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); if (op->params) { container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); } if (container) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container); } } else { 
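        /* rsc is NULL: the resource is unknown to the executor, so just ack the result instead of updating the CIB */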
crm_warn("Resource %s no longer exists in the executor", op->rsc_id); send_direct_ack(NULL, NULL, rsc, op, op->rsc_id); goto cleanup; } crm_log_xml_trace(update, __FUNCTION__); /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down then anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" that really weren't * * the alternative however means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } done: /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%u on %s", rc, op->op_type, op->interval_ms, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = strdup(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* 2 chars for 2 chars, null-termination irrelevant */ memcpy(pch, "\n ", 2 * sizeof(char)); pch = strstr(pch, escaped_newline); } return ret; } gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op, struct recurring_op_s *pending) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; gboolean remove = FALSE; gboolean removed = FALSE; lrmd_rsc_info_t *rsc = NULL; CRM_CHECK(op != NULL, return FALSE); CRM_CHECK(op->rsc_id != NULL, return FALSE); op_id = make_stop_id(op->rsc_id, op->call_id); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval_ms); rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); if(pending == NULL) { remove = TRUE; pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); } if (op->op_status == PCMK_LRM_OP_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Leave it to the TE/scheduler to decide if this is an error op->op_status = PCMK_LRM_OP_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_LRM_OP_CANCELLED) { if (safe_str_eq(op->op_type, RSC_NOTIFY) || safe_str_eq(op->op_type, RSC_METADATA)) { /* Keep notify and meta-data ops out of the CIB */ send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { update_id = do_update_resource(lrm_state->node_name, rsc, op); } } else if (op->interval_ms == 0) { /* This will occur when "crm resource cleanup" is called while actions are in-flight */ crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else if (pending == NULL) { /* We don't need to do anything for cancelled ops * that are not in our pending op list. There are no * transition actions waiting on these operations. */ } else if (op->user_data == NULL) { /* At this point we have a pending entry, but no transition * key present in the user_data field. 
report this */ crm_err("Op %s (call=%d): No user data", op_key, op->call_id); } else if (pending->remove) { /* The tengine canceled this op, we have been waiting for the cancel to finish. */ erase_lrm_history_by_op(lrm_state, op); } else if (op->rsc_deleted) { /* The tengine initiated this op, but it was cancelled outside of the * tengine's control during a resource cleanup/re-probe request. The tengine * must be alerted that this operation completed, otherwise the tengine * will continue waiting for this update to occur until it is timed out. * We don't want this update going to the cib though, so use a direct ack. */ crm_trace("Op %s (call=%d): cancelled due to rsc deletion", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { /* Before a stop is called, no need to direct ack */ crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if ((op->interval_ms == 0) && g_hash_table_remove(lrm_state->pending_ops, op_id)) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops)); } else if ((op->interval_ms != 0) && (op->op_status == PCMK_LRM_OP_CANCELLED)) { removed = TRUE; g_hash_table_remove(lrm_state->pending_ops, op_id); } switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: crm_info("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? "true" : "false")); break; case PCMK_LRM_OP_DONE: do_crm_log((op->interval_ms? LOG_INFO : LOG_NOTICE), "Result of %s operation for %s on %s: %d (%s) " CRM_XS " call=%d key=%s confirmed=%s cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, lrm_state->node_name, op->rc, services_ocf_exitcode_str(op->rc), op->call_id, op_key, (removed? "true" : "false"), update_id); break; case PCMK_LRM_OP_TIMEOUT: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s timeout=%dms", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, op->timeout); break; default: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? "true" : "false"), op->op_status, update_id); } if (op->output) { char *prefix = crm_strdup_printf("%s-" CRM_OP_FMT ":%d", lrm_state->node_name, op->rsc_id, op->op_type, op->interval_ms, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } free(prefix); } if (safe_str_neq(op->op_type, RSC_METADATA)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (op->rc == PCMK_OCF_OK) { char *metadata = unescape_newlines(op->output); metadata_cache_update(lrm_state->metadata_cache, rsc, metadata); free(metadata); } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... 
allow it to continue */ mainloop_set_trigger(fsa_source); update_history_cache(lrm_state, rsc, op); lrmd_free_rsc_info(rsc); free(op_key); free(op_id); return TRUE; } diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c index 9a14b072d0..b451522ec1 100644 --- a/daemons/execd/remoted_tls.c +++ b/daemons/execd/remoted_tls.c @@ -1,385 +1,386 @@ /* * Copyright 2012-2018 David Vossel <davidvossel@gmail.com> * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <glib.h> #include <unistd.h> #include <crm/crm.h> #include <crm/msg_xml.h> #include <crm/crm.h> #include <crm/msg_xml.h> #include <crm/common/mainloop.h> #include <netdb.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/ip.h> #include <arpa/inet.h> #include "pacemaker-execd.h" #ifdef HAVE_GNUTLS_GNUTLS_H # define LRMD_REMOTE_AUTH_TIMEOUT 10000 gnutls_psk_server_credentials_t psk_cred_s; gnutls_dh_params_t dh_params; static int ssock = -1; extern int lrmd_call_id; static void debug_log(int level, const char *str) { fputs(str, stderr); } static int lrmd_remote_client_msg(gpointer data) { int id = 0; int rc = 0; int disconnected = 0; xmlNode *request = NULL; crm_client_t *client = data; if (client->remote->tls_handshake_complete == FALSE) { int rc = 0; /* Muliple calls to handshake will be required, this callback * will be invoked once the client sends more handshake data. */ do { rc = gnutls_handshake(*client->remote->tls_session); if (rc < 0 && rc != GNUTLS_E_AGAIN) { crm_err("TLS handshake with Pacemaker Remote failed"); return -1; } } while (rc == GNUTLS_E_INTERRUPTED); if (rc == 0) { crm_debug("TLS handshake with Pacemaker Remote completed"); client->remote->tls_handshake_complete = TRUE; if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } client->remote->auth_timeout = 0; /* Alert other clients of the new connection */ notify_of_new_client(client); } return 0; } rc = crm_remote_ready(client->remote, 0); if (rc == 0) { /* no msg to read */ return 0; } else if (rc < 0) { crm_info("Client disconnected while polling it"); return -1; } crm_remote_recv(client->remote, -1, &disconnected); request = crm_remote_parse_buffer(client->remote); while (request) { crm_element_value_int(request, F_LRMD_REMOTE_MSG_ID, &id); crm_trace("processing request from remote client with remote msg id %d", id); if (!client->name) { const char *value = crm_element_value(request, F_LRMD_CLIENTNAME); if (value) { client->name = strdup(value); } } lrmd_call_id++; if (lrmd_call_id < 1) { lrmd_call_id = 1; } crm_xml_add(request, F_LRMD_CLIENTID, client->id); crm_xml_add(request, F_LRMD_CLIENTNAME, client->name); crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); free_xml(request); /* process all the messages in the current buffer */ request = crm_remote_parse_buffer(client->remote); } if (disconnected) { crm_info("Client disconnected while reading from it"); return -1; } return 0; } static void lrmd_remote_client_destroy(gpointer user_data) { crm_client_t *client = user_data; if (client == NULL) { return; } crm_notice("Cleaning up after remote client %s disconnected " CRM_XS " id=%s", (client->name? 
client->name : ""), client->id); ipc_proxy_remove_provider(client); /* if this is the last remote connection, stop recurring * operations */ if (crm_hash_table_size(client_connections) == 1) { client_disconnect_cleanup(NULL); } if (client->remote->tls_session) { void *sock_ptr; int csock; sock_ptr = gnutls_transport_get_ptr(*client->remote->tls_session); csock = GPOINTER_TO_INT(sock_ptr); gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*client->remote->tls_session); gnutls_free(client->remote->tls_session); close(csock); } lrmd_client_destroy(client); return; } static gboolean lrmd_auth_timeout_cb(gpointer data) { crm_client_t *client = data; client->remote->auth_timeout = 0; if (client->remote->tls_handshake_complete == TRUE) { return FALSE; } mainloop_del_fd(client->remote->source); client->remote->source = NULL; crm_err("Remote client authentication timed out"); return FALSE; } static int lrmd_remote_listen(gpointer data) { int csock = 0; gnutls_session_t *session = NULL; crm_client_t *new_client = NULL; static struct mainloop_fd_callbacks lrmd_remote_fd_cb = { .dispatch = lrmd_remote_client_msg, .destroy = lrmd_remote_client_destroy, }; csock = crm_remote_accept(ssock); if (csock < 0) { return TRUE; } session = create_psk_tls_session(csock, GNUTLS_SERVER, psk_cred_s); if (session == NULL) { crm_err("TLS session creation failed"); close(csock); return TRUE; } new_client = crm_client_alloc(NULL); new_client->remote = calloc(1, sizeof(crm_remote_t)); new_client->kind = CRM_CLIENT_TLS; new_client->remote->tls_session = session; new_client->remote->auth_timeout = g_timeout_add(LRMD_REMOTE_AUTH_TIMEOUT, lrmd_auth_timeout_cb, new_client); crm_notice("Client connection to Pacemaker Remote established " CRM_XS " %p id: %s", new_client, new_client->id); new_client->remote->source = mainloop_add_fd("pacemaker-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &lrmd_remote_fd_cb); return TRUE; } static void lrmd_remote_connection_destroy(gpointer user_data) { crm_notice("Remote tls server disconnected"); return; } static int lrmd_tls_server_key_cb(gnutls_session_t session, const char *username, gnutls_datum_t * key) { return lrmd_tls_set_key(key); } static int bind_and_listen(struct addrinfo *addr) { int optval; int fd; int rc; char buffer[INET6_ADDRSTRLEN] = { 0, }; crm_sockaddr2str(addr->ai_addr, buffer); crm_trace("Attempting to bind on address %s", buffer); fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (fd < 0) { return -1; } /* reuse address */ optval = 1; rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't allow the reuse of local addresses by our remote listener, bind address %s", buffer); close(fd); return -1; } if (addr->ai_family == AF_INET6) { optval = 0; rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't disable IPV6 only on address %s", buffer); close(fd); return -1; } } if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) { close(fd); return -1; } if (listen(fd, 10) == -1) { crm_err("Can not start listen on address %s", buffer); close(fd); return -1; } crm_notice("Listening on address %s", buffer); return fd; } int lrmd_init_remote_tls_server() { int rc; int filter; int port = crm_default_remote_port(); struct addrinfo hints, *res = NULL, *iter; char port_str[6]; // at most "65535" gnutls_datum_t psk_key = { NULL, 0 }; static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = 
lrmd_remote_listen, .destroy = lrmd_remote_connection_destroy, }; crm_notice("Starting TLS listener on port %d", port); crm_gnutls_global_init(); gnutls_global_set_log_function(debug_log); gnutls_dh_params_init(&dh_params); gnutls_dh_params_generate2(dh_params, 1024); gnutls_psk_allocate_server_credentials(&psk_cred_s); gnutls_psk_set_server_credentials_function(psk_cred_s, lrmd_tls_server_key_cb); gnutls_psk_set_server_dh_params(psk_cred_s, dh_params); /* The key callback won't get called until the first client connection * attempt. Do it once here, so we can warn the user at start-up if we can't * read the key. We don't error out, though, because it's fine if the key is * going to be added later. */ rc = lrmd_tls_set_key(&psk_key); if (rc != 0) { crm_warn("A cluster connection will not be possible until the key is available"); } + gnutls_free(psk_key.data); memset(&hints, 0, sizeof(struct addrinfo)); /* Bind to the wildcard address (INADDR_ANY or IN6ADDR_ANY_INIT). * @TODO allow user to specify a specific address */ hints.ai_flags = AI_PASSIVE; hints.ai_family = AF_UNSPEC; /* Return IPv6 or IPv4 */ hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; snprintf(port_str, sizeof(port_str), "%d", port); rc = getaddrinfo(NULL, port_str, &hints, &res); if (rc) { crm_err("Unable to get IP address info for local node: %s", gai_strerror(rc)); return -1; } iter = res; filter = AF_INET6; /* Try IPv6 addresses first, then IPv4 */ while (iter) { if (iter->ai_family == filter) { ssock = bind_and_listen(iter); } if (ssock != -1) { break; } iter = iter->ai_next; if (iter == NULL && filter == AF_INET6) { iter = res; filter = AF_INET; } } if (ssock < 0) { crm_err("unable to bind to address"); goto init_remote_cleanup; } mainloop_add_fd("pacemaker-remote-server", G_PRIORITY_DEFAULT, ssock, NULL, &remote_listen_fd_callbacks); rc = ssock; init_remote_cleanup: if (rc < 0) { close(ssock); ssock = 0; } freeaddrinfo(res); return rc; } void lrmd_tls_server_destroy(void) { if (psk_cred_s) { gnutls_psk_free_server_credentials(psk_cred_s); psk_cred_s = 0; } if (ssock > 0) { close(ssock); ssock = 0; } } #endif diff --git a/lib/pengine/status.c b/lib/pengine/status.c index 6376eae5bc..09b807e5bf 100644 --- a/lib/pengine/status.c +++ b/lib/pengine/status.c @@ -1,313 +1,295 @@ /* - * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net> + * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net> * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 6376eae5bc..09b807e5bf 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,313 +1,295 @@
/*
- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
+ * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
 *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>

#include <glib.h>

#include <crm/pengine/internal.h>
#include <unpack.h>

-#define MEMCHECK_STAGE_0 0
-
-#define check_and_exit(stage) cleanup_calculations(data_set); \
-    crm_mem_stats(NULL); \
-    crm_err("Exiting: stage %d", stage); \
-    crm_exit(CRM_EX_ERROR);
-
/*
 * Unpack everything
 * At the end you'll have:
 *  - A list of nodes
 *  - A list of resources (each with any dependencies on other resources)
 *  - A list of constraints between resources and nodes
 *  - A list of constraints between start/stop actions
 *  - A list of nodes that need to be stonith'd
 *  - A list of nodes that need to be shutdown
 *  - A list of the possible stop/start actions (without dependencies)
 */
gboolean
cluster_status(pe_working_set_t * data_set)
{
    xmlNode *config = get_xpath_object("//"XML_CIB_TAG_CRMCONFIG, data_set->input, LOG_TRACE);
    xmlNode *cib_nodes = get_xpath_object("//"XML_CIB_TAG_NODES, data_set->input, LOG_TRACE);
    xmlNode *cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
    xmlNode *cib_status = get_xpath_object("//"XML_CIB_TAG_STATUS, data_set->input, LOG_TRACE);
    xmlNode *cib_tags = get_xpath_object("//"XML_CIB_TAG_TAGS, data_set->input, LOG_TRACE);
    const char *value = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);

    crm_trace("Beginning unpack");
    pe_dataset = data_set;

    /* reset remaining global variables */
    data_set->failed = create_xml_node(NULL, "failed-ops");

    if (data_set->input == NULL) {
        return FALSE;
    }

    if (data_set->now == NULL) {
        data_set->now = crm_time_new(NULL);
    }

    if (data_set->dc_uuid == NULL) {
        data_set->dc_uuid = crm_element_value_copy(data_set->input, XML_ATTR_DC_UUID);
    }

    clear_bit(data_set->flags, pe_flag_have_quorum);
    if (crm_is_true(value)) {
        set_bit(data_set->flags, pe_flag_have_quorum);
    }

    data_set->op_defaults = get_xpath_object("//"XML_CIB_TAG_OPCONFIG, data_set->input, LOG_TRACE);
    data_set->rsc_defaults = get_xpath_object("//"XML_CIB_TAG_RSCCONFIG, data_set->input, LOG_TRACE);

    unpack_config(config, data_set);

    if (is_not_set(data_set->flags, pe_flag_quick_location)
        && is_not_set(data_set->flags, pe_flag_have_quorum)
        && data_set->no_quorum_policy != no_quorum_ignore) {
        crm_warn("Fencing and resource management disabled due to lack of quorum");
    }

    unpack_nodes(cib_nodes, data_set);

    if(is_not_set(data_set->flags, pe_flag_quick_location)) {
        unpack_remote_nodes(cib_resources, data_set);
    }

    unpack_resources(cib_resources, data_set);
    unpack_tags(cib_tags, data_set);

    if(is_not_set(data_set->flags, pe_flag_quick_location)) {
        unpack_status(cib_status, data_set);
    }

    set_bit(data_set->flags, pe_flag_have_status);
    return TRUE;
}

static void
pe_free_resources(GListPtr resources)
{
    resource_t *rsc = NULL;
    GListPtr iterator = resources;

    while (iterator != NULL) {
        rsc = (resource_t *) iterator->data;
        iterator = iterator->next;
        rsc->fns->free(rsc);
    }
    if (resources != NULL) {
        g_list_free(resources);
    }
}

static void
pe_free_actions(GListPtr actions)
{
    GListPtr iterator = actions;

    while (iterator != NULL) {
        pe_free_action(iterator->data);
        iterator = iterator->next;
    }
    if (actions != NULL) {
        g_list_free(actions);
    }
}
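/* The free helpers in this file share one pattern: advance the iterator before
 * releasing the current element, so freed memory is never dereferenced, then
 * free the list cells themselves. A generic sketch (free_item is hypothetical):
 *
 *     GListPtr iter = some_list;
 *
 *     while (iter != NULL) {
 *         gpointer item = iter->data;
 *
 *         iter = iter->next;      // advance first
 *         free_item(item);        // then release the payload
 *     }
 *     g_list_free(some_list);     // finally release the list cells
 */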
static void
pe_free_nodes(GListPtr nodes)
{
    GListPtr iterator = nodes;

    while (iterator != NULL) {
        node_t *node = (node_t *) iterator->data;
        struct pe_node_shared_s *details = node->details;

        iterator = iterator->next;

        crm_trace("deleting node");
        print_node("delete", node, FALSE);

        if (details != NULL) {
            crm_trace("%s is being deleted", details->uname);

            if (details->attrs != NULL) {
                g_hash_table_destroy(details->attrs);
            }
            if (details->utilization != NULL) {
                g_hash_table_destroy(details->utilization);
            }
            if (details->digest_cache != NULL) {
                g_hash_table_destroy(details->digest_cache);
            }
            g_list_free(details->running_rsc);
            g_list_free(details->allocated_rsc);
            free(details);
        }
        free(node);
    }
    if (nodes != NULL) {
        g_list_free(nodes);
    }
}

void
cleanup_calculations(pe_working_set_t * data_set)
{
    pe_dataset = NULL;
    if (data_set == NULL) {
        return;
    }

    clear_bit(data_set->flags, pe_flag_have_status);
    if (data_set->config_hash != NULL) {
        g_hash_table_destroy(data_set->config_hash);
    }

    if (data_set->singletons != NULL) {
        g_hash_table_destroy(data_set->singletons);
    }

    if (data_set->tickets) {
        g_hash_table_destroy(data_set->tickets);
    }

    if (data_set->template_rsc_sets) {
        g_hash_table_destroy(data_set->template_rsc_sets);
    }

    if (data_set->tags) {
        g_hash_table_destroy(data_set->tags);
    }

    free(data_set->dc_uuid);

    crm_trace("deleting resources");
    pe_free_resources(data_set->resources);

    crm_trace("deleting actions");
    pe_free_actions(data_set->actions);

    crm_trace("deleting nodes");
    pe_free_nodes(data_set->nodes);

    free_xml(data_set->graph);
    crm_time_free(data_set->now);
    free_xml(data_set->input);
    free_xml(data_set->failed);

    set_working_set_defaults(data_set);

    CRM_CHECK(data_set->ordering_constraints == NULL,; );
    CRM_CHECK(data_set->placement_constraints == NULL,; );
}

void
set_working_set_defaults(pe_working_set_t * data_set)
{
    pe_dataset = data_set;
    memset(data_set, 0, sizeof(pe_working_set_t));

    data_set->order_id = 1;
    data_set->action_id = 1;
    data_set->no_quorum_policy = no_quorum_freeze;

    data_set->flags = 0x0ULL;
    set_bit(data_set->flags, pe_flag_stop_rsc_orphans);
    set_bit(data_set->flags, pe_flag_symmetric_cluster);
    set_bit(data_set->flags, pe_flag_stop_action_orphans);
}

resource_t *
pe_find_resource(GListPtr rsc_list, const char *id)
{
    return pe_find_resource_with_flags(rsc_list, id, pe_find_renamed);
}

resource_t *
pe_find_resource_with_flags(GListPtr rsc_list, const char *id, enum pe_find flags)
{
    GListPtr rIter = NULL;

    for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
        resource_t *parent = rIter->data;
        resource_t *match = parent->fns->find_rsc(parent, id, NULL, flags);

        if (match != NULL) {
            return match;
        }
    }
    crm_trace("No match for %s", id);
    return NULL;
}

node_t *
pe_find_node_any(GListPtr nodes, const char *id, const char *uname)
{
    node_t *match = pe_find_node_id(nodes, id);

    if (match) {
        return match;
    }
    crm_trace("Looking up %s via its uname instead", uname);
    return pe_find_node(nodes, uname);
}

node_t *
pe_find_node_id(GListPtr nodes, const char *id)
{
    GListPtr gIter = nodes;

    for (; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        if (node && safe_str_eq(node->details->id, id)) {
            return node;
        }
    }
    /* error */
    return NULL;
}

node_t *
pe_find_node(GListPtr nodes, const char *uname)
{
    GListPtr gIter = nodes;

    for (; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        if (node && safe_str_eq(node->details->uname, uname)) {
            return node;
        }
    }
    /* error */
    return NULL;
}
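/* A minimal usage sketch for the lookup helpers above (assumes a populated
 * pe_working_set_t named data_set; the resource and node names are
 * illustrative only):
 *
 *     resource_t *rsc = pe_find_resource(data_set->resources, "my-rsc");
 *     node_t *node = pe_find_node(data_set->nodes, "node1");
 *
 *     if ((rsc == NULL) || (node == NULL)) {
 *         // not found: all of these helpers return NULL rather than asserting
 *     }
 */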