This might be intended behavior; stop failures are the only case where I'm certain I've observed it.
[root@fastvm-rhel-9-0-42 pacemaker]# pcs cluster cib | grep fail
        <nvpair id="status-1-last-failure-stateful.stop_0" name="last-failure-stateful#stop_0" value="1700645698"/>
        <nvpair id="status-1-fail-count-stateful.stop_0" name="fail-count-stateful#stop_0" value="INFINITY"/>
          <lrm_rsc_op id="stateful_last_failure_0" operation_key="stateful_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.19.0" transition-key="2:38:7:00ac1c64-213a-46f1-aa26-209fe24717af" transition-magic="0:0;2:38:7:00ac1c64-213a-46f1-aa26-209fe24717af" exit-reason="" on_node="fastvm-rhel-9-0-42" call-id="37" rc-code="0" op-status="0" interval="0" last-rc-change="1700645746" exec-time="46" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
          <lrm_rsc_op id="stateful_last_failure_0" operation_key="stateful_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.19.0" transition-key="1:2:7:00ac1c64-213a-46f1-aa26-209fe24717af" transition-magic="0:0;1:2:7:00ac1c64-213a-46f1-aa26-209fe24717af" exit-reason="" on_node="fastvm-rhel-9-0-43" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1700644367" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>

#
# Also tried with `--resource stateful`
[root@fastvm-rhel-9-0-42 pacemaker]# crm_resource --cleanup --resource stateful-clone
Cleaned up stateful:0 on fastvm-rhel-9-0-43
Cleaned up stateful:0 on fastvm-rhel-9-0-42
Cleaned up stateful:1 on fastvm-rhel-9-0-43
Cleaned up stateful:1 on fastvm-rhel-9-0-42

[root@fastvm-rhel-9-0-42 pacemaker]# pcs cluster cib | grep fail
        <nvpair id="status-1-last-failure-stateful.stop_0" name="last-failure-stateful#stop_0" value="1700645698"/>
        <nvpair id="status-1-fail-count-stateful.stop_0" name="fail-count-stateful#stop_0" value="INFINITY"/>
          <lrm_rsc_op id="stateful_last_failure_0" operation_key="stateful_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.19.0" transition-key="2:38:7:00ac1c64-213a-46f1-aa26-209fe24717af" transition-magic="0:0;2:38:7:00ac1c64-213a-46f1-aa26-209fe24717af" exit-reason="" on_node="fastvm-rhel-9-0-42" call-id="37" rc-code="0" op-status="0" interval="0" last-rc-change="1700645746" exec-time="46" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
          <lrm_rsc_op id="stateful_last_failure_0" operation_key="stateful_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.19.0" transition-key="1:2:7:00ac1c64-213a-46f1-aa26-209fe24717af" transition-magic="0:0;1:2:7:00ac1c64-213a-46f1-aa26-209fe24717af" exit-reason="" on_node="fastvm-rhel-9-0-43" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1700644367" exec-time="24" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>

[root@fastvm-rhel-9-0-42 pacemaker]# crm_simulate -LS
Current cluster status:
  * Node List:
    * Node fastvm-rhel-9-0-43: standby
    * Online: [ fastvm-rhel-9-0-42 ]

  * Full List of Resources:
    * Clone Set: stateful-clone [stateful] (promotable):
      * Stopped: [ fastvm-rhel-9-0-42 fastvm-rhel-9-0-43 ]

Transition Summary:

Executing Cluster Transition:

Revised Cluster Status:
  * Node List:
    * Node fastvm-rhel-9-0-43: standby
    * Online: [ fastvm-rhel-9-0-42 ]

  * Full List of Resources:
    * Clone Set: stateful-clone [stateful] (promotable):
      * Stopped: [ fastvm-rhel-9-0-42 fastvm-rhel-9-0-43 ]
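
In case it's useful, here is a sketch of how the stale attribute could be inspected and removed directly with crm_failcount instead of crm_resource, assuming its --query/--delete options and that the status-1 attributes belong to fastvm-rhel-9-0-42 (the node names and the stop operation are taken from the attribute key above, not verified independently):

# Show the recorded fail count for stateful's stop operation on this node
crm_failcount --query --resource stateful --operation stop \
    --node fastvm-rhel-9-0-42

# Remove it; this targets the fail-count-stateful#stop_0 attribute
# shown in the grep output above
crm_failcount --delete --resource stateful --operation stop \
    --node fastvm-rhel-9-0-42

Note that crm_failcount is addressed by the base resource name (stateful), matching the stateful#stop_0 attribute key, rather than by the clone instances (stateful:0, stateful:1) that the cleanup output reports; swap in fastvm-rhel-9-0-43 if the attribute turns out to live on that node instead.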